Posted to commits@ambari.apache.org by sw...@apache.org on 2016/12/08 23:29:40 UTC

[01/25] ambari git commit: Merge from branch-2.5

Repository: ambari
Updated Branches:
  refs/heads/branch-feature-AMBARI-18901 1c5c7df31 -> 4278c4a4f


http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/test/utils/configs/config_initializer_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/utils/configs/config_initializer_test.js b/ambari-web/test/utils/configs/config_initializer_test.js
index e951b34..d348055 100644
--- a/ambari-web/test/utils/configs/config_initializer_test.js
+++ b/ambari-web/test/utils/configs/config_initializer_test.js
@@ -905,312 +905,6 @@ describe('App.ConfigInitializer', function () {
 
   });
 
-  describe('config with mount points', function () {
-
-    var localDB = {
-        masterComponentHosts: [
-          {
-            component: 'NAMENODE',
-            hostName: 'h0'
-          },
-          {
-            component: 'SECONDARY_NAMENODE',
-            hostName: 'h4'
-          },
-          {
-            component: 'APP_TIMELINE_SERVER',
-            hostName: 'h0'
-          },
-          {
-            component: 'ZOOKEEPER_SERVER',
-            hostName: 'h0'
-          },
-          {
-            component: 'ZOOKEEPER_SERVER',
-            hostName: 'h1'
-          },
-          {
-            component: 'OOZIE_SERVER',
-            hostName: 'h0'
-          },
-          {
-            component: 'OOZIE_SERVER',
-            hostName: 'h1'
-          },
-          {
-            component: 'NIMBUS',
-            hostName: 'h2'
-          },
-          {
-            component: 'FALCON_SERVER',
-            hostName: 'h3'
-          },
-          {
-            component: 'KAFKA_BROKER',
-            hostName: 'h0'
-          },
-          {
-            component: 'KAFKA_BROKER',
-            hostName: 'h1'
-          }
-        ],
-        slaveComponentHosts: [
-          {
-            componentName: 'DATANODE',
-            hosts: [
-              {
-                hostName: 'h0'
-              },
-              {
-                hostName: 'h1'
-              }
-            ]
-          },
-          {
-            componentName: 'TASKTRACKER',
-            hosts: [
-              {
-                hostName: 'h0'
-              },
-              {
-                hostName: 'h1'
-              }
-            ]
-          },
-          {
-            componentName: 'NODEMANAGER',
-            hosts: [
-              {
-                hostName: 'h0'
-              },
-              {
-                hostName: 'h1'
-              },
-              {
-                hostName: 'h4'
-              }
-            ]
-          },
-          {
-            componentName: 'HBASE_REGIONSERVER',
-            hosts: [
-              {
-                hostName: 'h0'
-              },
-              {
-                hostName: 'h1'
-              }
-            ]
-          },
-          {
-            componentName: 'SUPERVISOR',
-            hosts: [
-              {
-                hostName: 'h0'
-              },
-              {
-                hostName: 'h1'
-              }
-            ]
-          }
-        ],
-        hosts: {
-          h0: {
-            disk_info: [
-              {
-                mountpoint: '/'
-              },
-              {
-                mountpoint: '/home'
-              },
-              {
-                mountpoint: '/boot'
-              },
-              {
-                mountpoint: '/boot/efi'
-              },
-              {
-                mountpoint: '/mnt'
-              },
-              {
-                mountpoint: '/mnt/efi'
-              },
-              {
-                mountpoint: '/media/disk0',
-                available: '100000000'
-              },
-              {
-                mountpoint: '/mount0',
-                available: '100000000'
-              }
-            ]
-          },
-          h4: {
-            disk_info: [
-              {
-                mountpoint: 'c:',
-                available: '100000000'
-              }
-            ]
-          }
-        }
-      },
-      cases = [
-        {
-          name: 'dfs.namenode.name.dir',
-          isOnlyFirstOneNeeded: false,
-          value: '/media/disk0/default\n/mount0/default\n'
-        },
-        {
-          name: 'dfs.name.dir',
-          isOnlyFirstOneNeeded: false,
-          value: '/media/disk0/default\n/mount0/default\n'
-        },
-        {
-          name: 'fs.checkpoint.dir',
-          isOnlyFirstOneNeeded: true,
-          value: 'file:///c:/default\n'
-        },
-        {
-          name: 'dfs.namenode.checkpoint.dir',
-          isOnlyFirstOneNeeded: true,
-          value: 'file:///c:/default\n'
-        },
-        {
-          name: 'dfs.data.dir',
-          isOnlyFirstOneNeeded: false,
-          value: '/media/disk0/default\n/mount0/default\n/media/disk1/default\n/mount1/default\n'
-        },
-        {
-          name: 'dfs.datanode.data.dir',
-          isOnlyFirstOneNeeded: false,
-          value: '/media/disk0/default\n/mount0/default\n/media/disk1/default\n/mount1/default\n'
-        },
-        {
-          name: 'mapred.local.dir',
-          isOnlyFirstOneNeeded: false,
-          value: '/media/disk0/default\n/mount0/default\n/media/disk1/default\n/mount1/default\n'
-        },
-        {
-          name: 'yarn.nodemanager.log-dirs',
-          isOnlyFirstOneNeeded: false,
-          value: '/media/disk0/default\n/mount0/default\n/media/disk1/default\n/mount1/default\nc:\\default\n'
-        },
-        {
-          name: 'yarn.nodemanager.local-dirs',
-          isOnlyFirstOneNeeded: false,
-          value: '/media/disk0/default\n/mount0/default\n/media/disk1/default\n/mount1/default\nc:\\default\n'
-        },
-        {
-          name: 'yarn.timeline-service.leveldb-timeline-store.path',
-          isOnlyFirstOneNeeded: true,
-          value: '/media/disk0/default'
-        },
-        {
-          name: 'yarn.timeline-service.leveldb-state-store.path',
-          isOnlyFirstOneNeeded: true,
-          value: '/media/disk0/default'
-        },
-        {
-          name: 'dataDir',
-          isOnlyFirstOneNeeded: true,
-          value: '/media/disk0/default'
-        },
-        {
-          name: 'oozie_data_dir',
-          isOnlyFirstOneNeeded: true,
-          value: '/media/disk0/default'
-        },
-        {
-          name: 'storm.local.dir',
-          isOnlyFirstOneNeeded: true,
-          value: '/media/disk0/default'
-        },
-        {
-          name: '*.falcon.graph.storage.directory',
-          isOnlyFirstOneNeeded: true,
-          value: '/default'
-        },
-        {
-          name: '*.falcon.graph.serialize.path',
-          isOnlyFirstOneNeeded: true,
-          value: '/default'
-        },
-        {
-          name: 'log.dirs',
-          isOnlyFirstOneNeeded: false,
-          value: '/media/disk0/default\n/mount0/default\n/media/disk1/default\n/mount1/default\n'
-        }
-      ];
-
-    beforeEach(function () {
-      sinon.stub(App.Host, 'find').returns([
-        Em.Object.create({
-          id: 'h1',
-          diskInfo: [
-            {
-              mountpoint: '/media/disk1',
-              type: 'devtmpfs'
-            },
-            {
-              mountpoint: '/media/disk1',
-              type: 'tmpfs'
-            },
-            {
-              mountpoint: '/media/disk1',
-              type: 'vboxsf'
-            },
-            {
-              mountpoint: '/media/disk1',
-              type: 'CDFS'
-            },
-            {
-              mountpoint: '/media/disk1',
-              available: '0'
-            },
-            {
-              mountpoint: '/media/disk1',
-              available: '100000000'
-            },
-            {
-              mountpoint: '/mount1',
-              available: '100000000'
-            }
-          ]
-        }),
-        Em.Object.create({
-          id: 'h2',
-          diskInfo: [
-            {
-              mountpoint: '/'
-            }
-          ]
-        }),
-        Em.Object.create({
-          id: 'h3',
-          diskInfo: []
-        })
-      ]);
-    });
-
-    afterEach(function () {
-      App.Host.find.restore();
-    });
-
-    cases.forEach(function (item) {
-      it(item.name, function () {
-        serviceConfigProperty.setProperties({
-          name: item.name,
-          recommendedValue: '/default'
-        });
-        App.ConfigInitializer.initialValue(serviceConfigProperty, localDB, {});
-        expect(serviceConfigProperty.get('value')).to.equal(item.value);
-        expect(serviceConfigProperty.get('recommendedValue')).to.equal(item.value);
-      });
-    });
-
-  });
-
   describe('initializerTypes', function () {
     var types = App.ConfigInitializer.get('initializerTypes');
     Em.keys(types).forEach(function(type) {
@@ -1275,155 +969,4 @@ describe('App.ConfigInitializer', function () {
     });
   });
 
-  describe('#_filterMountPoint', function() {
-    [
-      {
-        mPoint: {
-          mountpoint: '/'
-        },
-        localDB: {},
-        e: false
-      },
-      {
-        mPoint: {
-          mountpoint: '/home'
-        },
-        localDB: {},
-        e: false
-      },
-      {
-        mPoint: {
-          mountpoint: '/etc/resolv.conf'
-        },
-        localDB: {},
-        e: false
-      },
-      {
-        mPoint: {
-          mountpoint: '/etc/hostname'
-        },
-        localDB: {},
-        e: false
-      },
-      {
-        mPoint: {
-          mountpoint: '/etc/hosts'
-        },
-        localDB: {},
-        e: false
-      },
-      {
-        mPoint: {
-          mountpoint: '/boot'
-        },
-        localDB: {},
-        e: false
-      },
-      {
-        mPoint: {
-          mountpoint: '/mnt'
-        },
-        localDB: {},
-        e: false
-      },
-      {
-        mPoint: {
-          mountpoint: '/tmp'
-        },
-        localDB: {},
-        e: false
-      },
-      {
-        mPoint: {
-          mountpoint: '/some-dir',
-          type: 'devtmpfs'
-        },
-        localDB: {},
-        e: false
-      },
-      {
-        mPoint: {
-          mountpoint: '/some-dir',
-          type: 'tmpfs'
-        },
-        localDB: {},
-        e: false
-      },
-      {
-        mPoint: {
-          mountpoint: '/some-dir',
-          type: 'vboxsf'
-        },
-        localDB: {},
-        e: false
-      },
-      {
-        mPoint: {
-          mountpoint: '/some-dir',
-          type: 'CDFS'
-        },
-        localDB: {},
-        e: false
-      },
-      {
-        mPoint: {
-          mountpoint: '/usr/hdp'
-        },
-        localDB: {},
-        e: false
-      },
-      {
-        mPoint: {
-          mountpoint: '/usr/hdp/1'
-        },
-        localDB: {},
-        e: false
-      },
-      {
-        mPoint: {
-          mountpoint: '/usr/hdp/current'
-        },
-        localDB: {},
-        e: true
-      },
-      {
-        mPoint: {
-          mountpoint: '/usr/hdp/2.5'
-        },
-        localDB: {
-          selectedStack: {
-            repository_version: '2.5'
-          }
-        },
-        e: true
-      },
-      {
-        mPoint: {
-          mountpoint: '/usr/hdp/2.5.0'
-        },
-        localDB: {
-          selectedStack: {
-            repository_version: '2.5'
-          }
-        },
-        e: false
-      },
-      {
-        mPoint: {
-          mountpoint: '/normal/directory'
-        },
-        localDB: {
-          selectedStack: {
-            repository_version: '2.5'
-          }
-        },
-        e: true
-      }
-    ].forEach(function(test) {
-      it('mount point "{0}" should be {1}'.format(test.mPoint.mountpoint, test.e ? 'valid' : 'invalid'), function() {
-        var fFn = App.ConfigInitializer._filterMountPoint(test.localDB);
-        expect(fFn(test.mPoint)).to.be.equal(test.e);
-      });
-    });
-  });
 });

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/test/views/main/admin/stack_upgrade/upgrade_wizard_view_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/admin/stack_upgrade/upgrade_wizard_view_test.js b/ambari-web/test/views/main/admin/stack_upgrade/upgrade_wizard_view_test.js
index 5630a8c..075bcc8 100644
--- a/ambari-web/test/views/main/admin/stack_upgrade/upgrade_wizard_view_test.js
+++ b/ambari-web/test/views/main/admin/stack_upgrade/upgrade_wizard_view_test.js
@@ -416,7 +416,7 @@ describe('App.upgradeWizardView', function () {
     });
   });
 
-  App.TestAliases.testAsComputedAnd(getView(), 'isDowngradeAvailable', ['!controller.isDowngrade', 'controller.downgradeAllowed']);
+  App.TestAliases.testAsComputedAnd(getView(), 'isDowngradeAvailable', ['!controller.isDowngrade', 'controller.downgradeAllowed', '!controller.cantBeStarted']);
 
   describe("#taskDetails", function () {
     it("runningItem present", function () {

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/contrib/utils/perf/deploy-gce-perf-cluster.py
----------------------------------------------------------------------
diff --git a/contrib/utils/perf/deploy-gce-perf-cluster.py b/contrib/utils/perf/deploy-gce-perf-cluster.py
index 4737c6f..fe5f22d 100644
--- a/contrib/utils/perf/deploy-gce-perf-cluster.py
+++ b/contrib/utils/perf/deploy-gce-perf-cluster.py
@@ -28,7 +28,7 @@ import re
 import socket
 
 cluster_prefix = "perf"
-ambari_repo_file_url = "http://s3.amazonaws.com/dev.hortonworks.com/ambari/centos6/2.x/latest/trunk/ambaribn.repo"
+ambari_repo_file_url = "http://s3.amazonaws.com/dev.hortonworks.com/ambari/centos6/2.x/updates/2.5.0.0/ambaribn.repo"
 
 public_hostname_script = "foo"
 hostname_script = "foo"
@@ -280,13 +280,13 @@ def create_vms(args, number_of_nodes):
   :param number_of_nodes: Number of VMs to request.
   """
   print "Creating server VM {0}-server-{1} with xxlarge nodes on centos6...".format(cluster_prefix, args.cluster_suffix)
-  execute_command(args, args.controller, "/usr/sbin/gce up {0}-server-{1} 1 --centos6 --xxlarge".format(cluster_prefix, args.cluster_suffix),
+  execute_command(args, args.controller, "/usr/sbin/gce up {0}-server-{1} 1 --centos6 --xxlarge --ex --disk-xxlarge".format(cluster_prefix, args.cluster_suffix),
                   "Failed to create server, probably not enough resources!", "-tt")
   time.sleep(10)
 
   # trying to create cluster with needed params
-  print "Creating agent VMs {0}-agent-{1} with {2} large nodes on centos6...".format(cluster_prefix, args.cluster_suffix, str(number_of_nodes))
-  execute_command(args, args.controller, "/usr/sbin/gce up {0}-agent-{1} {2} --centos6 --large".format(cluster_prefix, args.cluster_suffix, str(number_of_nodes)),
+  print "Creating agent VMs {0}-agent-{1} with {2} xlarge nodes on centos6...".format(cluster_prefix, args.cluster_suffix, str(number_of_nodes))
+  execute_command(args, args.controller, "/usr/sbin/gce up {0}-agent-{1} {2} --centos6 --xlarge --ex --disk-large".format(cluster_prefix, args.cluster_suffix, str(number_of_nodes)),
                   "Failed to create cluster VMs, probably not enough resources!", "-tt")
 
   # VMs are not accessible immediately
@@ -331,7 +331,7 @@ def create_server_script(server_host_name):
   contents = "#!/bin/bash\n" + \
   "wget -O /etc/yum.repos.d/ambari.repo {0}\n".format(ambari_repo_file_url) + \
   "yum clean all; yum install git ambari-server -y\n" + \
-  "mkdir /home ; cd /home ; git clone https://github.com/apache/ambari.git\n" + \
+  "mkdir /home ; cd /home ; git clone https://github.com/apache/ambari.git ; cd ambari ; git checkout branch-2.5\n" + \
   "cp -r /home/ambari/ambari-server/src/main/resources/stacks/PERF /var/lib/ambari-server/resources/stacks/PERF\n" + \
   "cp -r /home/ambari/ambari-server/src/main/resources/stacks/PERF /var/lib/ambari-agent/cache/stacks/PERF\n" + \
   "\n" + \
@@ -357,6 +357,7 @@ def create_server_script(server_host_name):
   "sed -i -e 's/local.database.user=postgres//g' /etc/ambari-server/conf/ambari.properties\n" + \
   "sed -i -e 's/server.jdbc.postgres.schema=ambari//g' /etc/ambari-server/conf/ambari.properties\n" + \
   "sed -i -e 's/false/true/g' /var/lib/ambari-server/resources/stacks/PERF/1.0/metainfo.xml\n" + \
+  "sed -i -e 's/-Xmx2048m/-Xmx16384m/g' /var/lib/ambari-server/ambari-env.sh\n" + \
   "\n" + \
   "echo 'server.jdbc.driver=com.mysql.jdbc.Driver' >> /etc/ambari-server/conf/ambari.properties\n" + \
   "echo 'server.jdbc.rca.url=jdbc:mysql://{0}:3306/ambari' >> /etc/ambari-server/conf/ambari.properties\n".format(server_host_name) + \
@@ -384,7 +385,7 @@ def create_agent_script(server_host_name):
   contents = "#!/bin/bash\n" + \
   "wget -O /etc/yum.repos.d/ambari.repo {0}\n".format(ambari_repo_file_url) + \
   "yum clean all; yum install git ambari-agent -y\n" + \
-  "mkdir /home ; cd /home; git clone https://github.com/apache/ambari.git\n" + \
+  "mkdir /home ; cd /home; git clone https://github.com/apache/ambari.git ; cd ambari ; git checkout branch-2.5\n" + \
   "cp -r /home/ambari/ambari-server/src/main/resources/stacks/PERF /var/lib/ambari-agent/cache/stacks/PERF\n" + \
   "sed -i -e 's/hostname=localhost/hostname={0}/g' /etc/ambari-agent/conf/ambari-agent.ini\n".format(server_host_name) + \
   "sed -i -e 's/agent]/agent]\\nhostname_script={0}\\npublic_hostname_script={1}\\n/1' /etc/ambari-agent/conf/ambari-agent.ini\n".format(hostname_script, public_hostname_script) + \

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/contrib/views/capacity-scheduler/src/main/resources/ui/app/app.js
----------------------------------------------------------------------
diff --git a/contrib/views/capacity-scheduler/src/main/resources/ui/app/app.js b/contrib/views/capacity-scheduler/src/main/resources/ui/app/app.js
index fa1e05a..5854a09 100644
--- a/contrib/views/capacity-scheduler/src/main/resources/ui/app/app.js
+++ b/contrib/views/capacity-scheduler/src/main/resources/ui/app/app.js
@@ -53,4 +53,16 @@ Ember.Application.initializer({
   }
 });
 
-module.exports = Em.Application.create();
+module.exports = Em.Application.create({
+  Resolver: Ember.DefaultResolver.extend({
+    resolveTemplate: function(parsedName) {
+      var resolvedTemplate = this._super(parsedName);
+      var templateName = 'templates/' + parsedName.fullNameWithoutType.replace(/\./g, '/');
+      if (resolvedTemplate) {
+        return resolvedTemplate;
+      } else {
+        return Ember.TEMPLATES[templateName];
+      }
+    }
+  })
+});

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/contrib/views/capacity-scheduler/src/main/resources/view.xml
----------------------------------------------------------------------
diff --git a/contrib/views/capacity-scheduler/src/main/resources/view.xml b/contrib/views/capacity-scheduler/src/main/resources/view.xml
index 84a9ee0..cba243c 100644
--- a/contrib/views/capacity-scheduler/src/main/resources/view.xml
+++ b/contrib/views/capacity-scheduler/src/main/resources/view.xml
@@ -23,19 +23,22 @@
     <min-ambari-version>2.1.*</min-ambari-version>
     <cluster-config-options>AMBARI-ONLY</cluster-config-options>
 
-  <resource>
-      <name>scheduler</name>
-      <service-class>org.apache.ambari.view.capacityscheduler.CapacitySchedulerService</service-class>
+    <resource>
+        <name>scheduler</name>
+        <service-class>org.apache.ambari.view.capacityscheduler.CapacitySchedulerService</service-class>
     </resource>
 
     <auto-instance>
-      <name>AUTO_CS_INSTANCE</name>
-      <label>YARN Queue Manager</label>
-      <description>Manage YARN Capacity Scheduler Queues</description>
-      <stack-id>HDP-2.*</stack-id>
-      <services>
-        <service>YARN</service>
-      </services>
+        <name>AUTO_CS_INSTANCE</name>
+        <label>YARN Queue Manager</label>
+        <description>Manage YARN Capacity Scheduler Queues</description>
+        <stack-id>HDP-2.*</stack-id>
+        <services>
+            <service>YARN</service>
+        </services>
+        <roles>
+            <role>CLUSTER.ADMINISTRATOR</role>
+        </roles>
     </auto-instance>
 
 </view>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/contrib/views/files/src/main/resources/view.xml
----------------------------------------------------------------------
diff --git a/contrib/views/files/src/main/resources/view.xml b/contrib/views/files/src/main/resources/view.xml
index d6b2fde..640cee9 100644
--- a/contrib/views/files/src/main/resources/view.xml
+++ b/contrib/views/files/src/main/resources/view.xml
@@ -155,5 +155,12 @@
         <services>
           <service>HDFS</service>
         </services>
+        <roles>
+            <role>CLUSTER.ADMINISTRATOR</role>
+            <role>CLUSTER.OPERATOR</role>
+            <role>SERVICE.ADMINISTRATOR</role>
+            <role>SERVICE.OPERATOR</role>
+            <role>CLUSTER.USER</role>
+        </roles>
     </auto-instance>
 </view>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/contrib/views/hive-next/src/main/resources/view.xml
----------------------------------------------------------------------
diff --git a/contrib/views/hive-next/src/main/resources/view.xml b/contrib/views/hive-next/src/main/resources/view.xml
index 02e632c..1107cd1 100644
--- a/contrib/views/hive-next/src/main/resources/view.xml
+++ b/contrib/views/hive-next/src/main/resources/view.xml
@@ -337,5 +337,12 @@
         <services>
             <service>HIVE</service>
         </services>
+        <roles>
+            <role>CLUSTER.ADMINISTRATOR</role>
+            <role>CLUSTER.OPERATOR</role>
+            <role>SERVICE.ADMINISTRATOR</role>
+            <role>SERVICE.OPERATOR</role>
+            <role>CLUSTER.USER</role>
+        </roles>
     </auto-instance>
 </view>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/contrib/views/pig/src/main/resources/ui/pig-web/app/app.js
----------------------------------------------------------------------
diff --git a/contrib/views/pig/src/main/resources/ui/pig-web/app/app.js b/contrib/views/pig/src/main/resources/ui/pig-web/app/app.js
index 866b8a7..c683f72 100644
--- a/contrib/views/pig/src/main/resources/ui/pig-web/app/app.js
+++ b/contrib/views/pig/src/main/resources/ui/pig-web/app/app.js
@@ -35,5 +35,17 @@ module.exports = Em.Application.create({
     }
     var namespaceUrl = 'api/v1/views' + view + version + '/instances' + instance;
     return namespaceUrl;
-  }
+  },
+
+  Resolver: Ember.DefaultResolver.extend({
+    resolveTemplate: function(parsedName) {
+      var resolvedTemplate = this._super(parsedName);
+      var templateName = 'templates/' + parsedName.fullNameWithoutType.replace(/\./g, '/');
+      if (resolvedTemplate) {
+        return resolvedTemplate;
+      } else {
+        return Ember.TEMPLATES[templateName];
+      }
+    }
+  })
 });

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/contrib/views/pom.xml
----------------------------------------------------------------------
diff --git a/contrib/views/pom.xml b/contrib/views/pom.xml
index ca5aef4..440274b 100644
--- a/contrib/views/pom.xml
+++ b/contrib/views/pom.xml
@@ -46,7 +46,6 @@
     <module>tez</module>
     <module>hawq</module>
     <module>storm</module>
-    <module>zeppelin</module>
     <module>hueambarimigration</module>
     <module>hive-next</module>
     <module>hive</module>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/contrib/views/tez/src/main/resources/view.xml
----------------------------------------------------------------------
diff --git a/contrib/views/tez/src/main/resources/view.xml b/contrib/views/tez/src/main/resources/view.xml
index 2bb2bb0..41ffdbc 100644
--- a/contrib/views/tez/src/main/resources/view.xml
+++ b/contrib/views/tez/src/main/resources/view.xml
@@ -91,5 +91,12 @@ limitations under the License. Kerberos, LDAP, Custom. Binary/Htt
     <services>
       <service>TEZ</service>
     </services>
+    <roles>
+      <role>CLUSTER.ADMINISTRATOR</role>
+      <role>CLUSTER.OPERATOR</role>
+      <role>SERVICE.ADMINISTRATOR</role>
+      <role>SERVICE.OPERATOR</role>
+      <role>CLUSTER.USER</role>
+    </roles>
   </auto-instance>
 </view>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/job-details.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/job-details.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/job-details.hbs
index 0a04d25..f17f89e 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/job-details.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/job-details.hbs
@@ -52,7 +52,7 @@
             <li {{action 'getJobDag'}} role="presentation"><a href="#jobDag" aria-controls="jobDag" role="tab" data-toggle="tab">Flow Graph</a></li>
             <li role="presentation" class="pull-right">
                 {{#link-to 'design' (query-params appPath=model.appPath) class="backto-designer" }}
-                    <button type="button" class="btn btn-success" title="Back" >
+                    <button type="button" class="btn btn-success" title="Edit Workflow" >
                        Edit Workflow
                     </button>
                 {{/link-to}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/contrib/views/zeppelin/pom.xml
----------------------------------------------------------------------
diff --git a/contrib/views/zeppelin/pom.xml b/contrib/views/zeppelin/pom.xml
deleted file mode 100644
index 6a7f585..0000000
--- a/contrib/views/zeppelin/pom.xml
+++ /dev/null
@@ -1,190 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <groupId>org.apache.ambari.contrib.views</groupId>
-  <artifactId>zeppelin-view</artifactId>
-  <version>1.0.0.0</version>
-  <name>Zeppelin</name>
-
-  <parent>
-    <groupId>org.apache.ambari.contrib.views</groupId>
-    <artifactId>ambari-contrib-views</artifactId>
-    <version>2.5.0.0.0</version>
-  </parent>
-
-  <dependencies>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <version>4.8.1</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.easymock</groupId>
-      <artifactId>easymock</artifactId>
-      <version>3.1</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.ambari</groupId>
-      <artifactId>ambari-views</artifactId>
-      <version>2.5.0.0.0</version>
-    </dependency>
-    <dependency>
-      <groupId>com.sun.jersey</groupId>
-      <artifactId>jersey-server</artifactId>
-      <version>1.8</version>
-    </dependency>
-    <dependency>
-      <groupId>javax.servlet</groupId>
-      <artifactId>servlet-api</artifactId>
-      <version>2.5</version>
-      <scope>provided</scope>
-    </dependency>
-
-
-    <dependency>
-      <groupId>commons-configuration</groupId>
-      <artifactId>commons-configuration</artifactId>
-      <version>1.6</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.commons</groupId>
-      <artifactId>commons-csv</artifactId>
-      <version>1.0</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.commons</groupId>
-      <artifactId>commons-collections4</artifactId>
-      <version>4.0</version>
-    </dependency>
-
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-api</artifactId>
-      <version>1.7.5</version>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.ambari.contrib.views</groupId>
-      <artifactId>ambari-views-utils</artifactId>
-      <version>2.5.0.0.0</version>
-    </dependency>
-    <dependency>
-      <groupId>org.json</groupId>
-      <artifactId>json</artifactId>
-      <version>20160212</version>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.httpcomponents</groupId>
-      <artifactId>httpclient</artifactId>
-      <version>4.5.2</version>
-    </dependency>
-
-
-  </dependencies>
-
-  <properties>
-    <ambari.dir>${project.parent.parent.parent.basedir}</ambari.dir>
-    <hive-version>1.0.0</hive-version>
-    <ambari.version>2.5.0.0.0</ambari.version>
-  </properties>
-  <build>
-    <plugins>
-
-      <!-- Building frontend -->
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-compiler-plugin</artifactId>
-        <version>3.1</version>
-        <configuration>
-          <source>1.7</source>
-          <target>1.7</target>
-        </configuration>
-      </plugin>
-      <plugin>
-        <artifactId>maven-dependency-plugin</artifactId>
-        <executions>
-          <execution>
-            <phase>generate-resources</phase>
-            <goals>
-              <goal>copy-dependencies</goal>
-            </goals>
-            <configuration>
-              <outputDirectory>${project.build.directory}/lib</outputDirectory>
-              <includeScope>runtime</includeScope>
-            </configuration>
-          </execution>
-          <execution>
-            <id>copy-artifact</id>
-            <phase>package</phase>
-            <goals>
-              <goal>copy</goal>
-            </goals>
-            <configuration>
-              <artifactItems>
-                <artifactItem>
-                  <groupId>${project.groupId}</groupId>
-                  <artifactId>${project.artifactId}</artifactId>
-                  <version>${project.version}</version>
-                  <type>${project.packaging}</type>
-                </artifactItem>
-              </artifactItems>
-              <outputDirectory>${views.jars.dir.rel}</outputDirectory>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.vafer</groupId>
-        <artifactId>jdeb</artifactId>
-        <version>1.0.1</version>
-        <executions>
-          <execution>
-            <phase>none</phase>
-            <goals>
-              <goal>jdeb</goal>
-            </goals>
-          </execution>
-        </executions>
-        <configuration>
-          <submodules>false</submodules>
-        </configuration>
-      </plugin>
-    </plugins>
-    <resources>
-      <resource>
-        <directory>src/main/resources</directory>
-        <filtering>true</filtering>
-        <includes>
-          <include>WEB-INF/web.xml</include>
-          <include>META-INF/**/*</include>
-          <include>view.xml</include>
-          <include>WEB-INF/index.jsp</include>
-        </includes>
-      </resource>
-      <resource>
-        <targetPath>WEB-INF/lib</targetPath>
-        <filtering>false</filtering>
-        <directory>target/lib</directory>
-      </resource>
-    </resources>
-  </build>
-</project>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/contrib/views/zeppelin/src/main/java/org/apache/ambari/view/zeppelin/ZeppelinServiceCheck.java
----------------------------------------------------------------------
diff --git a/contrib/views/zeppelin/src/main/java/org/apache/ambari/view/zeppelin/ZeppelinServiceCheck.java b/contrib/views/zeppelin/src/main/java/org/apache/ambari/view/zeppelin/ZeppelinServiceCheck.java
deleted file mode 100644
index 8f94260..0000000
--- a/contrib/views/zeppelin/src/main/java/org/apache/ambari/view/zeppelin/ZeppelinServiceCheck.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.view.zeppelin;
-
-import org.apache.http.HttpResponse;
-import org.apache.http.client.HttpClient;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.impl.client.HttpClientBuilder;
-import org.json.JSONObject;
-
-import javax.servlet.ServletConfig;
-import javax.servlet.ServletException;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import java.io.IOException;
-import java.io.PrintWriter;
-
-
-public class ZeppelinServiceCheck {
-    static String check(String host, String port) {
-        String url = "http://" + host + ":" + port;
-        JSONObject json = new JSONObject();
-        json.put("url", url);
-        try {
-            HttpClient httpclient = HttpClientBuilder.create().build();
-            HttpGet httpget = new HttpGet(url);
-            HttpResponse httpresponse = httpclient.execute(httpget);
-            if (httpresponse.getStatusLine().getStatusCode() == 200) {
-                json.put("status", "SUCCESS");
-                json.put("message", "Zeppelin is running");
-            }
-        } catch (Exception e) {
-            json.put("status", "ERROR");
-            json.put("message", "Zeppelin is not running");
-        }
-        return json.toString();
-    }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/contrib/views/zeppelin/src/main/java/org/apache/ambari/view/zeppelin/ZeppelinServlet.java
----------------------------------------------------------------------
diff --git a/contrib/views/zeppelin/src/main/java/org/apache/ambari/view/zeppelin/ZeppelinServlet.java b/contrib/views/zeppelin/src/main/java/org/apache/ambari/view/zeppelin/ZeppelinServlet.java
deleted file mode 100644
index ab61a3d..0000000
--- a/contrib/views/zeppelin/src/main/java/org/apache/ambari/view/zeppelin/ZeppelinServlet.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.view.zeppelin;
-
-import org.apache.ambari.view.ViewContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.servlet.ServletConfig;
-import javax.servlet.ServletContext;
-import javax.servlet.ServletException;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.util.List;
-
-
-public class ZeppelinServlet extends HttpServlet {
-  private ViewContext viewContext;
-  private final static Logger LOG = LoggerFactory.getLogger(ZeppelinServlet.class);
-
-  @Override
-  public void init(ServletConfig config) throws ServletException {
-    super.init(config);
-
-    ServletContext context = config.getServletContext();
-    viewContext = (ViewContext) context.getAttribute(ViewContext.CONTEXT_ATTRIBUTE);
-  }
-
-  public void service(HttpServletRequest req,
-                      HttpServletResponse res) throws IOException {
-    res.setContentType("text/html");
-    PrintWriter out = res.getWriter();
-    String publicName = "";
-    String port = "";
-    try {
-      port = viewContext.getProperties().get("zeppelin.server.port");
-      if (viewContext.getCluster() != null) {
-        List<String> hostsForServiceComponents = viewContext.getCluster().getHostsForServiceComponent
-            ("ZEPPELIN", "ZEPPELIN_MASTER");
-        publicName = hostsForServiceComponents.get(0);
-      } else {
-        publicName = viewContext.getProperties().get("zeppelin.host.publicname");
-      }
-    } catch (Exception e) {
-      LOG.error("Zeppelin view servlet failed", e);
-    }
-    out.println("<html lang=\"en\">" +
-        "<head>" +
-        "    <meta charset=\"utf-8\"/>" +
-        "    <link rel=\"stylesheet\" href=\"/stylesheets/vendor.css\">" +
-        "</head>" +
-        "<body>" +
-        "" +
-        "<div class=\"container-fluid\" id=\"messageContainer\" style=\"display:none;\">" +
-        "    <h1>Welcome to the Zeppelin View</h1>" +
-        "    <h3>Service check failed</h3>" +
-        "" +
-        "    <table class=\"table\">" +
-        "        <tbody>" +
-        "        <tr>" +
-        "            <td>zeppelin service is not running</td>" +
-        "        </tr>" +
-        "        </tbody>" +
-        "    </table>" +
-        "" +
-        "</div>" +
-        "" +
-        "<iframe id='zeppelinIFrame' width=\"100%\" seamless=\"seamless\" style=\"border: 0px;\"></iframe>" +
-        "<script>" +
-        "var $ = jQuery = parent.jQuery;" +
-        "var iframe = document.querySelector('#zeppelinIFrame');" +
-        "var messageContainer = document.querySelector('#messageContainer');" +
-        "" +
-        "var serviceCheckResponse = $.parseJSON(' " + ZeppelinServiceCheck.check(publicName, port) + "');" +
-        "" +
-        "if (serviceCheckResponse.status === \"SUCCESS\") {" +
-        "    messageContainer.style.display = \"none\";" +
-        "    iframe.style.display = \"block\";" +
-        "    iframe.src = serviceCheckResponse.url;" +
-        "    iframe.height = window.innerHeight;" +
-        "} else {" +
-        "    messageContainer.style.display = \"block\";" +
-        "    iframe.style.display = \"none\";" +
-        "}" +
-        "" +
-        "$(window).resize(function () {" +
-        "    iframe.height = window.innerHeight;" +
-        "});" +
-        "</script>" +
-        "</body>" +
-        "</html>");
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/contrib/views/zeppelin/src/main/resources/WEB-INF/web.xml
----------------------------------------------------------------------
diff --git a/contrib/views/zeppelin/src/main/resources/WEB-INF/web.xml b/contrib/views/zeppelin/src/main/resources/WEB-INF/web.xml
deleted file mode 100644
index 6b2d156..0000000
--- a/contrib/views/zeppelin/src/main/resources/WEB-INF/web.xml
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0" encoding="ISO-8859-1" ?>
-
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. Kerberos, LDAP, Custom. Binary/Htt
--->
-
-<web-app xmlns="http://java.sun.com/xml/ns/j2ee"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://java.sun.com/xml/ns/j2ee http://java.sun.com/xml/ns/j2ee/web-app_2_4.xsd"
-         version="2.4">
-
-  <display-name>Zeppelin view servlet</display-name>
-  <description>
-    This is the Zeppelin view servlet application.
-  </description>
-
-  <servlet>
-    <servlet-name>ZeppelinServlet</servlet-name>
-    <servlet-class>org.apache.ambari.view.zeppelin.ZeppelinServlet</servlet-class>
-  </servlet>
-
-  <servlet-mapping>
-    <servlet-name>ZeppelinServlet</servlet-name>
-    <url-pattern>/</url-pattern>
-  </servlet-mapping>
-
-</web-app>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/contrib/views/zeppelin/src/main/resources/view.log4j.properties
----------------------------------------------------------------------
diff --git a/contrib/views/zeppelin/src/main/resources/view.log4j.properties b/contrib/views/zeppelin/src/main/resources/view.log4j.properties
deleted file mode 100644
index 15d264e..0000000
--- a/contrib/views/zeppelin/src/main/resources/view.log4j.properties
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright 2011 The Apache Software Foundation
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-log4j.appender.zeppelinView=org.apache.log4j.RollingFileAppender
-log4j.appender.zeppelinView.File=${ambari.log.dir}/zeppelin-view/zeppelin-view.log
-log4j.appender.zeppelinView.MaxFileSize=80MB
-log4j.appender.zeppelinView.MaxBackupIndex=60
-log4j.appender.zeppelinView.layout=org.apache.log4j.PatternLayout
-log4j.appender.zeppelinView.layout.ConversionPattern=%d{DATE} %5p [%t] [%X{viewName} %X{viewVersion} %X{viewInstanceName}] %c{1}:%L - %m%n
-
-log4j.logger.org.apache.ambari.view.zeppelin=INFO,zeppelinView
-log4j.additivity.org.apache.ambari.view.zeppelin=false
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/contrib/views/zeppelin/src/main/resources/view.xml
----------------------------------------------------------------------
diff --git a/contrib/views/zeppelin/src/main/resources/view.xml b/contrib/views/zeppelin/src/main/resources/view.xml
deleted file mode 100644
index 323e09a..0000000
--- a/contrib/views/zeppelin/src/main/resources/view.xml
+++ /dev/null
@@ -1,48 +0,0 @@
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. Kerberos, LDAP, Custom. Binary/Htt
--->
-<view>
-  <name>ZEPPELIN</name>
-  <label>Zeppelin View!</label>
-  <version>1.0.0</version>
-  <build>${env.BUILD_NUMBER}</build>
-  <description>Ambari view for Apache Zeppelin</description>
-
-  <parameter>
-    <name>zeppelin.server.port</name>
-    <description>Zeppelin Http port (example: 9995).</description>
-    <label>Zeppelin Http port</label>
-    <cluster-config>zeppelin-config/zeppelin.server.port</cluster-config>
-    <required>true</required>
-  </parameter>
-  <parameter>
-    <name>zeppelin.host.publicname</name>
-    <description>Zeppelin host name</description>
-    <label>Zeppelin host name</label>
-    <cluster-config>fake</cluster-config>
-    <required>true</required>
-  </parameter>
-
-  <auto-instance>
-    <name>AUTO_ZEPPELIN_INSTANCE</name>
-    <label>Zeppelin View</label>
-    <description>This view instance is auto created when the Zeppelin service is added to a cluster.</description>
-    <stack-id>HDP-2.*</stack-id>
-    <services>
-      <service>ZEPPELIN</service>
-    </services>
-  </auto-instance>
-</view>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 95465d5..b400268 100644
--- a/pom.xml
+++ b/pom.xml
@@ -266,6 +266,7 @@
             <exclude>contrib/agent-simulator/docker_image/package_list.txt</exclude>
             <exclude>contrib/agent-simulator/config/cluster.txt</exclude>
             <exclude>version</exclude>
+            <exclude>**/target/surefire-reports/</exclude>
             <!--IDE and GIT files-->
             <exclude>**/.idea/</exclude>
             <exclude>**/.classpath/</exclude>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/utility/pom.xml
----------------------------------------------------------------------
diff --git a/utility/pom.xml b/utility/pom.xml
index 2febb83..8380db9 100644
--- a/utility/pom.xml
+++ b/utility/pom.xml
@@ -20,6 +20,13 @@
          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
 
+  <parent>
+    <groupId>org.apache.ambari</groupId>
+    <artifactId>ambari-project</artifactId>
+    <version>2.5.0.0.0</version>
+    <relativePath>../ambari-project</relativePath>
+  </parent>
+
   <artifactId>utility</artifactId>
   <groupId>utility</groupId>
   <version>1.0.0.0-SNAPSHOT</version>
@@ -28,9 +35,25 @@
     <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
-      <version>4.12</version>
       <scope>compile</scope>    <!-- has to be compile-time dependency on junit -->
     </dependency>
+    <dependency>
+      <groupId>com.puppycrawl.tools</groupId>
+      <artifactId>checkstyle</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.puppycrawl.tools</groupId>
+      <artifactId>checkstyle</artifactId>
+      <type>test-jar</type>
+      <version>${checkstyle.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+      <version>19.0</version> <!-- required for checkstyle -->
+      <scope>test</scope>
+    </dependency>
   </dependencies>
 
   <build>
@@ -45,6 +68,14 @@
         </configuration>
       </plugin>
       <plugin>
+        <artifactId>maven-assembly-plugin</artifactId>
+        <configuration>
+          <descriptors>
+            <descriptor>${project.parent.basedir}/src/main/assemblies/empty.xml</descriptor>
+          </descriptors>
+        </configuration>
+      </plugin>
+      <plugin>
         <groupId>org.codehaus.mojo</groupId>
         <artifactId>rpm-maven-plugin</artifactId>
         <version>2.0.1</version>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/utility/src/main/java/org/apache/ambari/checkstyle/AvoidTransactionalOnPrivateMethodsCheck.java
----------------------------------------------------------------------
diff --git a/utility/src/main/java/org/apache/ambari/checkstyle/AvoidTransactionalOnPrivateMethodsCheck.java b/utility/src/main/java/org/apache/ambari/checkstyle/AvoidTransactionalOnPrivateMethodsCheck.java
new file mode 100644
index 0000000..80da103
--- /dev/null
+++ b/utility/src/main/java/org/apache/ambari/checkstyle/AvoidTransactionalOnPrivateMethodsCheck.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.checkstyle;
+
+import com.puppycrawl.tools.checkstyle.api.AbstractCheck;
+import com.puppycrawl.tools.checkstyle.api.DetailAST;
+import com.puppycrawl.tools.checkstyle.api.TokenTypes;
+
+/**
+ * Detects private methods annotated as <code>Transactional</code>.
+ * See https://github.com/google/guice/wiki/Transactions for why this should be
+ * avoided.
+ */
+public class AvoidTransactionalOnPrivateMethodsCheck extends AbstractCheck {
+
+  private static final String ANNOTATION_NAME = "Transactional";
+  public static final String MSG_TRANSACTIONAL_ON_PRIVATE_METHOD = "@" + ANNOTATION_NAME + " should not be used on private methods";
+
+  @Override
+  public int[] getDefaultTokens() {
+    return new int[] { TokenTypes.METHOD_DEF };
+  }
+
+  @Override
+  public void visitToken(DetailAST ast) {
+    DetailAST modifiers = ast.findFirstToken(TokenTypes.MODIFIERS);
+    if (modifiers.findFirstToken(TokenTypes.LITERAL_PRIVATE) != null) {
+      DetailAST annotation = modifiers.findFirstToken(TokenTypes.ANNOTATION);
+      while (annotation != null) {
+        DetailAST name = annotation.findFirstToken(TokenTypes.IDENT);
+        if (name != null && ANNOTATION_NAME.equals(name.getText())) {
+          log(ast.getLineNo(), MSG_TRANSACTIONAL_ON_PRIVATE_METHOD);
+          break;
+        }
+        annotation = annotation.getNextSibling();
+      }
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/utility/src/main/resources/checkstyle_packages.xml
----------------------------------------------------------------------
diff --git a/utility/src/main/resources/checkstyle_packages.xml b/utility/src/main/resources/checkstyle_packages.xml
new file mode 100644
index 0000000..8cf4a44
--- /dev/null
+++ b/utility/src/main/resources/checkstyle_packages.xml
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Licensed under the Apache License, Version 2.0 (the "License"); you
+  may not use this file except in compliance with the License. You may obtain
+  a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless
+  required by applicable law or agreed to in writing, software distributed
+  under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
+  OR CONDITIONS OF ANY KIND, either express or implied. See the License for
+  the specific language governing permissions and limitations under the License.
+  See accompanying LICENSE file. -->
+<!DOCTYPE checkstyle-packages PUBLIC
+  "-//Puppy Crawl//DTD Package Names 1.0//EN"
+  "http://www.puppycrawl.com/dtds/packages_1_0.dtd">
+<checkstyle-packages>
+  <package name="org.apache.ambari.checkstyle"/>
+</checkstyle-packages>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/utility/src/test/java/org/apache/ambari/checkstyle/AvoidTransactionalOnPrivateMethodsCheckTest.java
----------------------------------------------------------------------
diff --git a/utility/src/test/java/org/apache/ambari/checkstyle/AvoidTransactionalOnPrivateMethodsCheckTest.java b/utility/src/test/java/org/apache/ambari/checkstyle/AvoidTransactionalOnPrivateMethodsCheckTest.java
new file mode 100644
index 0000000..14d7486
--- /dev/null
+++ b/utility/src/test/java/org/apache/ambari/checkstyle/AvoidTransactionalOnPrivateMethodsCheckTest.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.checkstyle;
+
+import static org.apache.ambari.checkstyle.AvoidTransactionalOnPrivateMethodsCheck.MSG_TRANSACTIONAL_ON_PRIVATE_METHOD;
+
+import java.io.File;
+import java.io.IOException;
+
+import com.puppycrawl.tools.checkstyle.BaseCheckTestSupport;
+import com.puppycrawl.tools.checkstyle.DefaultConfiguration;
+
+import org.junit.Test;
+
+public class AvoidTransactionalOnPrivateMethodsCheckTest extends BaseCheckTestSupport {
+
+  @Override
+  protected String getPath(String filename) throws IOException {
+    return new File("src/test/resources/org/apache/ambari/checkstyle/" + filename)
+      .getCanonicalPath();
+  }
+
+  @Test
+  public void transactionalOnPrivateMethod() throws Exception {
+    final DefaultConfiguration config = createCheckConfig(AvoidTransactionalOnPrivateMethodsCheck.class);
+    final String[] expected = {
+      "32: " + MSG_TRANSACTIONAL_ON_PRIVATE_METHOD,
+      "41: " + MSG_TRANSACTIONAL_ON_PRIVATE_METHOD,
+    };
+
+    verify(config, getPath("InputTransactionalOnPrivateMethods.java"), expected);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/utility/src/test/resources/org/apache/ambari/checkstyle/InputTransactionalOnPrivateMethods.java
----------------------------------------------------------------------
diff --git a/utility/src/test/resources/org/apache/ambari/checkstyle/InputTransactionalOnPrivateMethods.java b/utility/src/test/resources/org/apache/ambari/checkstyle/InputTransactionalOnPrivateMethods.java
new file mode 100644
index 0000000..2ea5444
--- /dev/null
+++ b/utility/src/test/resources/org/apache/ambari/checkstyle/InputTransactionalOnPrivateMethods.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.checkstyle;
+
+import com.google.inject.persist.Transactional;
+
+/**
+ * Input file for AvoidTransactionalOnPrivateMethodsCheckTest.
+ */
+public class InputTransactionalOnPrivateMethods {
+
+  @Transactional
+  public void publicMethodWithTransactional() {
+    ;
+  }
+
+  @Transactional
+  private void privateMethodWithTransactional() {
+    ;
+  }
+
+  private void privateMethodWithoutTransactional() {
+    ;
+  }
+
+  @Transactional
+  private void otherPrivateMethodWithTransactional() {
+    ;
+  }
+
+}


[19/25] ambari git commit: Merge from branch-2.5

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/metainfo.xml
index 3b43cc0..54c92d8 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/metainfo.xml
@@ -20,7 +20,7 @@
   <services>
     <service>
       <name>HIVE</name>
-      <version>1.2.1.2.5</version>
+      <version>1.2.1.2.6</version>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.6/services/KAFKA/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/KAFKA/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/KAFKA/metainfo.xml
index 12f6c45..047bbd2 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/KAFKA/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/KAFKA/metainfo.xml
@@ -20,7 +20,7 @@
   <services>
     <service>
       <name>KAFKA</name>
-      <version>0.10.0.2.5</version>
+      <version>0.10.0.2.6</version>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.6/services/KNOX/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/KNOX/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/KNOX/metainfo.xml
index 8f303a5..cd7b92c 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/KNOX/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/KNOX/metainfo.xml
@@ -20,7 +20,7 @@
   <services>
     <service>
       <name>KNOX</name>
-      <version>0.9.0.2.5</version>
+      <version>0.11.0.2.6</version>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.6/services/MAHOUT/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/MAHOUT/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/MAHOUT/metainfo.xml
index 9ed6957..3fe4d57 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/MAHOUT/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/MAHOUT/metainfo.xml
@@ -20,7 +20,7 @@
   <services>
     <service>
       <name>MAHOUT</name>
-      <version>0.9.0.2.5</version>
+      <version>0.9.0.2.6</version>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.6/services/OOZIE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/OOZIE/metainfo.xml
index 9e845c3..6392ed4 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/OOZIE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/OOZIE/metainfo.xml
@@ -20,6 +20,7 @@
   <services>
     <service>
       <name>OOZIE</name>
+      <version>4.2.0.2.6</version>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.6/services/PIG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/PIG/metainfo.xml
index 0dbf74c..266aa5e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/PIG/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/PIG/metainfo.xml
@@ -20,7 +20,7 @@
   <services>
     <service>
       <name>PIG</name>
-      <version>0.16.0.2.5</version>
+      <version>0.16.0.2.6</version>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.6/services/RANGER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/RANGER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/RANGER/metainfo.xml
index 6287e00..a115134 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/RANGER/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/RANGER/metainfo.xml
@@ -23,7 +23,7 @@
   <services>
     <service>
       <name>RANGER</name>
-      <version>0.6.0.2.5</version>
+      <version>0.7.0.2.6</version>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.6/services/RANGER_KMS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/RANGER_KMS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/RANGER_KMS/metainfo.xml
index ab065d9..1c0a2db 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/RANGER_KMS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/RANGER_KMS/metainfo.xml
@@ -23,7 +23,7 @@
   <services>
     <service>
       <name>RANGER_KMS</name>
-      <version>0.6.0.2.5</version>
+      <version>0.7.0.2.6</version>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.6/services/SLIDER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/SLIDER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/SLIDER/metainfo.xml
index 6502bfe..0447906 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/SLIDER/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/SLIDER/metainfo.xml
@@ -20,7 +20,7 @@
   <services>
     <service>
       <name>SLIDER</name>
-      <version>0.80.0.2.5</version>
+      <version>0.91.0.2.6</version>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK/configuration/livy-conf.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK/configuration/livy-conf.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK/configuration/livy-conf.xml
new file mode 100644
index 0000000..b7bfa73
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK/configuration/livy-conf.xml
@@ -0,0 +1,73 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <property>
+    <name>livy.environment</name>
+    <value>production</value>
+    <description>
+            Specifies Livy's environment. May either be "production" or "development". In "development"
+            mode, Livy will enable debugging options, such as reporting possible routes on a 404.
+            Defaults to development.
+        </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>livy.server.port</name>
+    <value>8998</value>
+    <description>
+            What port to start the server on. Defaults to 8998.
+        </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>livy.server.session.timeout</name>
+    <value>3600000</value>
+    <description>
+            Time in milliseconds that Livy will wait before timing out an idle session.
+            Default is one hour.
+        </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>livy.impersonation.enabled</name>
+    <value>true</value>
+    <description>
+            Whether Livy should use proxy users when submitting a job.
+        </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>livy.server.csrf_protection.enabled</name>
+    <value>true</value>
+    <description>
+            Whether to enable CSRF protection for Livy's REST API.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>livy.spark.master</name>
+    <value>yarn-cluster</value>
+    <description>
+            The spark.master property for the Spark engine.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>

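Note: the livy-conf properties above are exposed to stack scripts through the standard configurations dictionary. The following is a minimal, illustrative sketch only (not the actual SPARK service params file); the variable names are assumptions, and the fallback values simply mirror the defaults shipped in livy-conf.xml above, read with the same resource_management helpers used by the HDP 3.0 hook scripts later in this commit:

# Illustrative only: reading livy-conf values via Script.get_config()/default().
from resource_management.libraries.script import Script
from resource_management.libraries.functions import default

config = Script.get_config()

# default() falls back to the value shipped in livy-conf.xml when unset.
livy_server_port = default("/configurations/livy-conf/livy.server.port", "8998")
livy_session_timeout = default("/configurations/livy-conf/livy.server.session.timeout", "3600000")
livy_impersonation_enabled = default("/configurations/livy-conf/livy.impersonation.enabled", "true")
livy_spark_master = default("/configurations/livy-conf/livy.spark.master", "yarn-cluster")
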
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK/configuration/livy-spark-blacklist.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK/configuration/livy-spark-blacklist.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK/configuration/livy-spark-blacklist.xml
new file mode 100644
index 0000000..d4f27bf
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK/configuration/livy-spark-blacklist.xml
@@ -0,0 +1,52 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <property>
+    <name>content</name>
+    <description>spark-blacklist.properties</description>
+    <value>
+#
+# Configuration override / blacklist. Defines a list of properties that users are not allowed
+# to override when starting Spark sessions.
+#
+# This file takes a list of property names (one per line). Empty lines and lines starting with "#"
+# are ignored.
+#
+
+# Disallow overriding the master and the deploy mode.
+spark.master
+spark.submit.deployMode
+
+# Disallow overriding the location of Spark cached jars.
+spark.yarn.jar
+spark.yarn.jars
+spark.yarn.archive
+
+# Don't allow users to override the RSC timeout.
+livy.rsc.server.idle_timeout
+    </value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK/metainfo.xml
index d1129cd..21cbd0e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK/metainfo.xml
@@ -23,7 +23,7 @@
   <services>
     <service>
       <name>SPARK</name>
-      <version>1.6.x.2.5</version>
+      <version>1.6.x.2.6</version>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK2/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK2/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK2/metainfo.xml
index 2f83cf4..6440a3a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK2/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK2/metainfo.xml
@@ -23,7 +23,7 @@
   <services>
     <service>
       <name>SPARK2</name>
-      <version>2.0.x.2.5</version>
+      <version>2.0.x.2.6</version>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.6/services/SQOOP/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/SQOOP/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/SQOOP/metainfo.xml
index eb67d63..2d1fe85 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/SQOOP/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/SQOOP/metainfo.xml
@@ -20,7 +20,7 @@
   <services>
     <service>
       <name>SQOOP</name>
-      <version>1.4.6.2.5</version>
+      <version>1.4.6.2.6</version>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.6/services/STORM/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/STORM/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/STORM/metainfo.xml
index c1e9490..e5eb1c0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/STORM/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/STORM/metainfo.xml
@@ -21,7 +21,7 @@
   <services>
     <service>
       <name>STORM</name>
-      <version>1.0.1.2.5</version>
+      <version>1.0.1.2.6</version>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.6/services/TEZ/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/TEZ/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/TEZ/metainfo.xml
index 30f0c25..457342a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/TEZ/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/TEZ/metainfo.xml
@@ -20,7 +20,7 @@
   <services>
     <service>
       <name>TEZ</name>
-      <version>0.7.0.2.5</version>
+      <version>0.7.0.2.6</version>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/metainfo.xml
index 48b30a9..35b883e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/metainfo.xml
@@ -21,11 +21,11 @@
   <services>
     <service>
       <name>YARN</name>
-      <version>2.7.1.2.5</version>
+      <version>2.7.3.2.6</version>
     </service>
     <service>
       <name>MAPREDUCE2</name>
-      <version>2.7.1.2.5</version>
+      <version>2.7.3.2.6</version>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.6/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/ZOOKEEPER/metainfo.xml
index de8ebd9..e5f33f9 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/ZOOKEEPER/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/ZOOKEEPER/metainfo.xml
@@ -20,7 +20,7 @@
   <services>
     <service>
       <name>ZOOKEEPER</name>
-      <version>3.4.6.2.5</version>
+      <version>3.4.6.2.6</version>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
index d34d476..1c65f9b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
@@ -36,8 +36,9 @@
       <supports-auto-skip-failure>false</supports-auto-skip-failure>
 
       <execute-stage service="YARN" component="RESOURCEMANAGER" title="Stop YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
         <task xsi:type="manual">
-          <message>Before continuing, please stop all YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
+          <message>Before continuing, please stop all YARN queues.</message>
         </task>
       </execute-stage>
 
@@ -414,6 +415,20 @@
       </service>
     </group>
 
+    <group xsi:type="cluster" name="START_YARN_QUEUES" title="Start YARN Queues">
+      <direction>UPGRADE</direction>
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <parallel-scheduler/>
+
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
+        <task xsi:type="manual">
+          <message>Before continuing, please start all YARN queues.</message>
+        </task>
+      </execute-stage>
+    </group>
+
     <group xsi:type="restart" name="HBASE" title="HBASE">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -665,16 +680,6 @@
       </execute-stage>
     </group>
 
-    <group xsi:type="cluster" name="MANUAL_STEPS" title="Finishing Upgrade">
-      <direction>UPGRADE</direction>
-
-      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
-        <task xsi:type="manual">
-          <message>Please start previously stopped YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
-        </task>
-      </execute-stage>
-    </group>
-
     <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}">
       <skippable>true</skippable>
       <supports-auto-skip-failure>false</supports-auto-skip-failure>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/configuration/cluster-env.xml
new file mode 100644
index 0000000..93680bf
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/configuration/cluster-env.xml
@@ -0,0 +1,293 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>recovery_enabled</name>
+    <value>true</value>
+    <description>Whether auto start is enabled for this cluster.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>recovery_type</name>
+    <value>AUTO_START</value>
+    <description>Auto start type.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>recovery_lifetime_max_count</name>
+    <value>1024</value>
+    <description>Maximum lifetime count of auto start recovery attempts allowed per host component. This is reset when the agent is restarted.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>recovery_max_count</name>
+    <value>6</value>
+    <description>Maximum count of auto start recovery attempts allowed per host component within a window. This is reset when the agent is restarted.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>recovery_window_in_minutes</name>
+    <value>60</value>
+    <description>Auto start recovery window size in minutes.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>recovery_retry_interval</name>
+    <value>5</value>
+    <description>Gap between auto start recovery retry attempts per host component.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>security_enabled</name>
+    <value>false</value>
+    <description>Hadoop Security</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>kerberos_domain</name>
+    <value>EXAMPLE.COM</value>
+    <description>Kerberos realm.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ignore_groupsusers_create</name>
+    <display-name>Skip group modifications during install</display-name>
+    <value>false</value>
+    <property-type>ADDITIONAL_USER_PROPERTY</property-type>
+    <description>Whether to ignore failures during user and group creation</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>sysprep_skip_create_users_and_groups</name>
+    <display-name>Whether to skip creating users and groups in a sysprepped cluster</display-name>
+    <value>false</value>
+    <property-type>ADDITIONAL_USER_PROPERTY</property-type>
+    <description>Whether to skip creating users and groups in a sysprepped cluster</description>
+    <value-attributes>
+      <overridable>true</overridable>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>sysprep_skip_copy_fast_jar_hdfs</name>
+    <display-name>Whether to skip copying the tarballs to HDFS on a sysprepped cluster</display-name>
+    <value>false</value>
+    <description>Whether to skip copying the tarballs to HDFS on a sysprepped cluster, during both fresh install and stack upgrade</description>
+    <value-attributes>
+      <overridable>true</overridable>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>sysprep_skip_copy_tarballs_hdfs</name>
+    <display-name>Whether to skip copying the tarballs to HDFS on a sysprepped cluster</display-name>
+    <value>false</value>
+    <description>Whether to skip copying the tarballs to HDFS on a sysprepped cluster, during both fresh install and stack upgrade</description>
+    <value-attributes>
+      <overridable>true</overridable>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>sysprep_skip_copy_oozie_share_lib_to_hdfs</name>
+    <display-name>Whether to skip copying the Oozie share lib to HDFS on sysprepped cluster</display-name>
+    <value>false</value>
+    <description>Whether to skip copying the Oozie share lib to HDFS on sysprepped cluster, during both fresh install and stack upgrade</description>
+    <value-attributes>
+      <overridable>true</overridable>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>smokeuser</name>
+    <display-name>Smoke User</display-name>
+    <value>ambari-qa</value>
+    <property-type>USER</property-type>
+    <description>User executing service checks</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>smokeuser_keytab</name>
+    <value>/etc/security/keytabs/smokeuser.headless.keytab</value>
+    <description>Path to smoke test user keytab file</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>user_group</name>
+    <display-name>Hadoop Group</display-name>
+    <value>hadoop</value>
+    <property-type>GROUP</property-type>
+    <description>Hadoop user group.</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>repo_suse_rhel_template</name>
+    <value>[{{repo_id}}]
+name={{repo_id}}
+{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}
+
+path=/
+enabled=1
+gpgcheck=0</value>
+    <description>Template of repositories for RHEL and SUSE.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>repo_ubuntu_template</name>
+    <value>{{package_type}} {{base_url}} {{components}}</value>
+    <description>Template of repositories for Ubuntu.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>override_uid</name>
+    <value>true</value>
+    <property-type>ADDITIONAL_USER_PROPERTY</property-type>
+    <display-name>Have Ambari manage UIDs</display-name>
+    <description>Have Ambari manage UIDs</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>fetch_nonlocal_groups</name>
+    <value>true</value>
+    <display-name>Ambari fetch nonlocal groups</display-name>
+    <description>Ambari requires fetching all the groups. This can be slow
+        in environments with LDAP enabled. Setting this option to false allows Ambari
+        to skip user/group management for LDAP groups.</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>managed_hdfs_resource_property_names</name>
+    <value/>
+    <description>Comma separated list of property names with HDFS resource paths.
+        Resources from this list will be managed even if they are marked as not managed in the stack.</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- Define stack_tools property in the base stack. DO NOT override this property for each stack version -->
+  <property>
+    <name>stack_tools</name>
+    <value/>
+    <description>Stack specific tools</description>
+    <property-type>VALUE_FROM_PROPERTY_FILE</property-type>
+    <value-attributes>
+      <property-file-name>stack_tools.json</property-file-name>
+      <property-file-type>json</property-file-type>
+      <read-only>true</read-only>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- Define stack_features property in the base stack. DO NOT override this property for each stack version -->
+  <property>
+    <name>stack_features</name>
+    <value/>
+    <description>List of features supported by the stack</description>
+    <property-type>VALUE_FROM_PROPERTY_FILE</property-type>
+    <value-attributes>
+      <property-file-name>stack_features.json</property-file-name>
+      <property-file-type>json</property-file-type>
+      <read-only>true</read-only>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>stack_root</name>
+    <value>/usr/hdp</value>
+    <description>Stack root folder</description>
+    <value-attributes>
+      <read-only>true</read-only>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>alerts_repeat_tolerance</name>
+    <value>1</value>
+    <description>The number of consecutive alerts required to transition an alert from the SOFT to the HARD state.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ignore_bad_mounts</name>
+    <value>false</value>
+    <description>For properties handled by handle_mounted_dirs this will prevent Ambari from creating any directories.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>manage_dirs_on_root</name>
+    <value>true</value>
+    <description>For properties handled by handle_mounted_dirs this will make Ambari manage (create and set permissions on) unknown directories on the / partition.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>one_dir_per_partition</name>
+    <value>false</value>
+    <description>For properties handled by handle_mounted_dirs this will make Ambari use at most one directory per partition.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hide_yarn_memory_widget</name>
+    <value>false</value>
+    <description>Whether the YARN Memory widget should be hidden by default on the dashboard.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>agent_mounts_ignore_list</name>
+    <value/>
+    <description>Comma-separated list of mounts that Ambari should ignore when suggesting property values via Stack Advisor</description>
+    <on-ambari-upgrade add="false"/>
+    <value-attributes>
+      <visible>true</visible>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+</configuration>

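Aside: the repo_suse_rhel_template property above is a template with Jinja-style placeholders. A hedged sketch of how it expands is shown below, rendered with plain Jinja2 (the actual agent-side rendering path is not part of this commit, and the repo id and base URL are placeholders, not real repositories):

# Illustrative rendering of repo_suse_rhel_template; values are placeholders.
from jinja2 import Template

repo_template = """[{{repo_id}}]
name={{repo_id}}
{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}

path=/
enabled=1
gpgcheck=0"""

# With no mirror list, the baseurl branch of the template is emitted.
print(Template(repo_template).render(
    repo_id="EXAMPLE-REPO",
    base_url="http://repo.example.com/path",
    mirror_list=None))
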
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/hook.py
new file mode 100644
index 0000000..8a583b3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/hook.py
@@ -0,0 +1,37 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script.hook import Hook
+from shared_initialization import link_configs
+from shared_initialization import setup_config
+from shared_initialization import setup_stack_symlinks
+
+class AfterInstallHook(Hook):
+
+  def hook(self, env):
+    import params
+
+    env.set_params(params)
+    setup_stack_symlinks()
+    setup_config()
+
+    link_configs(self.stroutfile)
+
+if __name__ == "__main__":
+  AfterInstallHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/params.py
new file mode 100644
index 0000000..566f5b3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/params.py
@@ -0,0 +1,97 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import default
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format_jvm_option
+from resource_management.libraries.functions.version import format_stack_version
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+dfs_type = default("/commandParams/dfs_type", "")
+stack_root = Script.get_stack_root()
+
+is_parallel_execution_enabled = int(default("/agentConfigParams/agent/parallel_execution", 0)) == 1
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+
+sudo = AMBARI_SUDO_BINARY
+
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+# current host stack version
+current_version = default("/hostLevelParams/current_version", None)
+
+# default hadoop params
+mapreduce_libs_path = format("{stack_root}/current/hadoop-mapreduce-client/*")
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
+hadoop_conf_empty_dir = None
+
+versioned_stack_root = format('{stack_root}/current')
+
+#security params
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+#java params
+java_home = config['hostLevelParams']['java_home']
+
+#hadoop params
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
+
+jsvc_path = "/usr/lib/bigtop-utils"
+
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
+namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
+
+jtnode_opt_newsize = "200m"
+jtnode_opt_maxnewsize = "200m"
+jtnode_heapsize =  "1024m"
+ttnode_heapsize = "1024m"
+
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+
+#users and groups
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+user_group = config['configurations']['cluster-env']['user_group']
+
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+has_namenode = not len(namenode_host) == 0
+
+if has_namenode or dfs_type == 'HCFS':
+  hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
+
+link_configs_lock_file = os.path.join(tmp_dir, "link_configs_lock_file")
+stack_select_lock_file = os.path.join(tmp_dir, "stack_select_lock_file")
+
+upgrade_suspended = default("/roleParams/upgrade_suspended", False)

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/shared_initialization.py
new file mode 100644
index 0000000..e9f2283
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/shared_initialization.py
@@ -0,0 +1,111 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+
+import ambari_simplejson as json
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.version import compare_versions
+from resource_management.libraries.functions.fcntl_based_process_lock import FcntlBasedProcessLock
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.script import Script
+
+
+def setup_stack_symlinks():
+  """
+  Invokes <stack-selector-tool> set all against a calculated fully-qualified, "normalized" version based on a
+  stack version, such as "2.3". This should always be called after a component has been
+  installed to ensure that all HDP pointers are correct. The stack upgrade logic does not
+  interact with this since it's done via a custom command and will not trigger this hook.
+  :return:
+  """
+  import params
+  if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0:
+    # try using the exact version first, falling back to just the stack version if it's not defined,
+    # which would only be during an initial cluster installation
+    version = params.current_version if params.current_version is not None else params.stack_version_unformatted
+
+    if not params.upgrade_suspended:
+      if params.host_sys_prepped:
+        Logger.warning("Skipping running stack-selector-tool for stack {0} as its a sys_prepped host. This may cause symlink pointers not to be created for HDP componets installed later on top of an already sys_prepped host.".format(version))
+        return
+      # On parallel command execution this should be executed by a single process at a time.
+      with FcntlBasedProcessLock(params.stack_select_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True):
+        stack_select.select_all(version)
+
+def setup_config():
+  import params
+  stackversion = params.stack_version_unformatted
+  Logger.info("FS Type: {0}".format(params.dfs_type))
+
+  is_hadoop_conf_dir_present = False
+  if hasattr(params, "hadoop_conf_dir") and params.hadoop_conf_dir is not None and os.path.exists(params.hadoop_conf_dir):
+    is_hadoop_conf_dir_present = True
+  else:
+    Logger.warning("Parameter hadoop_conf_dir is missing or directory does not exist. This is expected if this host does not have any Hadoop components.")
+
+  if is_hadoop_conf_dir_present and (params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS'):
+    # create core-site only if the hadoop config directory exists
+    XmlConfig("core-site.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations']['core-site'],
+              configuration_attributes=params.config['configuration_attributes']['core-site'],
+              owner=params.hdfs_user,
+              group=params.user_group,
+              only_if=format("ls {hadoop_conf_dir}"))
+
+
+def load_version(struct_out_file):
+  """
+  Load version from file.  Made a separate method for testing
+  """
+  json_version = None
+  try:
+    if os.path.exists(struct_out_file):
+      with open(struct_out_file, 'r') as fp:
+        json_info = json.load(fp)
+        json_version = json_info['version']
+  except:
+    pass
+
+  return json_version
+  
+
+def link_configs(struct_out_file):
+  """
+  Links configs, only on a fresh install of HDP-2.3 and higher
+  """
+  import params
+
+  if not Script.is_stack_greater_or_equal("2.3"):
+    Logger.info("Can only link configs for HDP-2.3 and higher.")
+    return
+
+  json_version = load_version(struct_out_file)
+
+  if not json_version:
+    Logger.info("Could not load 'version' from {0}".format(struct_out_file))
+    return
+
+  # On parallel command execution this should be executed by a single process at a time.
+  with FcntlBasedProcessLock(params.link_configs_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True):
+    for k, v in conf_select.get_package_dirs().iteritems():
+      conf_select.convert_conf_directories_to_symlinks(k, json_version, v)
\ No newline at end of file

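The load_version helper above is split out explicitly so it can be unit tested. A hypothetical test sketch follows (not part of this commit; it assumes the resource_management libraries are importable so the module can be loaded, and the version string is a placeholder):

# Hypothetical tests for load_version(); file names and values are illustrative.
import json
import tempfile

from shared_initialization import load_version

def test_load_version_reads_version_field():
    # The structured-out file is JSON with a top-level "version" key.
    with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as fp:
        json.dump({"version": "3.0.0.0-1"}, fp)
    assert load_version(fp.name) == "3.0.0.0-1"

def test_load_version_returns_none_when_file_is_missing():
    assert load_version("/does/not/exist/structured-out.json") is None
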
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/files/changeToSecureUid.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/files/changeToSecureUid.sh b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/files/changeToSecureUid.sh
new file mode 100644
index 0000000..08542c4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/files/changeToSecureUid.sh
@@ -0,0 +1,53 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+username=$1
+directories=$2
+
+function find_available_uid() {
+ for ((i=1001; i<=2000; i++))
+ do
+   grep -q $i /etc/passwd
+   if [ "$?" -ne 0 ]
+   then
+    newUid=$i
+    break
+   fi
+ done
+}
+
+find_available_uid
+
+if [ -z "$newUid" ]
+then
+  echo "Failed to find an available uid between 1001 and 2000"
+  exit 1
+fi
+
+set -e
+
+dir_array=($(echo $directories | sed 's/,/\n/g'))
+old_uid=$(id -u $username)
+sudo_prefix="/var/lib/ambari-agent/ambari-sudo.sh -H -E"
+echo "Changing uid of $username from $old_uid to $newUid"
+echo "Changing directory permisions for ${dir_array[@]}"
+$sudo_prefix usermod -u $newUid $username && for dir in ${dir_array[@]} ; do ls $dir 2> /dev/null && echo "Changing permission for $dir" && $sudo_prefix chown -Rh $newUid $dir ; done
+exit 0

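The script above takes a username and a comma-separated list of directories; it is staged and invoked from the hook's shared_initialization module, which is not included in this excerpt. A hedged sketch of such an invocation using resource_management primitives is shown below (the function name, tmp path, and the not_if guard are assumptions for illustration, not the actual code):

# Illustrative only: staging and running changeToSecureUid.sh for a user.
from resource_management.core.resources.system import Execute, File
from resource_management.core.source import StaticFile
from resource_management.libraries.functions.format import format

def set_uid(user, user_dirs, tmp_dir="/var/lib/ambari-agent/tmp"):
  # Copy the packaged script to the agent tmp dir and make it executable.
  changeuid_script = format("{tmp_dir}/changeUid.sh")
  File(changeuid_script,
       content=StaticFile("changeToSecureUid.sh"),
       mode=0555)
  # Assumed guard: only rewrite uids that are below the secure range.
  Execute(format("{changeuid_script} {user} {user_dirs}"),
          not_if=format("test $(id -u {user}) -gt 1000"))
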
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/hook.py
new file mode 100644
index 0000000..c34be0b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/hook.py
@@ -0,0 +1,36 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from shared_initialization import *
+
+class BeforeAnyHook(Hook):
+
+  def hook(self, env):
+    import params
+    env.set_params(params)
+
+    setup_users()
+    if params.has_namenode or params.dfs_type == 'HCFS':
+      setup_hadoop_env()
+    setup_java()
+
+if __name__ == "__main__":
+  BeforeAnyHook().execute()
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py
new file mode 100644
index 0000000..f70c8e9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py
@@ -0,0 +1,231 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import collections
+import re
+import os
+
+import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
+
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import default
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format_jvm_option
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.version import compare_versions
+from resource_management.libraries.functions.expect import expect
+from ambari_commons.os_check import OSCheck
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+dfs_type = default("/commandParams/dfs_type", "")
+stack_root = Script.get_stack_root()
+
+artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
+jdk_name = default("/hostLevelParams/jdk_name", None)
+java_home = config['hostLevelParams']['java_home']
+java_version = expect("/hostLevelParams/java_version", int)
+jdk_location = config['hostLevelParams']['jdk_location']
+
+sudo = AMBARI_SUDO_BINARY
+
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+upgrade_type = Script.get_upgrade_type(default("/commandParams/upgrade_type", ""))
+version = default("/commandParams/version", None)
+# Handle upgrade and downgrade
+if (upgrade_type is not None) and version:
+  stack_version_formatted = format_stack_version(version)
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+
+# Some datanode settings
+dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
+dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None)
+dfs_dn_https_addr = default('/configurations/hdfs-site/dfs.datanode.https.address', None)
+dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
+secure_dn_ports_are_in_use = False
+
+def get_port(address):
+  """
+  Extracts port from the address like 0.0.0.0:1019
+  """
+  if address is None:
+    return None
+  m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
+  if m is not None:
+    return int(m.group(2))
+  else:
+    return None
+
+def is_secure_port(port):
+  """
+  Returns True if the port is privileged (root-owned) on *nix systems
+  """
+  if port is not None:
+    return port < 1024
+  else:
+    return False
+
+# hadoop default params
+mapreduce_libs_path = format("{stack_root}/current/hadoop-mapreduce-client/*")
+
+# upgrades would cause these directories to have a version instead of "current"
+# which would cause a lot of problems when writing out hadoop-env.sh; instead
+# force the use of "current" in the hook
+hdfs_user_nofile_limit = default("/configurations/hadoop-env/hdfs_user_nofile_limit", "128000")
+hadoop_home = stack_select.get_hadoop_dir("home", force_latest_on_upgrade=True)
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec", force_latest_on_upgrade=True)
+
+hadoop_conf_empty_dir = None
+hadoop_secure_dn_user = hdfs_user
+hadoop_dir = "/etc/hadoop"
+versioned_stack_root = format('{stack_root}/current')
+hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
+datanode_max_locked_memory = config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory']
+is_datanode_max_locked_memory_set = not is_empty(config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory'])
+
+if not security_enabled:
+  hadoop_secure_dn_user = '""'
+else:
+  dfs_dn_port = get_port(dfs_dn_addr)
+  dfs_dn_http_port = get_port(dfs_dn_http_addr)
+  dfs_dn_https_port = get_port(dfs_dn_https_addr)
+  # Avoid the datanode failing to start as a non-root user because root-owned (privileged) ports are in use
+  if dfs_http_policy == "HTTPS_ONLY":
+    secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_https_port)
+  elif dfs_http_policy == "HTTP_AND_HTTPS":
+    secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port) or is_secure_port(dfs_dn_https_port)
+  else:   # params.dfs_http_policy == "HTTP_ONLY" or not defined:
+    secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port)
+  if secure_dn_ports_are_in_use:
+    hadoop_secure_dn_user = hdfs_user
+  else:
+    hadoop_secure_dn_user = '""'
+
+#hadoop params
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
+
+jsvc_path = "/usr/lib/bigtop-utils"
+
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
+namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
+
+jtnode_opt_newsize = "200m"
+jtnode_opt_maxnewsize = "200m"
+jtnode_heapsize =  "1024m"
+ttnode_heapsize = "1024m"
+
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+nfsgateway_heapsize = config['configurations']['hadoop-env']['nfsgateway_heapsize']
+mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
+
+#users and groups
+hbase_user = config['configurations']['hbase-env']['hbase_user']
+smoke_user =  config['configurations']['cluster-env']['smokeuser']
+gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
+gmond_user = config['configurations']['ganglia-env']["gmond_user"]
+tez_user = config['configurations']['tez-env']["tez_user"]
+oozie_user = config['configurations']['oozie-env']["oozie_user"]
+falcon_user = config['configurations']['falcon-env']["falcon_user"]
+ranger_user = config['configurations']['ranger-env']["ranger_user"]
+zeppelin_user = config['configurations']['zeppelin-env']["zeppelin_user"]
+zeppelin_group = config['configurations']['zeppelin-env']["zeppelin_group"]
+
+user_group = config['configurations']['cluster-env']['user_group']
+
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+falcon_server_hosts = default("/clusterHostInfo/falcon_server_hosts", [])
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+zeppelin_master_hosts = default("/clusterHostInfo/zeppelin_master_hosts", [])
+
+has_namenode = not len(namenode_host) == 0
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+has_tez = 'tez-site' in config['configurations']
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_oozie_server = not len(oozie_servers) == 0
+has_falcon_server_hosts = not len(falcon_server_hosts) == 0
+has_ranger_admin = not len(ranger_admin_hosts) == 0
+has_zeppelin_master = not len(zeppelin_master_hosts) == 0
+
+if has_namenode or dfs_type == 'HCFS':
+  hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
+
+hbase_tmp_dir = "/tmp/hbase-hbase"
+
+proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
+ranger_group = config['configurations']['ranger-env']['ranger_group']
+dfs_cluster_administrators_group = config['configurations']['hdfs-site']["dfs.cluster.administrators"]
+
+sysprep_skip_create_users_and_groups = default("/configurations/cluster-env/sysprep_skip_create_users_and_groups", False)
+ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
+fetch_nonlocal_groups = config['configurations']['cluster-env']["fetch_nonlocal_groups"]
+
+smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
+if has_hbase_masters:
+  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
+#repo params
+repo_info = config['hostLevelParams']['repo_info']
+service_repo_info = default("/hostLevelParams/service_repo_info",None)
+
+user_to_groups_dict = collections.defaultdict(lambda:[user_group])
+user_to_groups_dict[smoke_user] = [proxyuser_group]
+if has_ganglia_server:
+  user_to_groups_dict[gmond_user] = [gmond_user]
+  user_to_groups_dict[gmetad_user] = [gmetad_user]
+if has_tez:
+  user_to_groups_dict[tez_user] = [proxyuser_group]
+if has_oozie_server:
+  user_to_groups_dict[oozie_user] = [proxyuser_group]
+if has_falcon_server_hosts:
+  user_to_groups_dict[falcon_user] = [proxyuser_group]
+if has_ranger_admin:
+  user_to_groups_dict[ranger_user] = [ranger_group]
+if has_zeppelin_master:
+  user_to_groups_dict[zeppelin_user] = [zeppelin_group, user_group]
+
+user_to_gid_dict = collections.defaultdict(lambda:user_group)
+
+user_list = json.loads(config['hostLevelParams']['user_list'])
+group_list = json.loads(config['hostLevelParams']['group_list'])
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+
+tez_am_view_acls = config['configurations']['tez-site']["tez.am.view-acls"]
+override_uid = str(default("/configurations/cluster-env/override_uid", "true")).lower()
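
A note for readers skimming the params.py hunk above: the secure-DataNode handling boils down to "if any configured DataNode port is privileged, keep hadoop_secure_dn_user set to the hdfs user, otherwise clear it". A minimal standalone sketch of that decision follows; it mirrors get_port/is_secure_port and the dfs.http.policy branching from the diff, and the hdfs_site dict with its example addresses is a hypothetical stand-in for the real command JSON.

  import re

  def get_port(address):
      # Extract the port from strings like "0.0.0.0:1019" or "https://host:50475"
      if address is None:
          return None
      m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
      return int(m.group(2)) if m else None

  def is_secure_port(port):
      # Ports below 1024 are root-owned on *nix systems
      return port is not None and port < 1024

  # Hypothetical hdfs-site values, for illustration only
  hdfs_site = {
      'dfs.http.policy': 'HTTPS_ONLY',
      'dfs.datanode.address': '0.0.0.0:1019',
      'dfs.datanode.https.address': '0.0.0.0:50475',
  }

  dn_port = get_port(hdfs_site.get('dfs.datanode.address'))
  https_port = get_port(hdfs_site.get('dfs.datanode.https.address'))

  if hdfs_site['dfs.http.policy'] == 'HTTPS_ONLY':
      secure_dn_ports_are_in_use = is_secure_port(dn_port) or is_secure_port(https_port)
  else:
      secure_dn_ports_are_in_use = is_secure_port(dn_port)

  # True here (1019 < 1024), so the hook would leave hadoop_secure_dn_user = hdfs_user
  print(secure_dn_ports_are_in_use)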

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py
new file mode 100644
index 0000000..320872e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py
@@ -0,0 +1,226 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import re
+import getpass
+import tempfile
+from copy import copy
+from resource_management.libraries.functions.version import compare_versions
+from resource_management import *
+
+def setup_users():
+  """
+  Creates users before cluster installation
+  """
+  import params
+
+  should_create_users_and_groups = False
+  if params.host_sys_prepped:
+    should_create_users_and_groups = not params.sysprep_skip_create_users_and_groups
+  else:
+    should_create_users_and_groups = not params.ignore_groupsusers_create
+
+  if should_create_users_and_groups:
+    for group in params.group_list:
+      Group(group,
+      )
+
+    for user in params.user_list:
+      User(user,
+          gid = params.user_to_gid_dict[user],
+          groups = params.user_to_groups_dict[user],
+          fetch_nonlocal_groups = params.fetch_nonlocal_groups
+      )
+
+    if params.override_uid == "true":
+      set_uid(params.smoke_user, params.smoke_user_dirs)
+    else:
+      Logger.info('Skipping setting uid for smoke user as host is sys prepped')
+  else:
+    Logger.info('Skipping creation of User and Group as host is sys prepped or ignore_groupsusers_create flag is on')
+    pass
+
+
+  if params.has_hbase_masters:
+    Directory (params.hbase_tmp_dir,
+               owner = params.hbase_user,
+               mode=0775,
+               create_parents = True,
+               cd_access="a",
+    )
+    if params.override_uid == "true":
+      set_uid(params.hbase_user, params.hbase_user_dirs)
+    else:
+      Logger.info('Skipping setting uid for hbase user as host is sys prepped')
+
+  if should_create_users_and_groups:
+    if params.has_namenode:
+      create_dfs_cluster_admins()
+    if params.has_tez and params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.3') >= 0:
+      create_tez_am_view_acls()
+  else:
+    Logger.info('Skipping setting dfs cluster admin and tez view acls as host is sys prepped')
+
+def create_dfs_cluster_admins():
+  """
+  dfs.cluster.administrators supports the format <comma-delimited list of usernames><space><comma-delimited list of group names>
+  """
+  import params
+
+  groups_list = create_users_and_groups(params.dfs_cluster_administrators_group)
+
+  User(params.hdfs_user,
+    groups = params.user_to_groups_dict[params.hdfs_user] + groups_list,
+          fetch_nonlocal_groups = params.fetch_nonlocal_groups
+  )
+
+def create_tez_am_view_acls():
+
+  """
+  tez.am.view-acls supports the format <comma-delimited list of usernames><space><comma-delimited list of group names>
+  """
+  import params
+
+  if not params.tez_am_view_acls.startswith("*"):
+    create_users_and_groups(params.tez_am_view_acls)
+
+def create_users_and_groups(user_and_groups):
+
+  import params
+
+  parts = re.split('\s', user_and_groups)
+  if len(parts) == 1:
+    parts.append("")
+
+  users_list = parts[0].split(",") if parts[0] else []
+  groups_list = parts[1].split(",") if parts[1] else []
+
+  if users_list:
+    User(users_list,
+          fetch_nonlocal_groups = params.fetch_nonlocal_groups
+    )
+
+  if groups_list:
+    Group(copy(groups_list),
+    )
+  return groups_list
+    
+def set_uid(user, user_dirs):
+  """
+  user_dirs - comma-separated list of directories
+  """
+  import params
+
+  File(format("{tmp_dir}/changeUid.sh"),
+       content=StaticFile("changeToSecureUid.sh"),
+       mode=0555)
+  ignore_groupsusers_create_str = str(params.ignore_groupsusers_create).lower()
+  Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs}"),
+          not_if = format("(test $(id -u {user}) -gt 1000) || ({ignore_groupsusers_create_str})"))
+    
+def setup_hadoop_env():
+  import params
+  stackversion = params.stack_version_unformatted
+  Logger.info("FS Type: {0}".format(params.dfs_type))
+  if params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS':
+    if params.security_enabled:
+      tc_owner = "root"
+    else:
+      tc_owner = params.hdfs_user
+
+    # create /etc/hadoop
+    Directory(params.hadoop_dir, mode=0755)
+
+    # HDP < 2.2 used a conf -> conf.empty symlink for /etc/hadoop/
+    if Script.is_stack_less_than("2.2"):
+      Directory(params.hadoop_conf_empty_dir, create_parents = True, owner="root",
+        group=params.user_group )
+
+      Link(params.hadoop_conf_dir, to=params.hadoop_conf_empty_dir,
+         not_if=format("ls {hadoop_conf_dir}"))
+
+    # write out hadoop-env.sh, but only if the directory exists
+    if os.path.exists(params.hadoop_conf_dir):
+      File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'), owner=tc_owner,
+        group=params.user_group,
+        content=InlineTemplate(params.hadoop_env_sh_template))
+
+    # Create tmp dir for java.io.tmpdir
+    # Handle the case where /tmp is mounted with noexec
+    Directory(params.hadoop_java_io_tmpdir,
+              owner=params.hdfs_user,
+              group=params.user_group,
+              mode=01777
+    )
+
+def setup_java():
+  """
+  Installs the JDK using parameters that come from ambari-server
+  """
+  import params
+
+  java_exec = format("{java_home}/bin/java")
+
+  if not os.path.isfile(java_exec):
+    if not params.jdk_name: # if custom jdk is used.
+      raise Fail(format("Unable to access {java_exec}. Confirm you have copied jdk to this host."))
+
+    jdk_curl_target = format("{tmp_dir}/{jdk_name}")
+    java_dir = os.path.dirname(params.java_home)
+
+    Directory(params.artifact_dir,
+              create_parents = True,
+              )
+
+    File(jdk_curl_target,
+         content = DownloadSource(format("{jdk_location}/{jdk_name}")),
+         not_if = format("test -f {jdk_curl_target}")
+    )
+
+    tmp_java_dir = tempfile.mkdtemp(prefix="jdk_tmp_", dir=params.tmp_dir)
+
+    try:
+      if params.jdk_name.endswith(".bin"):
+        chmod_cmd = ("chmod", "+x", jdk_curl_target)
+        install_cmd = format("cd {tmp_java_dir} && echo A | {jdk_curl_target} -noregister && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
+      elif params.jdk_name.endswith(".gz"):
+        chmod_cmd = ("chmod","a+x", java_dir)
+        install_cmd = format("cd {tmp_java_dir} && tar -xf {jdk_curl_target} && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
+
+      Directory(java_dir
+      )
+
+      Execute(chmod_cmd,
+              sudo = True,
+              )
+
+      Execute(install_cmd,
+              )
+
+    finally:
+      Directory(tmp_java_dir, action="delete")
+
+    File(format("{java_home}/bin/java"),
+         mode=0755,
+         cd_access="a",
+         )
+    Execute(('chmod', '-R', '755', params.java_home),
+      sudo = True,
+    )
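
A quick note on create_dfs_cluster_admins() and create_tez_am_view_acls() above: both delegate to create_users_and_groups(), which parses a value of the form "<comma-delimited users><space><comma-delimited groups>". A minimal sketch of just the parsing step (without the User/Group resources) follows; the sample inputs are made up for illustration.

  import re

  def split_users_and_groups(user_and_groups):
      # "<comma-delimited users> <comma-delimited groups>"; either half may be empty
      parts = re.split(r'\s', user_and_groups)
      if len(parts) == 1:
          parts.append("")
      users = parts[0].split(",") if parts[0] else []
      groups = parts[1].split(",") if parts[1] else []
      return users, groups

  # Hypothetical dfs.cluster.administrators values, for illustration only
  print(split_users_and_groups("hdfs,admin hadoop,users"))  # (['hdfs', 'admin'], ['hadoop', 'users'])
  print(split_users_and_groups(" hadoop"))                  # ([], ['hadoop']), a leading space means "groups only"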

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/hook.py
new file mode 100644
index 0000000..ce17776
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/hook.py
@@ -0,0 +1,37 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from shared_initialization import *
+from repo_initialization import *
+
+class BeforeInstallHook(Hook):
+
+  def hook(self, env):
+    import params
+
+    self.run_custom_hook('before-ANY')
+    env.set_params(params)
+    
+    install_repos()
+    install_packages()
+
+if __name__ == "__main__":
+  BeforeInstallHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/params.py
new file mode 100644
index 0000000..6193c11
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/params.py
@@ -0,0 +1,113 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
+from resource_management.core.system import System
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import default, format
+from resource_management.libraries.functions.expect import expect
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+sudo = AMBARI_SUDO_BINARY
+
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
+agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+#users and groups
+hbase_user = config['configurations']['hbase-env']['hbase_user']
+smoke_user =  config['configurations']['cluster-env']['smokeuser']
+gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
+gmond_user = config['configurations']['ganglia-env']["gmond_user"]
+tez_user = config['configurations']['tez-env']["tez_user"]
+
+user_group = config['configurations']['cluster-env']['user_group']
+proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
+
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+
+# repo templates
+repo_rhel_suse =  config['configurations']['cluster-env']['repo_suse_rhel_template']
+repo_ubuntu =  config['configurations']['cluster-env']['repo_ubuntu_template']
+
+#hosts
+hostname = config["hostname"]
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+storm_server_hosts = default("/clusterHostInfo/nimbus_hosts", [])
+falcon_host =  default('/clusterHostInfo/falcon_server_hosts', [])
+
+has_sqoop_client = 'sqoop-env' in config['configurations']
+has_namenode = not len(namenode_host) == 0
+has_hs = not len(hs_host) == 0
+has_resourcemanager = not len(rm_host) == 0
+has_slaves = not len(slave_hosts) == 0
+has_oozie_server = not len(oozie_servers)  == 0
+has_hcat_server_host = not len(hcat_server_hosts)  == 0
+has_hive_server_host = not len(hive_server_host)  == 0
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_zk_host = not len(zk_hosts) == 0
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+has_storm_server = not len(storm_server_hosts) == 0
+has_falcon_server = not len(falcon_host) == 0
+has_tez = 'tez-site' in config['configurations']
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+
+hbase_tmp_dir = "/tmp/hbase-hbase"
+
+#security params
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+#java params
+java_home = config['hostLevelParams']['java_home']
+artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
+jdk_name = default("/hostLevelParams/jdk_name", None) # None when jdk is already installed by user
+jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
+jce_location = config['hostLevelParams']['jdk_location']
+jdk_location = config['hostLevelParams']['jdk_location']
+ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+
+smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
+if has_hbase_masters:
+  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
+#repo params
+repo_info = config['hostLevelParams']['repo_info']
+service_repo_info = default("/hostLevelParams/service_repo_info",None)

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/repo_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/repo_initialization.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/repo_initialization.py
new file mode 100644
index 0000000..a35dce7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/repo_initialization.py
@@ -0,0 +1,68 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from ambari_commons.os_check import OSCheck
+from resource_management.libraries.resources.repository import Repository
+from resource_management.core.logger import Logger
+import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
+
+# components_list = repoName + postfix
+_UBUNTU_REPO_COMPONENTS_POSTFIX = ["main"]
+
+def _alter_repo(action, repo_string, repo_template):
+  """
+  @param action: "delete" or "create"
+  @param repo_string: e.g. "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]"
+  """
+  repo_dicts = json.loads(repo_string)
+
+  if not isinstance(repo_dicts, list):
+    repo_dicts = [repo_dicts]
+
+  if 0 == len(repo_dicts):
+    Logger.info("Repository list is empty. Ambari may not be managing the repositories.")
+  else:
+    Logger.info("Initializing {0} repositories".format(str(len(repo_dicts))))
+
+  for repo in repo_dicts:
+    if not 'baseUrl' in repo:
+      repo['baseUrl'] = None
+    if not 'mirrorsList' in repo:
+      repo['mirrorsList'] = None
+    
+    ubuntu_components = [ repo['repoName'] ] + _UBUNTU_REPO_COMPONENTS_POSTFIX
+    
+    Repository(repo['repoId'],
+               action = action,
+               base_url = repo['baseUrl'],
+               mirror_list = repo['mirrorsList'],
+               repo_file_name = repo['repoName'],
+               repo_template = repo_template,
+               components = ubuntu_components, # ubuntu specific
+    )
+
+def install_repos():
+  import params
+  if params.host_sys_prepped:
+    return
+
+  template = params.repo_rhel_suse if OSCheck.is_suse_family() or OSCheck.is_redhat_family() else params.repo_ubuntu
+  _alter_repo("create", params.repo_info, template)
+  if params.service_repo_info:
+    _alter_repo("create", params.service_repo_info, template)
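
For context, the repo_string consumed by _alter_repo() above is the JSON blob Ambari passes in hostLevelParams/repo_info. A small sketch of the parsing and defaulting step (without the Repository resource) is shown below, using a trimmed version of the example from the docstring.

  import json

  repo_string = ('[{"baseUrl":"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0",'
                 '"osType":"centos6","repoId":"HDP-2.0._","repoName":"HDP"}]')

  repo_dicts = json.loads(repo_string)
  if not isinstance(repo_dicts, list):
      repo_dicts = [repo_dicts]

  for repo in repo_dicts:
      # Missing keys are defaulted to None, just as in _alter_repo
      repo.setdefault('baseUrl', None)
      repo.setdefault('mirrorsList', None)
      # On Ubuntu the repo name plus the "main" component is appended
      ubuntu_components = [repo['repoName']] + ['main']
      print(repo['repoId'], repo['baseUrl'], ubuntu_components)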

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/shared_initialization.py
new file mode 100644
index 0000000..1609050
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/shared_initialization.py
@@ -0,0 +1,37 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management.libraries.functions import stack_tools
+from resource_management.libraries.functions.version import compare_versions
+from resource_management.core.resources.packaging import Package
+
+def install_packages():
+  import params
+  if params.host_sys_prepped:
+    return
+
+  packages = ['unzip', 'curl']
+  if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0:
+    stack_selector_package = stack_tools.get_stack_tool_package(stack_tools.STACK_SELECTOR_NAME)
+    packages.append(stack_selector_package)
+  Package(packages,
+          retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
+          retry_count=params.agent_stack_retry_count)
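
install_packages() above only adds the stack selector package when the formatted stack version is at least 2.2. As a rough standalone illustration of that gate (using a simplified local comparison in place of the resource_management compare_versions helper, and a placeholder package name), one could write:

  def compare_versions_simplified(a, b):
      # Simplified stand-in for resource_management's compare_versions:
      # compares dotted numeric versions piecewise and returns -1, 0 or 1.
      pa = [int(x) for x in a.split('.')]
      pb = [int(x) for x in b.split('.')]
      return (pa > pb) - (pa < pb)

  def base_packages(stack_version_formatted, stack_selector_package='hdp-select'):
      # 'hdp-select' is only a placeholder for the stack selector package name
      packages = ['unzip', 'curl']
      if stack_version_formatted and compare_versions_simplified(stack_version_formatted, '2.2') >= 0:
          packages.append(stack_selector_package)
      return packages

  print(base_packages('2.5.0.0'))  # ['unzip', 'curl', 'hdp-select']
  print(base_packages('2.1.0.0'))  # ['unzip', 'curl']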

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-RESTART/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-RESTART/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-RESTART/scripts/hook.py
new file mode 100644
index 0000000..14b9d99
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-RESTART/scripts/hook.py
@@ -0,0 +1,29 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+class BeforeRestartHook(Hook):
+
+  def hook(self, env):
+    self.run_custom_hook('before-START')
+
+if __name__ == "__main__":
+  BeforeRestartHook().execute()
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/checkForFormat.sh
new file mode 100644
index 0000000..68aa96d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/checkForFormat.sh
@@ -0,0 +1,65 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export hdfs_user=$1
+shift
+export conf_dir=$1
+shift
+export bin_dir=$1
+shift
+export mark_dir=$1
+shift
+export name_dirs=$*
+
+export EXIT_CODE=0
+export command="namenode -format"
+export list_of_non_empty_dirs=""
+
+mark_file=/var/run/hadoop/hdfs/namenode-formatted
+if [[ -f ${mark_file} ]] ; then
+  /var/lib/ambari-agent/ambari-sudo.sh rm -f ${mark_file}
+  /var/lib/ambari-agent/ambari-sudo.sh mkdir -p ${mark_dir}
+fi
+
+if [[ ! -d $mark_dir ]] ; then
+  for dir in `echo $name_dirs | tr ',' ' '` ; do
+    echo "NameNode Dirname = $dir"
+    cmd="ls $dir | wc -l  | grep -q ^0$"
+    eval $cmd
+    if [[ $? -ne 0 ]] ; then
+      (( EXIT_CODE = $EXIT_CODE + 1 ))
+      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
+    fi
+  done
+
+  if [[ $EXIT_CODE == 0 ]] ; then
+    /var/lib/ambari-agent/ambari-sudo.sh su ${hdfs_user} - -s /bin/bash -c "export PATH=$PATH:$bin_dir ; yes Y | hdfs --config ${conf_dir} ${command}"
+    (( EXIT_CODE = $EXIT_CODE | $? ))
+  else
+    echo "ERROR: NameNode directory(s) not empty. Will not format the NameNode. List of non-empty NameNode dirs: ${list_of_non_empty_dirs}"
+  fi
+else
+  echo "${mark_dir} exists. Namenode DFS already formatted"
+fi
+
+exit $EXIT_CODE
+
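
checkForFormat.sh above refuses to format the NameNode when any of the configured name directories is non-empty. The same check, sketched in Python for readability (the directory paths are hypothetical):

  import os

  def non_empty_dirs(name_dirs):
      # name_dirs: comma-separated list, as passed to checkForFormat.sh
      return [d for d in name_dirs.split(',') if os.path.isdir(d) and os.listdir(d)]

  blockers = non_empty_dirs("/hadoop/hdfs/namenode,/mnt/hadoop/hdfs/namenode")
  if blockers:
      print("will not format; non-empty dirs:", blockers)
  else:
      print("safe to run: hdfs namenode -format")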


[05/25] ambari git commit: Merge from branch-2.5

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/python/stacks/2.6/RANGER/test_ranger_admin.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/RANGER/test_ranger_admin.py b/ambari-server/src/test/python/stacks/2.6/RANGER/test_ranger_admin.py
new file mode 100644
index 0000000..a3ce186
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.6/RANGER/test_ranger_admin.py
@@ -0,0 +1,504 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+import json
+from mock.mock import MagicMock, patch
+from stacks.utils.RMFTestCase import *
+from only_for_platform import not_for_platform, PLATFORM_WINDOWS
+
+@not_for_platform(PLATFORM_WINDOWS)
+class TestRangerAdmin(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "RANGER/0.4.0/package"
+  STACK_VERSION = "2.6"
+
+  @patch("os.path.isfile")    
+  def test_start_default(self, isfile_mock):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ranger_admin.py",
+      classname = "RangerAdmin",
+      command = "start",
+      config_file="ranger-admin-default.json",
+      stack_version = self.STACK_VERSION,
+      target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+    self.assert_configure_default()
+
+    self.assertResourceCalled('Directory', '/var/log/ambari-infra-solr-client',
+        create_parents = True,
+        mode = 0755,
+        cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/usr/lib/ambari-infra-solr-client',
+        cd_access = 'a',
+        create_parents = True,
+        mode = 0755,
+        recursive_ownership = True,
+    )
+    self.assertResourceCalled('File', '/usr/lib/ambari-infra-solr-client/solrCloudCli.sh',
+        content = StaticFile('/usr/lib/ambari-infra-solr-client/solrCloudCli.sh'),
+        mode = 0755,
+    )
+    self.assertResourceCalled('File', '/usr/lib/ambari-infra-solr-client/log4j.properties',
+        content = InlineTemplate(self.getConfig()['configurations']['infra-solr-client-log4j']['content']),
+        mode = 0644,
+    )
+    self.assertResourceCalled('File', '/var/log/ambari-infra-solr-client/solr-client.log',
+        content = '',
+        mode = 0664,
+    )
+    self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181 --znode /infra-solr --check-znode --retry 5 --interval 10')
+    self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --download-config --config-dir /tmp/solr_config_ranger_audits_0.[0-9]* --config-set ranger_audits --retry 30 --interval 5')
+    self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --upload-config --config-dir /usr/hdp/current/ranger-admin/contrib/solr_for_audit_setup/conf --config-set ranger_audits --retry 30 --interval 5')
+    self.assertResourceCalledRegexp('^Directory$', '^/tmp/solr_config_ranger_audits_0.[0-9]*',
+                                    action=['delete'],
+                                    create_parents=True)
+    self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --create-collection --collection ranger_audits --config-set ranger_audits --shards 1 --replication 1 --max-shards 1 --retry 5 --interval 10')
+
+    self.assertResourceCalled('Execute', '/usr/bin/ranger-admin-start',
+      environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
+      not_if = 'ps -ef | grep proc_rangeradmin | grep -v grep',
+      user = 'ranger',
+    )
+
+    self.assertTrue(isfile_mock.called)
+    self.assertNoMoreResources()
+
+  @patch("os.path.isfile")
+  def test_start_secured(self, isfile_mock):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ranger_admin.py",
+      classname = "RangerAdmin",
+      command = "start",
+      config_file="ranger-admin-secured.json",
+      stack_version = self.STACK_VERSION,
+      target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+    self.assert_configure_secured()
+
+    self.assertResourceCalled('Directory', '/var/log/ambari-infra-solr-client',
+        create_parents = True,
+        mode = 0755,
+        cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/usr/lib/ambari-infra-solr-client',
+        cd_access = 'a',
+        create_parents = True,
+        mode = 0755,
+        recursive_ownership = True,
+    )
+    self.assertResourceCalled('File', '/usr/lib/ambari-infra-solr-client/solrCloudCli.sh',
+        content = StaticFile('/usr/lib/ambari-infra-solr-client/solrCloudCli.sh'),
+        mode = 0755,
+    )
+    self.assertResourceCalled('File', '/usr/lib/ambari-infra-solr-client/log4j.properties',
+        content = InlineTemplate(self.getConfig()['configurations']['infra-solr-client-log4j']['content']),
+        mode = 0644,
+    )
+    self.assertResourceCalled('File', '/var/log/ambari-infra-solr-client/solr-client.log',
+        content = '',
+        mode = 0664,
+    )
+    self.assertResourceCalled('File', '/usr/hdp/current/ranger-admin/conf/ranger_solr_jaas.conf',
+      content = Template('ranger_solr_jaas_conf.j2'),
+      owner = 'ranger',
+    )
+    self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181 --znode /ambari-solr --check-znode --retry 5 --interval 10')
+    self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/ambari-solr --download-config --config-dir /tmp/solr_config_ranger_audits_0.[0-9]* --config-set ranger_audits --retry 30 --interval 5')
+    self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/ambari-solr --upload-config --config-dir /usr/hdp/current/ranger-admin/contrib/solr_for_audit_setup/conf --config-set ranger_audits --retry 30 --interval 5')
+    self.assertResourceCalledRegexp('^Directory$', '^/tmp/solr_config_ranger_audits_0.[0-9]*',
+                                    action=['delete'],
+                                    create_parents=True)
+    self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/ambari-solr --create-collection --collection ranger_audits --config-set ranger_audits --shards 1 --replication 1 --max-shards 1 --retry 5 --interval 10')
+
+    self.assertResourceCalled('Execute', '/usr/bin/ranger-admin-start',
+      environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
+      not_if = 'ps -ef | grep proc_rangeradmin | grep -v grep',
+      user = 'ranger',
+    )
+
+    self.assertTrue(isfile_mock.called)
+    self.assertNoMoreResources()
+
+
+  def assert_setup_db(self):
+    self.assertResourceCalled('File', '/usr/hdp/current/ranger-admin/ews/lib/mysql-connector-java-old.jar',
+                              action = ['delete'],
+                              )
+
+    self.assertResourceCalled('File', '/tmp/mysql-connector-java.jar',
+                              content = DownloadSource('http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar'),
+                              mode = 0644
+                              )
+
+    self.assertResourceCalled('Execute', ('cp', '--remove-destination', '/tmp/mysql-connector-java.jar',
+                                          '/usr/hdp/2.6.0.0-801/ranger-admin/ews/lib'),
+                              sudo = True,
+                              path = ['/bin', '/usr/bin/']
+                              )
+
+    self.assertResourceCalled('File', '/usr/hdp/2.6.0.0-801/ranger-admin/ews/lib/mysql-connector-java.jar',
+                              mode = 0644
+                              )
+
+    self.assertResourceCalled('ModifyPropertiesFile', '/usr/hdp/2.6.0.0-801/ranger-admin/install.properties',
+                              properties = self.getConfig()['configurations']['admin-properties'],
+                              owner = 'ranger'
+                              )
+
+    self.assertResourceCalled('ModifyPropertiesFile', '/usr/hdp/2.6.0.0-801/ranger-admin/install.properties',
+                              owner = 'ranger',
+                              properties = {'SQL_CONNECTOR_JAR':
+                                              '/usr/hdp/2.6.0.0-801/ranger-admin/ews/lib/mysql-connector-java.jar'}
+                              )
+    self.assertResourceCalled('ModifyPropertiesFile', '/usr/hdp/current/ranger-admin/install.properties',
+                              owner = 'ranger',
+                              properties = {'audit_store': 'solr'}
+                              )
+
+    self.assertResourceCalled('Execute', ('ambari-python-wrap /usr/hdp/current/ranger-admin/dba_script.py -q'),
+                              user = 'ranger',
+                              environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45',
+                                             'RANGER_ADMIN_HOME': u'/usr/hdp/current/ranger-admin'},
+                              logoutput = True
+                              )
+
+    self.assertResourceCalled('Execute', ('ambari-python-wrap /usr/hdp/current/ranger-admin/db_setup.py'),
+                              user = 'ranger',
+                              environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45',
+                                             'RANGER_ADMIN_HOME': u'/usr/hdp/current/ranger-admin'},
+                              logoutput = True
+                              )
+
+  def assert_configure_default(self):
+
+    ### assert db setup
+    self.assert_setup_db()
+
+    self.assertResourceCalled('Directory', '/usr/hdp/current/ranger-admin/conf',
+      owner = 'ranger',
+      group = 'ranger',
+      create_parents = True
+    )
+
+    self.assertResourceCalled('File', '/usr/hdp/current/ranger-admin/ews/lib/mysql-connector-java-old.jar',
+        action = ['delete'],
+    )
+
+    self.assertResourceCalled('File', '/tmp/mysql-connector-java.jar',
+      content = DownloadSource('http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar'),
+      mode = 0644
+    )
+
+    self.assertResourceCalled('Execute', ('cp', '--remove-destination', '/tmp/mysql-connector-java.jar', '/usr/hdp/current/ranger-admin/ews/lib'),
+      sudo = True,
+      path = ['/bin', '/usr/bin/']
+    )
+
+    self.assertResourceCalled('File', '/usr/hdp/current/ranger-admin/ews/lib/mysql-connector-java.jar',
+      mode = 0644
+    )
+
+    self.assertResourceCalled('ModifyPropertiesFile', '/usr/hdp/current/ranger-admin/install.properties',
+      properties = self.getConfig()['configurations']['admin-properties'],
+      owner = 'ranger'
+    )
+
+    self.assertResourceCalled('ModifyPropertiesFile', '/usr/hdp/current/ranger-admin/install.properties',
+      owner = 'ranger',
+      properties = {'SQL_CONNECTOR_JAR': '/usr/hdp/current/ranger-admin/ews/lib/mysql-connector-java.jar'}
+    )
+
+    self.assertResourceCalled('File', '/usr/lib/ambari-agent/DBConnectionVerification.jar',
+      content = DownloadSource('http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar'),
+      mode = 0644
+    )
+
+    self.assertResourceCalled('Execute',
+      '/usr/jdk64/jdk1.7.0_45/bin/java -cp /usr/lib/ambari-agent/DBConnectionVerification.jar:/usr/hdp/current/ranger-admin/ews/lib/mysql-connector-java.jar:/usr/hdp/current/ranger-admin/ews/lib/* org.apache.ambari.server.DBConnectionVerification \'jdbc:mysql://c6401.ambari.apache.org:3306/ranger01\' rangeradmin01 rangeradmin01 com.mysql.jdbc.Driver',
+      path=['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
+      tries=5,
+      try_sleep=10,
+      environment = {}
+    )
+
+    self.assertResourceCalled('Execute', ('ln', '-sf', '/usr/hdp/current/ranger-admin/ews/webapp/WEB-INF/classes/conf', '/usr/hdp/current/ranger-admin/conf'),
+      not_if = 'ls /usr/hdp/current/ranger-admin/conf',
+      only_if = 'ls /usr/hdp/current/ranger-admin/ews/webapp/WEB-INF/classes/conf',
+      sudo = True
+    )
+
+    self.assertResourceCalled('Directory', '/usr/hdp/current/ranger-admin/',
+      owner='ranger',
+      group='ranger',
+      recursive_ownership = True
+    )
+
+    self.assertResourceCalled('Directory', '/var/run/ranger',
+      mode=0755,
+      owner = 'ranger',
+      group = 'hadoop',
+      cd_access = "a",
+      create_parents=True
+    )
+
+    self.assertResourceCalled('File', '/usr/hdp/current/ranger-admin/conf/ranger-admin-env-piddir.sh',
+      content = 'export RANGER_PID_DIR_PATH=/var/run/ranger\nexport RANGER_USER=ranger',
+      owner = 'ranger',
+      group = 'ranger',
+      mode = 0755
+    )
+
+    self.assertResourceCalled('Directory', '/var/log/ranger/admin',
+      owner='ranger',
+      group='ranger',
+      create_parents = True,
+      cd_access = 'a',
+      mode = 0755
+    )
+
+    self.assertResourceCalled('File', '/usr/hdp/current/ranger-admin/conf/ranger-admin-env-logdir.sh',
+      content = 'export RANGER_ADMIN_LOG_DIR=/var/log/ranger/admin',
+      owner = 'ranger',
+      group = 'ranger',
+      mode = 0755
+    )
+
+    self.assertResourceCalled('File', '/usr/hdp/current/ranger-admin/conf/ranger-admin-default-site.xml',
+      owner = 'ranger',
+      group = 'ranger'
+    )
+
+    self.assertResourceCalled('File', '/usr/hdp/current/ranger-admin/conf/security-applicationContext.xml',
+      owner = 'ranger',
+      group = 'ranger'
+    )
+
+    self.assertResourceCalled('Execute', ('ln', '-sf', '/usr/hdp/current/ranger-admin/ews/ranger-admin-services.sh', '/usr/bin/ranger-admin'),
+      not_if = 'ls /usr/bin/ranger-admin',
+      only_if = 'ls /usr/hdp/current/ranger-admin/ews/ranger-admin-services.sh',
+      sudo = True
+    )
+
+    self.assertResourceCalled('XmlConfig', 'ranger-admin-site.xml',
+      owner = 'ranger',
+      group = 'ranger',
+      conf_dir = '/usr/hdp/current/ranger-admin/conf',
+      configurations = self.getConfig()['configurations']['ranger-admin-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['ranger-admin-site'],
+      mode = 0644
+    )
+
+    self.assertResourceCalled('Directory', '/usr/hdp/current/ranger-admin/conf/ranger_jaas',
+      owner ='ranger',
+      group ='ranger',
+      mode = 0700
+    )
+
+    self.assertResourceCalled('File', '/usr/hdp/current/ranger-admin/ews/webapp/WEB-INF/log4j.properties',
+      owner = 'ranger',
+      group = 'ranger',
+      content = self.getConfig()['configurations']['admin-log4j']['content'],
+      mode = 0644
+    )
+
+    self.assertResourceCalled('Execute', ('/usr/jdk64/jdk1.7.0_45/bin/java', '-cp', '/usr/hdp/current/ranger-admin/cred/lib/*', 'org.apache.ranger.credentialapi.buildks', 'create', 'rangeradmin', '-value', 'rangeradmin01', '-provider', 'jceks://file/etc/ranger/admin/rangeradmin.jceks'),
+      environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
+      logoutput=True,
+      sudo = True
+    )
+
+    self.assertResourceCalled('File', '/etc/ranger/admin/rangeradmin.jceks',
+      owner = 'ranger',
+      group = 'ranger',
+      mode = 0640
+    )
+
+    self.assertResourceCalled('XmlConfig', 'core-site.xml',
+      owner = 'ranger',
+      group = 'ranger',
+      conf_dir = '/usr/hdp/current/ranger-admin/conf',
+      configurations = self.getConfig()['configurations']['core-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
+      mode = 0644
+    )
+
+    self.assertResourceCalled('Execute', ('ambari-python-wrap /usr/hdp/current/ranger-admin/db_setup.py -javapatch'),
+                              user = 'ranger',
+                              environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45',
+                                             'RANGER_ADMIN_HOME': u'/usr/hdp/current/ranger-admin'},
+                              logoutput = True
+                              )
+
+  def assert_configure_secured(self):
+
+    ### assert db setup
+    self.assert_setup_db()
+
+    self.assertResourceCalled('Directory', '/usr/hdp/current/ranger-admin/conf',
+      owner = 'ranger',
+      group = 'ranger',
+      create_parents = True
+    )
+
+    self.assertResourceCalled('File', '/usr/hdp/current/ranger-admin/ews/lib/mysql-connector-java-old.jar',
+        action = ['delete'],
+    )
+
+    self.assertResourceCalled('File', '/tmp/mysql-connector-java.jar',
+      content = DownloadSource('http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar'),
+      mode = 0644
+    )
+
+    self.assertResourceCalled('Execute', ('cp', '--remove-destination', '/tmp/mysql-connector-java.jar', '/usr/hdp/current/ranger-admin/ews/lib'),
+      sudo = True,
+      path = ['/bin', '/usr/bin/']
+    )
+
+    self.assertResourceCalled('File', '/usr/hdp/current/ranger-admin/ews/lib/mysql-connector-java.jar',
+      mode = 0644
+    )
+
+    self.assertResourceCalled('ModifyPropertiesFile', '/usr/hdp/current/ranger-admin/install.properties',
+      properties = self.getConfig()['configurations']['admin-properties'],
+      owner = 'ranger'
+    )
+
+    self.assertResourceCalled('ModifyPropertiesFile', '/usr/hdp/current/ranger-admin/install.properties',
+      owner = 'ranger',
+      properties = {'SQL_CONNECTOR_JAR': '/usr/hdp/current/ranger-admin/ews/lib/mysql-connector-java.jar'}
+    )
+
+    self.assertResourceCalled('File', '/usr/lib/ambari-agent/DBConnectionVerification.jar',
+      content = DownloadSource('http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar'),
+      mode = 0644
+    )
+
+    self.assertResourceCalled('Execute',
+      '/usr/jdk64/jdk1.7.0_45/bin/java -cp /usr/lib/ambari-agent/DBConnectionVerification.jar:/usr/hdp/current/ranger-admin/ews/lib/mysql-connector-java.jar:/usr/hdp/current/ranger-admin/ews/lib/* org.apache.ambari.server.DBConnectionVerification \'jdbc:mysql://c6401.ambari.apache.org:3306/ranger01\' rangeradmin01 rangeradmin01 com.mysql.jdbc.Driver',
+      path=['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
+      tries=5,
+      try_sleep=10,
+      environment = {}
+    )
+
+    self.assertResourceCalled('Execute', ('ln', '-sf', '/usr/hdp/current/ranger-admin/ews/webapp/WEB-INF/classes/conf', '/usr/hdp/current/ranger-admin/conf'),
+      not_if = 'ls /usr/hdp/current/ranger-admin/conf',
+      only_if = 'ls /usr/hdp/current/ranger-admin/ews/webapp/WEB-INF/classes/conf',
+      sudo = True
+    )
+
+    self.assertResourceCalled('Directory', '/usr/hdp/current/ranger-admin/',
+      owner='ranger',
+      group='ranger',
+      recursive_ownership = True
+    )
+
+    self.assertResourceCalled('Directory', '/var/run/ranger',
+      mode=0755,
+      owner = 'ranger',
+      group = 'hadoop',
+      cd_access = "a",
+      create_parents=True
+    )
+
+    self.assertResourceCalled('File', '/usr/hdp/current/ranger-admin/conf/ranger-admin-env-piddir.sh',
+      content = 'export RANGER_PID_DIR_PATH=/var/run/ranger\nexport RANGER_USER=ranger',
+      owner = 'ranger',
+      group = 'ranger',
+      mode = 0755
+    )
+
+    self.assertResourceCalled('Directory', '/var/log/ranger/admin',
+      owner='ranger',
+      group='ranger',
+      create_parents = True,
+      cd_access = 'a',
+      mode = 0755
+    )
+
+    self.assertResourceCalled('File', '/usr/hdp/current/ranger-admin/conf/ranger-admin-env-logdir.sh',
+      content = 'export RANGER_ADMIN_LOG_DIR=/var/log/ranger/admin',
+      owner = 'ranger',
+      group = 'ranger',
+      mode = 0755
+    )
+
+    self.assertResourceCalled('File', '/usr/hdp/current/ranger-admin/conf/ranger-admin-default-site.xml',
+      owner = 'ranger',
+      group = 'ranger'
+    )
+
+    self.assertResourceCalled('File', '/usr/hdp/current/ranger-admin/conf/security-applicationContext.xml',
+      owner = 'ranger',
+      group = 'ranger'
+    )
+
+    self.assertResourceCalled('Execute', ('ln', '-sf', '/usr/hdp/current/ranger-admin/ews/ranger-admin-services.sh', '/usr/bin/ranger-admin'),
+      not_if = 'ls /usr/bin/ranger-admin',
+      only_if = 'ls /usr/hdp/current/ranger-admin/ews/ranger-admin-services.sh',
+      sudo = True
+    )
+
+    self.assertResourceCalled('XmlConfig', 'ranger-admin-site.xml',
+      owner = 'ranger',
+      group = 'ranger',
+      conf_dir = '/usr/hdp/current/ranger-admin/conf',
+      configurations = self.getConfig()['configurations']['ranger-admin-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['ranger-admin-site'],
+      mode = 0644
+    )
+
+    self.assertResourceCalled('Directory', '/usr/hdp/current/ranger-admin/conf/ranger_jaas',
+      owner ='ranger',
+      group ='ranger',
+      mode = 0700
+    )
+
+    self.assertResourceCalled('File', '/usr/hdp/current/ranger-admin/ews/webapp/WEB-INF/log4j.properties',
+      owner = 'ranger',
+      group = 'ranger',
+      content = self.getConfig()['configurations']['admin-log4j']['content'],
+      mode = 0644
+    )
+
+    self.assertResourceCalled('Execute', ('/usr/jdk64/jdk1.7.0_45/bin/java', '-cp', '/usr/hdp/current/ranger-admin/cred/lib/*', 'org.apache.ranger.credentialapi.buildks', 'create', 'rangeradmin', '-value', 'rangeradmin01', '-provider', 'jceks://file/etc/ranger/admin/rangeradmin.jceks'),
+      environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
+      logoutput=True,
+      sudo = True
+    )
+
+    self.assertResourceCalled('File', '/etc/ranger/admin/rangeradmin.jceks',
+      owner = 'ranger',
+      group = 'ranger',
+      mode = 0640
+    )
+
+    self.assertResourceCalled('XmlConfig', 'core-site.xml',
+      owner = 'ranger',
+      group = 'ranger',
+      conf_dir = '/usr/hdp/current/ranger-admin/conf',
+      configurations = self.getConfig()['configurations']['core-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
+      mode = 0644
+    )
+
+    self.assertResourceCalled('Execute', ('ambari-python-wrap /usr/hdp/current/ranger-admin/db_setup.py -javapatch'),
+                              user = 'ranger',
+                              environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45',
+                                             'RANGER_ADMIN_HOME': u'/usr/hdp/current/ranger-admin'},
+                              logoutput = True
+                              )
\ No newline at end of file


[08/25] ambari git commit: Merge from branch-2.5

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
index 90a3d02..daa3abc 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
@@ -87,7 +87,6 @@ import org.apache.ambari.server.state.ComponentInfo;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.HostHealthStatus;
@@ -964,21 +963,14 @@ public class ClusterTest {
     Map<String, Map<String, String>> c2PropAttributes = new HashMap<String, Map<String,String>>();
     c2PropAttributes.put("final", new HashMap<String, String>());
     c2PropAttributes.get("final").put("x", "true");
-    Config config1 = configFactory.createNew(c1, "global",
+    Config config1 = configFactory.createNew(c1, "global", "version1",
         new HashMap<String, String>() {{ put("a", "b"); }}, c1PropAttributes);
-    config1.setTag("version1");
 
-    Config config2 = configFactory.createNew(c1, "global",
+    Config config2 = configFactory.createNew(c1, "global", "version2",
         new HashMap<String, String>() {{ put("x", "y"); }}, c2PropAttributes);
-    config2.setTag("version2");
 
-    Config config3 = configFactory.createNew(c1, "core-site",
+    Config config3 = configFactory.createNew(c1, "core-site", "version2",
         new HashMap<String, String>() {{ put("x", "y"); }}, new HashMap<String, Map<String,String>>());
-    config3.setTag("version2");
-
-    c1.addConfig(config1);
-    c1.addConfig(config2);
-    c1.addConfig(config3);
 
     c1.addDesiredConfig("_test", Collections.singleton(config1));
     Config res = c1.getDesiredConfigByType("global");
@@ -998,21 +990,14 @@ public class ClusterTest {
   public void testDesiredConfigs() throws Exception {
     createDefaultCluster();
 
-    Config config1 = configFactory.createNew(c1, "global",
+    Config config1 = configFactory.createNew(c1, "global", "version1",
         new HashMap<String, String>() {{ put("a", "b"); }}, new HashMap<String, Map<String,String>>());
-    config1.setTag("version1");
 
-    Config config2 = configFactory.createNew(c1, "global",
+    Config config2 = configFactory.createNew(c1, "global", "version2",
         new HashMap<String, String>() {{ put("x", "y"); }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("version2");
 
-    Config config3 = configFactory.createNew(c1, "core-site",
+    Config config3 = configFactory.createNew(c1, "core-site", "version2",
         new HashMap<String, String>() {{ put("x", "y"); }}, new HashMap<String, Map<String,String>>());
-    config3.setTag("version2");
-
-    c1.addConfig(config1);
-    c1.addConfig(config2);
-    c1.addConfig(config3);
 
     try {
       c1.addDesiredConfig(null, Collections.singleton(config1));
@@ -1132,18 +1117,11 @@ public class ClusterTest {
 
     c1.addService("HDFS");
 
-    Config config1 = configFactory.createNew(c1, "hdfs-site",
+    Config config1 = configFactory.createNew(c1, "hdfs-site", "version1",
       new HashMap<String, String>() {{ put("a", "b"); }}, new HashMap<String, Map<String,String>>());
-    config1.setTag("version1");
 
-    Config config2 = configFactory.createNew(c1, "core-site",
+    Config config2 = configFactory.createNew(c1, "core-site", "version2",
       new HashMap<String, String>() {{ put("x", "y"); }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("version2");
-
-    config1.persist();
-    c1.addConfig(config1);
-    config2.persist();
-    c1.addConfig(config2);
 
     Set<Config> configs = new HashSet<Config>();
     configs.add(config1);
@@ -1209,10 +1187,9 @@ public class ClusterTest {
     Map<String, Map<String, String>> propAttributes = new HashMap<String, Map<String,String>>();
     propAttributes.put("final", new HashMap<String, String>());
     propAttributes.get("final").put("test", "true");
-    Config config = configFactory.createNew(c1, "hdfs-site", new HashMap<String, String>(){{
+    Config config = configFactory.createNew(c1, "hdfs-site", "1", new HashMap<String, String>(){{
       put("test", "test");
     }}, propAttributes);
-    config.setTag("1");
 
     host1.addDesiredConfig(c1.getClusterId(), true, "test", config);
 
@@ -1247,16 +1224,11 @@ public class ClusterTest {
   public void testServiceConfigVersions() throws Exception {
     createDefaultCluster();
 
-    Config config1 = configFactory.createNew(c1, "hdfs-site",
+    Config config1 = configFactory.createNew(c1, "hdfs-site", "version1",
       new HashMap<String, String>() {{ put("a", "b"); }}, new HashMap<String, Map<String,String>>());
-    config1.setTag("version1");
 
-    Config config2 = configFactory.createNew(c1, "hdfs-site",
+    Config config2 = configFactory.createNew(c1, "hdfs-site", "version2",
       new HashMap<String, String>() {{ put("x", "y"); }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("version2");
-
-    c1.addConfig(config1);
-    c1.addConfig(config2);
 
     c1.addDesiredConfig("admin", Collections.singleton(config1));
     List<ServiceConfigVersionResponse> serviceConfigVersions =
@@ -1310,16 +1282,11 @@ public class ClusterTest {
   public void testSingleServiceVersionForMultipleConfigs() throws Exception {
     createDefaultCluster();
 
-    Config config1 = configFactory.createNew(c1, "hdfs-site",
+    Config config1 = configFactory.createNew(c1, "hdfs-site", "version1",
       new HashMap<String, String>() {{ put("a", "b"); }}, new HashMap<String, Map<String,String>>());
-    config1.setTag("version1");
 
-    Config config2 = configFactory.createNew(c1, "core-site",
+    Config config2 = configFactory.createNew(c1, "core-site", "version2",
       new HashMap<String, String>() {{ put("x", "y"); }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("version2");
-
-    c1.addConfig(config1);
-    c1.addConfig(config2);
 
     Set<Config> configs = new HashSet<Config>();
     configs.add(config1);
@@ -1345,11 +1312,8 @@ public class ClusterTest {
   public void testServiceConfigVersionsForGroups() throws Exception {
     createDefaultCluster();
 
-    Config config1 = configFactory.createNew(c1, "hdfs-site",
+    Config config1 = configFactory.createNew(c1, "hdfs-site", "version1",
       new HashMap<String, String>() {{ put("a", "b"); }}, new HashMap<String, Map<String,String>>());
-    config1.setTag("version1");
-
-    c1.addConfig(config1);
 
     ServiceConfigVersionResponse scvResponse =
       c1.addDesiredConfig("admin", Collections.singleton(config1));
@@ -1361,16 +1325,13 @@ public class ClusterTest {
     Assert.assertEquals("Only one scv should be active", 1, activeServiceConfigVersions.get("HDFS").size());
 
     //create config group
-    Config config2 = configFactory.createNew(c1, "hdfs-site",
+    Config config2 = configFactory.createNew(c1, "hdfs-site", "version2",
       new HashMap<String, String>() {{ put("a", "c"); }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("version2");
 
     ConfigGroup configGroup =
       configGroupFactory.createNew(c1, "test group", "HDFS", "descr", Collections.singletonMap("hdfs-site", config2),
         Collections.<Long, Host>emptyMap());
 
-    configGroup.persist();
-
     c1.addConfigGroup(configGroup);
 
     scvResponse = c1.createServiceConfigVersion("HDFS", "admin", "test note", configGroup);
@@ -1381,12 +1342,11 @@ public class ClusterTest {
     Assert.assertEquals("Two service config versions should be active, for default and test groups",
       2, activeServiceConfigVersions.get("HDFS").size());
 
-    Config config3 = configFactory.createNew(c1, "hdfs-site",
+    Config config3 = configFactory.createNew(c1, "hdfs-site", "version3",
       new HashMap<String, String>() {{ put("a", "d"); }}, new HashMap<String, Map<String,String>>());
 
     configGroup.setConfigurations(Collections.singletonMap("hdfs-site", config3));
 
-    configGroup.persist();
     scvResponse = c1.createServiceConfigVersion("HDFS", "admin", "test note", configGroup);
     assertEquals("SCV 3 should be created", Long.valueOf(3), scvResponse.getVersion());
 
@@ -1417,16 +1377,14 @@ public class ClusterTest {
 
     //check config with empty cluster
 
-    Config config4 = new ConfigImpl("hdfs-site");
-    config4.setProperties(new HashMap<String, String>() {{
-      put("a", "b");
-    }});
+    Config config4 = configFactory.createReadOnly("hdfs-site", "version4",
+        Collections.singletonMap("a", "b"), null);
 
     ConfigGroup configGroup2 =
-        configGroupFactory.createNew(c1, "test group 2", "HDFS", "descr", Collections.singletonMap("hdfs-site", config4),
+        configGroupFactory.createNew(c1, "test group 2", "HDFS", "descr",
+            new HashMap<>(Collections.singletonMap("hdfs-site", config4)),
             Collections.<Long, Host>emptyMap());
 
-    configGroup2.persist();
     c1.addConfigGroup(configGroup2);
 
     scvResponse = c1.createServiceConfigVersion("HDFS", "admin", "test note", configGroup2);
@@ -1443,12 +1401,8 @@ public class ClusterTest {
     // Given
     createDefaultCluster();
 
-    Config hdfsSiteConfigV1 = configFactory.createNew(c1, "hdfs-site", ImmutableMap.of("p1", "v1"), ImmutableMap.<String, Map<String,String>>of());
-    hdfsSiteConfigV1.setTag("version1");
-    hdfsSiteConfigV1.persist();
-
-    c1.addConfig(hdfsSiteConfigV1);
-
+    Config hdfsSiteConfigV1 = configFactory.createNew(c1, "hdfs-site", "version1",
+        ImmutableMap.of("p1", "v1"), ImmutableMap.<String, Map<String,String>>of());
 
     ServiceConfigVersionResponse hdfsSiteConfigResponseV1 = c1.addDesiredConfig("admin", Collections.singleton(hdfsSiteConfigV1));
     List<ConfigurationResponse> configResponsesDefaultGroup =  Collections.singletonList(
@@ -1459,11 +1413,10 @@ public class ClusterTest {
 
     hdfsSiteConfigResponseV1.setConfigurations(configResponsesDefaultGroup);
 
-    Config hdfsSiteConfigV2 = configFactory.createNew(c1, "hdfs-site", ImmutableMap.of("p1", "v2"), ImmutableMap.<String, Map<String,String>>of());
-    hdfsSiteConfigV2.setTag("version2");
+    Config hdfsSiteConfigV2 = configFactory.createNew(c1, "hdfs-site", "version2",
+        ImmutableMap.of("p1", "v2"), ImmutableMap.<String, Map<String,String>>of());
 
     ConfigGroup configGroup = configGroupFactory.createNew(c1, "configGroup1", "version1", "test description", ImmutableMap.of(hdfsSiteConfigV2.getType(), hdfsSiteConfigV2), ImmutableMap.<Long, Host>of());
-    configGroup.persist();
 
     c1.addConfigGroup(configGroup);
     ServiceConfigVersionResponse hdfsSiteConfigResponseV2 = c1.createServiceConfigVersion("HDFS", "admin", "test note", configGroup);
@@ -1507,12 +1460,8 @@ public class ClusterTest {
     // Given
     createDefaultCluster();
 
-    Config hdfsSiteConfigV1 = configFactory.createNew(c1, "hdfs-site", ImmutableMap.of("p1", "v1"), ImmutableMap.<String, Map<String,String>>of());
-    hdfsSiteConfigV1.setTag("version1");
-    hdfsSiteConfigV1.persist();
-
-    c1.addConfig(hdfsSiteConfigV1);
-
+    Config hdfsSiteConfigV1 = configFactory.createNew(c1, "hdfs-site", "version1",
+        ImmutableMap.of("p1", "v1"), ImmutableMap.<String, Map<String,String>>of());
 
     ServiceConfigVersionResponse hdfsSiteConfigResponseV1 = c1.addDesiredConfig("admin", Collections.singleton(hdfsSiteConfigV1));
     List<ConfigurationResponse> configResponsesDefaultGroup =  Collections.singletonList(
@@ -1523,11 +1472,10 @@ public class ClusterTest {
 
     hdfsSiteConfigResponseV1.setConfigurations(configResponsesDefaultGroup);
 
-    Config hdfsSiteConfigV2 = configFactory.createNew(c1, "hdfs-site", ImmutableMap.of("p1", "v2"), ImmutableMap.<String, Map<String,String>>of());
-    hdfsSiteConfigV2.setTag("version2");
+    Config hdfsSiteConfigV2 = configFactory.createNew(c1, "hdfs-site", "version2",
+        ImmutableMap.of("p1", "v2"), ImmutableMap.<String, Map<String,String>>of());
 
     ConfigGroup configGroup = configGroupFactory.createNew(c1, "configGroup1", "version1", "test description", ImmutableMap.of(hdfsSiteConfigV2.getType(), hdfsSiteConfigV2), ImmutableMap.<Long, Host>of());
-    configGroup.persist();
 
     c1.addConfigGroup(configGroup);
     ServiceConfigVersionResponse hdfsSiteConfigResponseV2 = c1.createServiceConfigVersion("HDFS", "admin", "test note", configGroup);
@@ -2373,17 +2321,13 @@ public class ClusterTest {
     ClusterEntity clusterEntity = clusterDAO.findByName("c1");
     assertEquals(0, clusterEntity.getClusterConfigEntities().size());
 
-    final Config originalConfig = configFactory.createNew(cluster, "foo-site",
+    final Config originalConfig = configFactory.createNew(cluster, "foo-site", "version3",
         new HashMap<String, String>() {
           {
             put("one", "two");
           }
         }, new HashMap<String, Map<String, String>>());
 
-    originalConfig.setTag("version3");
-    originalConfig.persist();
-    cluster.addConfig(originalConfig);
-
     ConfigGroup configGroup = configGroupFactory.createNew(cluster, "g1", "t1", "",
         new HashMap<String, Config>() {
           {
@@ -2391,7 +2335,6 @@ public class ClusterTest {
           }
         }, Collections.<Long, Host> emptyMap());
 
-    configGroup.persist();
     cluster.addConfigGroup(configGroup);
 
     clusterEntity = clusterDAO.findByName("c1");
@@ -2403,8 +2346,7 @@ public class ClusterTest {
     Map<String, String> properties = config.getProperties();
     properties.put("three", "four");
     config.setProperties(properties);
-
-    config.persist(false);
+    config.save();
 
     clusterEntity = clusterDAO.findByName("c1");
     assertEquals(1, clusterEntity.getClusterConfigEntities().size());
@@ -2525,6 +2467,101 @@ public class ClusterTest {
   }
 
   /**
+   * Tests that {@link Cluster#applyLatestConfigurations(StackId)} sets the
+   * right configs to enabled when there are duplicate mappings for type/tag.
+   * Only the most recent should be enabled.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testApplyLatestConfigurationsWithMultipleMappings() throws Exception {
+    createDefaultCluster();
+    Cluster cluster = clusters.getCluster("c1");
+    ClusterEntity clusterEntity = clusterDAO.findByName("c1");
+    StackId stackId = cluster.getCurrentStackVersion();
+
+    StackEntity currentStack = stackDAO.find(stackId.getStackName(), stackId.getStackVersion());
+
+    String configType = "foo-type";
+    String configTag = "version-1";
+
+    // create the config for the mappings
+    ClusterConfigEntity clusterConfig = new ClusterConfigEntity();
+    clusterConfig.setClusterEntity(clusterEntity);
+    clusterConfig.setConfigId(1L);
+    clusterConfig.setStack(currentStack);
+    clusterConfig.setTag(configTag);
+    clusterConfig.setData("{}");
+    clusterConfig.setType(configType);
+    clusterConfig.setTimestamp(1L);
+    clusterConfig.setVersion(1L);
+
+    clusterDAO.createConfig(clusterConfig);
+    clusterEntity.getClusterConfigEntities().add(clusterConfig);
+    clusterEntity = clusterDAO.merge(clusterEntity);
+
+    // create 3 mappings for the same type/tag, each with a different time
+
+    // config mapping 1
+    ClusterConfigMappingEntity configMapping = new ClusterConfigMappingEntity();
+    configMapping.setClusterEntity(clusterEntity);
+    configMapping.setCreateTimestamp(1L);
+    configMapping.setSelected(0);
+    configMapping.setTag(configTag);
+    configMapping.setType(configType);
+    configMapping.setUser("admin");
+    clusterDAO.persistConfigMapping(configMapping);
+    clusterEntity.getConfigMappingEntities().add(configMapping);
+
+    // config mapping 2
+    configMapping = new ClusterConfigMappingEntity();
+    configMapping.setClusterEntity(clusterEntity);
+    configMapping.setCreateTimestamp(2L);
+    configMapping.setSelected(0);
+    configMapping.setTag(configTag);
+    configMapping.setType(configType);
+    configMapping.setUser("admin");
+    clusterDAO.persistConfigMapping(configMapping);
+    clusterEntity.getConfigMappingEntities().add(configMapping);
+
+    // config mapping 3
+    configMapping = new ClusterConfigMappingEntity();
+    configMapping.setClusterEntity(clusterEntity);
+    configMapping.setCreateTimestamp(3L);
+    configMapping.setSelected(0);
+    configMapping.setTag(configTag);
+    configMapping.setType(configType);
+    configMapping.setUser("admin");
+    clusterDAO.persistConfigMapping(configMapping);
+    clusterEntity.getConfigMappingEntities().add(configMapping);
+
+    clusterEntity = clusterDAO.merge(clusterEntity);
+
+    // check all 3 mappings are disabled
+    Collection<ClusterConfigMappingEntity> clusterConfigMappings = clusterEntity.getConfigMappingEntities();
+    Assert.assertEquals(3, clusterConfigMappings.size());
+    for (ClusterConfigMappingEntity clusterConfigMapping : clusterConfigMappings) {
+      Assert.assertEquals(0, clusterConfigMapping.isSelected());
+    }
+
+    // apply configurations and check to see we've set the one with the latest
+    // timestamp ONLY
+    cluster.applyLatestConfigurations(cluster.getCurrentStackVersion());
+    clusterEntity = clusterDAO.findByName("c1");
+
+    // now check that the new config mapping is enabled
+    clusterConfigMappings = clusterEntity.getConfigMappingEntities();
+    Assert.assertEquals(3, clusterConfigMappings.size());
+    for (ClusterConfigMappingEntity clusterConfigMapping : clusterConfigMappings) {
+      if (clusterConfigMapping.getCreateTimestamp() < 3) {
+        Assert.assertEquals(0, clusterConfigMapping.isSelected());
+      } else {
+        Assert.assertEquals(1, clusterConfigMapping.isSelected());
+      }
+    }
+  }
+
+  /**
    * Tests that applying configurations for a given stack correctly sets
    * {@link DesiredConfig}s.
    */
@@ -2545,13 +2582,7 @@ public class ClusterTest {
 
     // foo-type for v1 on current stack
     properties.put("foo-property-1", "foo-value-1");
-    Config c1 = new ConfigImpl(cluster, "foo-type", properties, propertiesAttributes, injector);
-    c1.setTag("version-1");
-    c1.setStackId(stackId);
-    c1.setVersion(1L);
-
-    cluster.addConfig(c1);
-    c1.persist();
+    Config c1 = configFactory.createNew(cluster, "foo-type", "version-1", properties, propertiesAttributes);
 
     // make v1 "current"
     cluster.addDesiredConfig("admin", Sets.newHashSet(c1), "note-1");
@@ -2562,12 +2593,7 @@ public class ClusterTest {
     // save v2
     // foo-type for v2 on new stack
     properties.put("foo-property-2", "foo-value-2");
-    Config c2 = new ConfigImpl(cluster, "foo-type", properties, propertiesAttributes, injector);
-    c2.setTag("version-2");
-    c2.setStackId(newStackId);
-    c2.setVersion(2L);
-    cluster.addConfig(c2);
-    c2.persist();
+    Config c2 = configFactory.createNew(cluster, "foo-type", "version-2", properties, propertiesAttributes);
 
     // make v2 "current"
     cluster.addDesiredConfig("admin", Sets.newHashSet(c2), "note-2");

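The hunks above repeat one refactoring: ConfigFactory.createNew now takes the config tag as its third argument and appears to handle persisting and attaching the config, since the explicit setTag()/persist()/addConfig() calls are removed. A minimal before-and-after sketch of that pattern, assuming the ClusterTest fixtures (c1, configFactory) and the java.util imports already used in the file; it is an illustration of the change, not an additional test:

    // Old style (removed above): tag, persistence and attachment were separate steps.
    // Config config1 = configFactory.createNew(c1, "hdfs-site",
    //     new HashMap<String, String>() {{ put("a", "b"); }},
    //     new HashMap<String, Map<String, String>>());
    // config1.setTag("version1");
    // config1.persist();
    // c1.addConfig(config1);

    // New style (added above): the factory receives the tag up front, so only the
    // desired-config call remains in the test body.
    Config config1 = configFactory.createNew(c1, "hdfs-site", "version1",
        new HashMap<String, String>() {{ put("a", "b"); }},
        new HashMap<String, Map<String, String>>());
    c1.addDesiredConfig("admin", Collections.singleton(config1));
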
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
index 5886234..d75d9d0 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
@@ -405,19 +405,15 @@ public class ClustersTest {
     cluster.transitionClusterVersion(stackId, stackId.getStackVersion(),
         RepositoryVersionState.CURRENT);
 
-    final Config config1 = injector.getInstance(ConfigFactory.class).createNew(cluster, "t1",
+    final Config config1 = injector.getInstance(ConfigFactory.class).createNew(cluster, "t1", "1",
         new HashMap<String, String>() {{
           put("prop1", "val1");
         }}, new HashMap<String, Map<String,String>>());
-    config1.setTag("1");
-    config1.persist();
 
-    Config config2 = injector.getInstance(ConfigFactory.class).createNew(cluster, "t1",
+    Config config2 = injector.getInstance(ConfigFactory.class).createNew(cluster, "t1", "2",
         new HashMap<String, String>() {{
           put("prop2", "val2");
         }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("2");
-    config2.persist();
 
     // cluster desired config
     cluster.addDesiredConfig("_test", Collections.singleton(config1));

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
index 1f09002..96dbf26 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
@@ -114,17 +114,12 @@ public class ServiceComponentHostConcurrentWriteDeadlockTest {
     cluster.createClusterVersion(stackId,
         stackId.getStackVersion(), "admin", RepositoryVersionState.INSTALLING);
 
-    Config config1 = configFactory.createNew(cluster, "test-type1", new HashMap<String, String>(), new HashMap<String,
+    Config config1 = configFactory.createNew(cluster, "test-type1", null, new HashMap<String, String>(), new HashMap<String,
         Map<String, String>>());
 
-    Config config2 = configFactory.createNew(cluster, "test-type2", new HashMap<String, String>(), new HashMap<String,
+    Config config2 = configFactory.createNew(cluster, "test-type2", null, new HashMap<String, String>(), new HashMap<String,
         Map<String, String>>());
 
-    config1.persist();
-    config2.persist();
-
-    cluster.addConfig(config1);
-    cluster.addConfig(config2);
     cluster.addDesiredConfig("test user", new HashSet<Config>(Arrays.asList(config1, config2)));
 
     String hostName = "c6401";

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/state/host/HostTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/host/HostTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/host/HostTest.java
index 596f381..5c8d174 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/host/HostTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/host/HostTest.java
@@ -384,7 +384,7 @@ public class HostTest {
     clusters.mapHostToCluster("h1", "c1");
 
     ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
-    Config config = configFactory.createNew(c1, "global",
+    Config config = configFactory.createNew(c1, "global", "v1",
         new HashMap<String,String>() {{ put("a", "b"); put("x", "y"); }}, new HashMap<String, Map<String,String>>());
 
     try {
@@ -396,16 +396,14 @@ public class HostTest {
     }
 
 
-    config.setTag("v1");
     host.addDesiredConfig(c1.getClusterId(), true, "_test", config);
 
     Map<String, DesiredConfig> map = host.getDesiredConfigs(c1.getClusterId());
     Assert.assertTrue("Expect desired config to contain global", map.containsKey("global"));
     Assert.assertEquals("Expect global user to be '_test'", "_test", map.get("global").getUser());
 
-    config = configFactory.createNew(c1, "global",
+    config = configFactory.createNew(c1, "global", "v2",
         new HashMap<String,String>() {{ put("c", "d"); }}, new HashMap<String, Map<String,String>>());
-    config.setTag("v2");
     host.addDesiredConfig(c1.getClusterId(), true, "_test1", config);
 
     map = host.getDesiredConfigs(c1.getClusterId());

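The same factory change applies to host-scoped desired configs: the tag moves into createNew and the test keeps only the addDesiredConfig call. A short usage sketch, assuming the HostTest fixtures (c1, host, configFactory) and the DesiredConfig/Assert imports already present in the file:

    // Create a tagged "global" config and mark it as this host's desired config.
    Config config = configFactory.createNew(c1, "global", "v1",
        new HashMap<String, String>() {{ put("a", "b"); put("x", "y"); }},
        new HashMap<String, Map<String, String>>());
    host.addDesiredConfig(c1.getClusterId(), true, "_test", config);

    // getDesiredConfigs is keyed by config type, so "global" should now be present.
    Map<String, DesiredConfig> map = host.getDesiredConfigs(c1.getClusterId());
    Assert.assertTrue(map.containsKey("global"));
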
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/state/stack/UpgradePackTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/stack/UpgradePackTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/stack/UpgradePackTest.java
index dde7ffa..8f9f791 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/stack/UpgradePackTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/stack/UpgradePackTest.java
@@ -18,6 +18,7 @@
 package org.apache.ambari.server.state.stack;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
@@ -570,6 +571,27 @@ public class UpgradePackTest {
 
     Map<String, Map<String, ProcessingComponent>> tasks = upgradePack.getTasks();
     assertTrue(tasks.containsKey("HBASE"));
+
+    // !!! generalized upgrade pack shouldn't be in this
+    boolean found = false;
+    for (Grouping grouping : upgradePack.getAllGroups()) {
+      if (grouping.name.equals("GANGLIA_UPGRADE")) {
+        found = true;
+        break;
+      }
+    }
+    assertFalse(found);
+
+    // !!! test merge of a generalized upgrade pack
+    upgradePack = upgrades.get("upgrade_test_conditions");
+    assertNotNull(upgradePack);
+    for (Grouping grouping : upgradePack.getAllGroups()) {
+      if (grouping.name.equals("GANGLIA_UPGRADE")) {
+        found = true;
+        break;
+      }
+    }
+    assertTrue(found);
   }
 
 

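The new assertions above walk UpgradePack.getAllGroups() with a boolean flag to check whether the generalized GANGLIA_UPGRADE grouping was merged into a pack. The same check can be expressed as a small helper; a hedged sketch, assuming the UpgradePack and Grouping types used in the test (grouping.name is the public field the loop reads):

    // True if the pack contains a grouping with the given name.
    private static boolean hasGrouping(UpgradePack pack, String groupingName) {
      for (Grouping grouping : pack.getAllGroups()) {
        if (groupingName.equals(grouping.name)) {
          return true;
        }
      }
      return false;
    }

    // Mirrors the assertions above: absent from the first pack, present after the merge.
    // assertFalse(hasGrouping(upgradePack, "GANGLIA_UPGRADE"));
    // assertTrue(hasGrouping(upgrades.get("upgrade_test_conditions"), "GANGLIA_UPGRADE"));
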
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
index 77e5142..5987af3 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
@@ -221,11 +221,8 @@ public class ServiceComponentHostTest {
 
     Cluster c = clusters.getCluster(clusterName);
     if (c.getConfig("time", String.valueOf(timestamp)) == null) {
-      Config config = configFactory.createNew (c, "time",
+      Config config = configFactory.createNew (c, "time", String.valueOf(timestamp),
           new HashMap<String, String>(), new HashMap<String, Map<String,String>>());
-      config.setTag(String.valueOf(timestamp));
-      c.addConfig(config);
-      config.persist();
     }
 
     switch (eventType) {
@@ -564,7 +561,6 @@ public class ServiceComponentHostTest {
     final ConfigGroup configGroup = configGroupFactory.createNew(cluster,
       "cg1", "t1", "", new HashMap<String, Config>(), new HashMap<Long, Host>());
 
-    configGroup.persist();
     cluster.addConfigGroup(configGroup);
 
     Map<String, Map<String,String>> actual =
@@ -815,17 +811,14 @@ public class ServiceComponentHostTest {
     final Host host = clusters.getHostsForCluster(clusterName).get(hostName);
     Assert.assertNotNull(host);
 
-    final Config c = configFactory.createNew(cluster, "hdfs-site",
+    final Config c = configFactory.createNew(cluster, "hdfs-site", "version3",
         new HashMap<String, String>() {{ put("dfs.journalnode.http-address", "http://goo"); }},
         new HashMap<String, Map<String,String>>());
-    c.setTag("version3");
-    c.persist();
-    cluster.addConfig(c);
+
     host.addDesiredConfig(cluster.getClusterId(), true, "user", c);
     ConfigGroup configGroup = configGroupFactory.createNew(cluster, "g1",
       "t1", "", new HashMap<String, Config>() {{ put("hdfs-site", c); }},
       new HashMap<Long, Host>() {{ put(hostEntity.getHostId(), host); }});
-    configGroup.persist();
     cluster.addConfigGroup(configGroup);
 
     // HDP-x/HDFS/hdfs-site updated host to changed property
@@ -876,16 +869,12 @@ public class ServiceComponentHostTest {
 
     sch1.updateActualConfigs(actual);
 
-    final Config c1 = configFactory.createNew(cluster, "core-site",
+    final Config c1 = configFactory.createNew(cluster, "core-site", "version2",
       new HashMap<String, String>() {{ put("fs.trash.interval", "400"); }},
       new HashMap<String, Map<String,String>>());
-    c1.setTag("version2");
-    c1.persist();
-    cluster.addConfig(c1);
     configGroup = configGroupFactory.createNew(cluster, "g2",
       "t2", "", new HashMap<String, Config>() {{ put("core-site", c1); }},
       new HashMap<Long, Host>() {{ put(hostEntity.getHostId(), host); }});
-    configGroup.persist();
     cluster.addConfigGroup(configGroup);
 
     Assert.assertTrue(sch1.convertToResponse(null).isStaleConfig());
@@ -1039,10 +1028,7 @@ public class ServiceComponentHostTest {
    * @param values the values for the config
    */
   private void makeConfig(Cluster cluster, String type, String tag, Map<String, String> values, Map<String, Map<String, String>> attributes) {
-    Config config = configFactory.createNew(cluster, type, values, attributes);
-    config.setTag(tag);
-    config.persist();
-    cluster.addConfig(config);
+    Config config = configFactory.createNew(cluster, type, tag, values, attributes);
     cluster.addDesiredConfig("user", Collections.singleton(config));
   }
 

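With the factory owning the tag and persistence, the makeConfig helper above collapses to a createNew call followed by addDesiredConfig. A usage sketch with the same values as the hdfs-site hunk earlier in this file, assuming the ServiceComponentHostTest fixtures (cluster, configFactory):

    // Creates and tags the config, then marks it as desired for the cluster.
    makeConfig(cluster, "hdfs-site", "version3",
        new HashMap<String, String>() {{ put("dfs.journalnode.http-address", "http://goo"); }},
        new HashMap<String, Map<String, String>>());
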
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/topology/AmbariContextTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/AmbariContextTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/AmbariContextTest.java
index 82526e7..fac5185 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/AmbariContextTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/AmbariContextTest.java
@@ -59,6 +59,7 @@ import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.Host;
@@ -66,13 +67,14 @@ import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.configgroup.ConfigGroup;
 import org.easymock.Capture;
+import org.easymock.EasyMock;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
+import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.ImmutableList;
 
 /**
  * AmbariContext unit tests
@@ -110,6 +112,7 @@ public class AmbariContextTest {
   private static final ConfigGroup configGroup2 = createMock(ConfigGroup.class);
   private static final Host host1 = createNiceMock(Host.class);
   private static final Host host2 = createNiceMock(Host.class);
+  private static final ConfigFactory configFactory = createNiceMock(ConfigFactory.class);
 
   private static final Collection<String> blueprintServices = new HashSet<String>();
   private static final Map<String, Service> clusterServices = new HashMap<String, Service>();
@@ -164,6 +167,9 @@ public class AmbariContextTest {
     type1Props.put("prop3", "val3");
     group1Configuration = new Configuration(group1Properties, null, bpConfiguration);
 
+    Map<String, String> group1ResolvedProperties = new HashMap<String, String>(bpType1Props);
+    group1ResolvedProperties.putAll(type1Props);
+
     // config type -> service mapping
     Map<String, String> configTypeServiceMapping = new HashMap<String, String>();
     configTypeServiceMapping.put("type1", "service1");
@@ -172,6 +178,28 @@ public class AmbariContextTest {
     configGroups.put(1L, configGroup1);
     configGroups.put(2L, configGroup2);
 
+    // config factory mock
+    Config type1Group1 = createNiceMock(Config.class);
+    expect(type1Group1.getType()).andReturn("type1").anyTimes();
+    expect(type1Group1.getTag()).andReturn("group1").anyTimes();
+    expect(type1Group1.getProperties()).andReturn(group1ResolvedProperties).anyTimes();
+    expect(configFactory.createReadOnly(EasyMock.eq("type1"), EasyMock.eq("group1"),
+        EasyMock.<Map<String, String>> anyObject(),
+        EasyMock.<Map<String, Map<String, String>>> anyObject())).andReturn(type1Group1).anyTimes();
+    replay(type1Group1);
+
+    Config type1Service1 = createNiceMock(Config.class);
+    expect(type1Service1.getType()).andReturn("type1").anyTimes();
+    expect(type1Service1.getTag()).andReturn("service1").anyTimes();
+    expect(type1Service1.getProperties()).andReturn(type1Props).anyTimes();
+    expect(configFactory.createReadOnly(EasyMock.eq("type1"), EasyMock.eq("service1"),
+        EasyMock.<Map<String, String>> anyObject(),
+        EasyMock.<Map<String, Map<String, String>>> anyObject())).andReturn(
+            type1Service1).anyTimes();
+    replay(type1Service1);
+
+    context.configFactory = configFactory;
+
     blueprintServices.add("service1");
     blueprintServices.add("service2");
 
@@ -222,17 +250,17 @@ public class AmbariContextTest {
   public void tearDown() throws Exception {
     verify(controller, clusterController, hostResourceProvider, serviceResourceProvider, componentResourceProvider,
         hostComponentResourceProvider, configGroupResourceProvider, topology, blueprint, stack, clusters,
-        cluster, group1Info, configHelper, configGroup1, configGroup2, host1, host2);
+        cluster, group1Info, configHelper, configGroup1, configGroup2, host1, host2, configFactory);
 
     reset(controller, clusterController, hostResourceProvider, serviceResourceProvider, componentResourceProvider,
         hostComponentResourceProvider, configGroupResourceProvider, topology, blueprint, stack, clusters,
-        cluster, group1Info, configHelper, configGroup1, configGroup2, host1, host2);
+        cluster, group1Info, configHelper, configGroup1, configGroup2, host1, host2, configFactory);
   }
 
   private void replayAll() {
     replay(controller, clusterController, hostResourceProvider, serviceResourceProvider, componentResourceProvider,
         hostComponentResourceProvider, configGroupResourceProvider, topology, blueprint, stack, clusters,
-        cluster, group1Info, configHelper, configGroup1, configGroup2, host1, host2);
+        cluster, group1Info, configHelper, configGroup1, configGroup2, host1, host2, configFactory);
   }
 
   @Test
@@ -330,6 +358,7 @@ public class AmbariContextTest {
     expect(clusterController.ensureResourceProvider(Resource.Type.ConfigGroup)).andReturn(configGroupResourceProvider).once();
     //todo: for now not using return value so just returning null
     expect(configGroupResourceProvider.createResources(capture(configGroupRequestCapture))).andReturn(null).once();
+
     // replay all mocks
     replayAll();
 
@@ -416,7 +445,6 @@ public class AmbariContextTest {
 
     expect(configGroup1.getHosts()).andReturn(Collections.singletonMap(2L, host2)).once();
     configGroup1.addHost(host1);
-    configGroup1.persistHostMapping();
 
     // replay all mocks
     replayAll();

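Because the context under test now obtains Config objects from an injected ConfigFactory, the setup above stubs that factory with EasyMock so createReadOnly returns canned Config mocks. A condensed sketch of the stubbing pattern, assuming the EasyMock static imports used in the file; in the real test the factory mock is replayed together with the other mocks in replayAll():

    // A nice-mock factory that returns a fixed Config for type "type1", tag "group1",
    // no matter which property maps the production code passes in.
    ConfigFactory configFactory = createNiceMock(ConfigFactory.class);
    Config type1Group1 = createNiceMock(Config.class);
    expect(type1Group1.getType()).andReturn("type1").anyTimes();
    expect(type1Group1.getTag()).andReturn("group1").anyTimes();
    expect(configFactory.createReadOnly(EasyMock.eq("type1"), EasyMock.eq("group1"),
        EasyMock.<Map<String, String>> anyObject(),
        EasyMock.<Map<String, Map<String, String>>> anyObject()))
        .andReturn(type1Group1).anyTimes();
    replay(type1Group1);
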
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/update/HostUpdateHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/update/HostUpdateHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/update/HostUpdateHelperTest.java
index f9dd5d1..3bb6c0a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/update/HostUpdateHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/update/HostUpdateHelperTest.java
@@ -49,6 +49,8 @@ import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigFactory;
+import org.apache.ambari.server.state.ConfigImpl;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.stack.OsFamily;
 import org.apache.ambari.server.utils.CollectionPresentationUtils;
@@ -62,6 +64,7 @@ import com.google.gson.JsonPrimitive;
 import com.google.inject.AbstractModule;
 import com.google.inject.Guice;
 import com.google.inject.Injector;
+import com.google.inject.assistedinject.FactoryModuleBuilder;
 
 import junit.framework.Assert;
 
@@ -212,16 +215,12 @@ public class HostUpdateHelperTest {
     Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
     Cluster mockCluster = easyMockSupport.createNiceMock(Cluster.class);
     ClusterEntity mockClusterEntity1 = easyMockSupport.createNiceMock(ClusterEntity.class);
-    ClusterEntity mockClusterEntity2 = easyMockSupport.createNiceMock(ClusterEntity.class);
     ClusterConfigEntity mockClusterConfigEntity1 = easyMockSupport.createNiceMock(ClusterConfigEntity.class);
     ClusterConfigEntity mockClusterConfigEntity2 = easyMockSupport.createNiceMock(ClusterConfigEntity.class);
-    ClusterConfigEntity mockClusterConfigEntity3 = easyMockSupport.createNiceMock(ClusterConfigEntity.class);
-    ClusterConfigEntity mockClusterConfigEntity4 = easyMockSupport.createNiceMock(ClusterConfigEntity.class);
     StackEntity mockStackEntity = easyMockSupport.createNiceMock(StackEntity.class);
     Map<String, Map<String, String>> clusterHostsToChange = new HashMap<>();
     Map<String, String> hosts = new HashMap<>();
     List<ClusterConfigEntity> clusterConfigEntities1 = new ArrayList<>();
-    List<ClusterConfigEntity> clusterConfigEntities2 = new ArrayList<>();
 
     final Injector mockInjector = Guice.createInjector(new AbstractModule() {
       @Override
@@ -231,6 +230,8 @@ public class HostUpdateHelperTest {
         bind(EntityManager.class).toInstance(entityManager);
         bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
         bind(ClusterDAO.class).toInstance(mockClusterDAO);
+
+        install(new FactoryModuleBuilder().implement(Config.class, ConfigImpl.class).build(ConfigFactory.class));
       }
     });
 
@@ -242,49 +243,42 @@ public class HostUpdateHelperTest {
     clusterConfigEntities1.add(mockClusterConfigEntity1);
     clusterConfigEntities1.add(mockClusterConfigEntity2);
 
-    clusterConfigEntities2.add(mockClusterConfigEntity3);
-    clusterConfigEntities2.add(mockClusterConfigEntity4);
-
     clusterHostsToChange.put("cl1", hosts);
 
-    expect(mockClusterDAO.findByName("cl1")).andReturn(mockClusterEntity1).once();
-    expect(mockClusterDAO.findById(1L)).andReturn(mockClusterEntity2).atLeastOnce();
+    expect(mockClusterDAO.findByName("cl1")).andReturn(mockClusterEntity1).atLeastOnce();
 
     expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
 
     expect(mockClusters.getCluster("cl1")).andReturn(mockCluster).once();
-    expect(mockCluster.getClusterId()).andReturn(1L).atLeastOnce();
+    expect(mockCluster.getClusterId()).andReturn(1L).anyTimes();
 
     expect(mockClusterEntity1.getClusterConfigEntities()).andReturn(clusterConfigEntities1).atLeastOnce();
-    expect(mockClusterEntity2.getClusterConfigEntities()).andReturn(clusterConfigEntities2).atLeastOnce();
 
-    expect(mockClusterConfigEntity1.getStack()).andReturn(mockStackEntity).once();
+    expect(mockClusterConfigEntity1.getClusterId()).andReturn(1L).atLeastOnce();
+    expect(mockClusterConfigEntity1.getConfigId()).andReturn(1L).atLeastOnce();
+    expect(mockClusterConfigEntity1.getStack()).andReturn(mockStackEntity).atLeastOnce();
     expect(mockClusterConfigEntity1.getData()).andReturn("{\"testProperty1\" : \"testValue_host1\", " +
             "\"testProperty2\" : \"testValue_host5\", \"testProperty3\" : \"testValue_host11\", " +
             "\"testProperty4\" : \"testValue_host55\"}").atLeastOnce();
     expect(mockClusterConfigEntity1.getTag()).andReturn("testTag1").atLeastOnce();
     expect(mockClusterConfigEntity1.getType()).andReturn("testType1").atLeastOnce();
     expect(mockClusterConfigEntity1.getVersion()).andReturn(1L).atLeastOnce();
+    expect(mockClusterDAO.findConfig(1L)).andReturn(mockClusterConfigEntity1).atLeastOnce();
 
-    expect(mockClusterConfigEntity2.getStack()).andReturn(mockStackEntity).once();
+    expect(mockClusterConfigEntity2.getClusterId()).andReturn(1L).atLeastOnce();
+    expect(mockClusterConfigEntity2.getConfigId()).andReturn(2L).anyTimes();
+    expect(mockClusterConfigEntity2.getStack()).andReturn(mockStackEntity).atLeastOnce();
     expect(mockClusterConfigEntity2.getData()).andReturn("{\"testProperty5\" : \"test_host1_test_host5_test_host11_test_host55\"}").atLeastOnce();
     expect(mockClusterConfigEntity2.getTag()).andReturn("testTag2").atLeastOnce();
     expect(mockClusterConfigEntity2.getType()).andReturn("testType2").atLeastOnce();
     expect(mockClusterConfigEntity2.getVersion()).andReturn(2L).atLeastOnce();
-
-    expect(mockClusterConfigEntity3.getTag()).andReturn("testTag1").atLeastOnce();
-    expect(mockClusterConfigEntity3.getType()).andReturn("testType1").atLeastOnce();
-    expect(mockClusterConfigEntity3.getVersion()).andReturn(1L).atLeastOnce();
-
-    expect(mockClusterConfigEntity4.getTag()).andReturn("testTag2").atLeastOnce();
-    expect(mockClusterConfigEntity4.getType()).andReturn("testType2").atLeastOnce();
-    expect(mockClusterConfigEntity4.getVersion()).andReturn(2L).atLeastOnce();
+    expect(mockClusterDAO.findConfig(2L)).andReturn(mockClusterConfigEntity2).atLeastOnce();
 
     Capture<String> dataCapture = EasyMock.newCapture();
-    mockClusterConfigEntity3.setData(EasyMock.capture(dataCapture));
+    mockClusterConfigEntity1.setData(EasyMock.capture(dataCapture));
     expectLastCall();
 
-    mockClusterConfigEntity4.setData("{\"testProperty5\":\"test_host5_test_host1_test_host55_test_host11\"}");
+    mockClusterConfigEntity2.setData("{\"testProperty5\":\"test_host5_test_host1_test_host55_test_host11\"}");
     expectLastCall();
 
     HostUpdateHelper hostUpdateHelper = new HostUpdateHelper(null, null, mockInjector);

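The mock Guice module above now installs an assisted-inject binding so that ConfigFactory requests produce real ConfigImpl instances inside the mocked injector. A minimal sketch of that binding alone, assuming the Guice and Ambari imports shown in the hunk; the full test module also binds the DAOs and other dependencies ConfigImpl needs:

    Injector injector = Guice.createInjector(new AbstractModule() {
      @Override
      protected void configure() {
        // Generates a ConfigFactory whose create methods construct ConfigImpl,
        // combining assisted parameters (cluster, type, tag, ...) with injected ones.
        install(new FactoryModuleBuilder()
            .implement(Config.class, ConfigImpl.class)
            .build(ConfigFactory.class));
      }
    });
    ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
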
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java
index e8ed042..f19288f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java
@@ -876,6 +876,8 @@ public class UpgradeCatalog210Test {
 
     final Map<String, String> propertiesExpectedHdfs = new HashMap<String, String>();
     final Map<String, String> propertiesExpectedCoreSite = new HashMap<String, String>();
+    propertiesExpectedHdfs.put("dfs.nameservices", "nncl1,nncl2");
+    propertiesExpectedHdfs.put("dfs.ha.namenodes.nncl2", "nn1,nn2");
     propertiesExpectedCoreSite.put("fs.defaultFS", "hdfs://EXAMPLE.COM:8020");
     final Injector mockInjector = Guice.createInjector(new AbstractModule() {
       @Override

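The updated expectations seed hdfs-site with an HA/federation layout: dfs.nameservices lists the configured nameservice IDs, and dfs.ha.namenodes.<nameservice> lists the NameNode IDs within one nameservice. A tiny sketch of that map, using only the values from the hunk above:

    // hdfs-site fragment: two nameservices, the second with NameNodes nn1 and nn2.
    Map<String, String> propertiesExpectedHdfs = new HashMap<String, String>();
    propertiesExpectedHdfs.put("dfs.nameservices", "nncl1,nncl2");
    propertiesExpectedHdfs.put("dfs.ha.namenodes.nncl2", "nn1,nn2");
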
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
index dc8a7e5..6da2cf1 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
@@ -36,6 +36,7 @@ import org.apache.ambari.server.orm.DBAccessor;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.stack.OsFamily;
 import org.easymock.Capture;
 import org.easymock.EasyMock;
@@ -211,6 +212,8 @@ public class UpgradeCatalog250Test {
     Method updateAmsConfigs = UpgradeCatalog250.class.getDeclaredMethod("updateAMSConfigs");
     Method updateKafkaConfigs = UpgradeCatalog250.class.getDeclaredMethod("updateKafkaConfigs");
     Method updateHiveLlapConfigs = UpgradeCatalog250.class.getDeclaredMethod("updateHiveLlapConfigs");
+    Method updateTablesForZeppelinViewRemoval = UpgradeCatalog250.class.getDeclaredMethod("updateTablesForZeppelinViewRemoval");
+    Method updateAtlasConfigs = UpgradeCatalog250.class.getDeclaredMethod("updateAtlasConfigs");
     Method addNewConfigurationsFromXml = AbstractUpgradeCatalog.class.getDeclaredMethod("addNewConfigurationsFromXml");
     Method updateHIVEInteractiveConfigs = UpgradeCatalog250.class.getDeclaredMethod("updateHIVEInteractiveConfigs");
     Method updateTEZInteractiveConfigs = UpgradeCatalog250.class.getDeclaredMethod("updateTEZInteractiveConfigs");
@@ -221,6 +224,8 @@ public class UpgradeCatalog250Test {
       .addMockedMethod(updateHIVEInteractiveConfigs)
       .addMockedMethod(updateTEZInteractiveConfigs)
       .addMockedMethod(updateHiveLlapConfigs)
+      .addMockedMethod(updateTablesForZeppelinViewRemoval)
+      .addMockedMethod(updateAtlasConfigs)
       .addMockedMethod(addNewConfigurationsFromXml)
       .createMock();
 
@@ -243,6 +248,12 @@ public class UpgradeCatalog250Test {
     upgradeCatalog250.updateHiveLlapConfigs();
     expectLastCall().once();
 
+    upgradeCatalog250.updateTablesForZeppelinViewRemoval();
+    expectLastCall().once();
+
+    upgradeCatalog250.updateAtlasConfigs();
+    expectLastCall().once();
+
     replay(upgradeCatalog250);
 
     upgradeCatalog250.executeDMLUpdates();
@@ -592,4 +603,91 @@ public class UpgradeCatalog250Test {
     Map<String, String> updatedProperties = propertiesCapture.getValue();
     assertTrue(Maps.difference(newProperties, updatedProperties).areEqual());
   }
+
+  @Test
+  public void testUpdateAtlasConfigs() throws Exception {
+
+    Map<String, String> oldHiveProperties = new HashMap<String, String>();
+    Map<String, String> newHiveProperties = new HashMap<String, String>();
+
+    oldHiveProperties.put("hive.atlas.hook", "false");
+    newHiveProperties.put("hive.atlas.hook", "true");
+    testUpdateAtlasHookConfig(oldHiveProperties, newHiveProperties, "hive-env");
+
+    Map<String, String> oldStormProperties = new HashMap<String, String>();
+    Map<String, String> newStormProperties = new HashMap<String, String>();
+    oldStormProperties.put("storm.atlas.hook", "false");
+    newStormProperties.put("storm.atlas.hook", "true");
+    testUpdateAtlasHookConfig(oldStormProperties, newStormProperties, "storm-env");
+
+    Map<String, String> oldFalconProperties = new HashMap<String, String>();
+    Map<String, String> newFalconProperties = new HashMap<String, String>();
+    oldFalconProperties.put("falcon.atlas.hook", "false");
+    newFalconProperties.put("falcon.atlas.hook", "true");
+    testUpdateAtlasHookConfig(oldFalconProperties, newFalconProperties, "falcon-env");
+
+    Map<String, String> oldSqoopProperties = new HashMap<String, String>();
+    Map<String, String> newSqoopProperties = new HashMap<String, String>();
+    oldSqoopProperties.put("sqoop.atlas.hook", "false");
+    newSqoopProperties.put("sqoop.atlas.hook", "true");
+    testUpdateAtlasHookConfig(oldSqoopProperties, newSqoopProperties, "sqoop-env");
+  }
+
+  public void testUpdateAtlasHookConfig(Map<String, String> oldProperties, Map<String, String> newProperties, String configType) throws Exception {
+
+    Map<String, Service> installedServices = new HashMap<String, Service>() {
+      {
+        put("ATLAS", null);
+        put("HIVE", null);
+        put("STORM", null);
+        put("FALCON", null);
+        put("SQOOP", null);
+      }
+    };
+
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+
+    Clusters clusters = easyMockSupport.createNiceMock(Clusters.class);
+    final Cluster cluster = easyMockSupport.createNiceMock(Cluster.class);
+    final Service service = createStrictMock(Service.class);
+
+    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
+      put("normal", cluster);
+    }}).once();
+    expect(cluster.getClusterName()).andReturn("cl1").once();
+    expect(cluster.getServices()).andReturn(installedServices).atLeastOnce();
+
+    Config mockAtlasConfig = easyMockSupport.createNiceMock(Config.class);
+    expect(cluster.getDesiredConfigByType(configType)).andReturn(mockAtlasConfig).atLeastOnce();
+    expect(mockAtlasConfig.getProperties()).andReturn(oldProperties).anyTimes();
+
+    Injector injector = easyMockSupport.createNiceMock(Injector.class);
+    expect(injector.getInstance(Gson.class)).andReturn(null).anyTimes();
+    expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null).anyTimes();
+    expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class)).anyTimes();
+
+    replay(injector, clusters, mockAtlasConfig, cluster);
+
+    AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
+      .addMockedMethod("createConfiguration")
+      .addMockedMethod("getClusters", new Class[] { })
+      .addMockedMethod("createConfig")
+      .withConstructor(createNiceMock(ActionManager.class), clusters, injector)
+      .createNiceMock();
+
+    Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
+    Capture<Map> propertiesCapture = EasyMock.newCapture();
+
+    expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
+    expect(controller.getClusters()).andReturn(clusters).anyTimes();
+    expect(controller.createConfig(anyObject(Cluster.class), anyString(), capture(propertiesCapture), anyString(),
+      anyObject(Map.class))).andReturn(createNiceMock(Config.class)).once();
+
+    replay(controller, injector2);
+    new UpgradeCatalog250(injector2).updateAtlasConfigs();
+    easyMockSupport.verifyAll();
+
+    Map<String, String> updatedProperties = propertiesCapture.getValue();
+    assertTrue(Maps.difference(newProperties, updatedProperties).areEqual());
+  }
 }

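testUpdateAtlasHookConfig above follows the catalog tests' usual capture-and-compare pattern: the properties map handed to createConfig is captured and then compared with the expected map via Guava's Maps.difference. A condensed sketch of just that assertion, assuming the mocks and static imports already set up in the file:

    // Capture whatever properties the upgrade passes to createConfig ...
    Capture<Map> propertiesCapture = EasyMock.newCapture();
    expect(controller.createConfig(anyObject(Cluster.class), anyString(),
        capture(propertiesCapture), anyString(), anyObject(Map.class)))
        .andReturn(createNiceMock(Config.class)).once();

    // ... run the upgrade step, then check the captured map matches the expectation,
    // e.g. {"hive.atlas.hook" -> "true"} for the hive-env case.
    Map<String, String> updatedProperties = propertiesCapture.getValue();
    assertTrue(Maps.difference(newProperties, updatedProperties).areEqual());
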
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/utils/RequestUtilsTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/utils/RequestUtilsTest.java b/ambari-server/src/test/java/org/apache/ambari/server/utils/RequestUtilsTest.java
index 595127e..f36858e 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/utils/RequestUtilsTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/utils/RequestUtilsTest.java
@@ -30,6 +30,7 @@ import org.junit.Test;
 public class RequestUtilsTest {
 
   public static final String REMOTE_ADDRESS = "12.13.14.15";
+  public static final String REMOTE_ADDRESS_MULTIPLE = "12.13.14.15,12.13.14.16";
 
   @Test
   public void testGetRemoteAddress() {
@@ -49,6 +50,23 @@ public class RequestUtilsTest {
   }
 
   @Test
+  public void testGetMultipleRemoteAddress() {
+    // GIVEN
+    HttpServletRequest mockedRequest = createMock(HttpServletRequest.class);
+    expect(mockedRequest.getHeader("X-Forwarded-For")).andReturn(null);
+    expect(mockedRequest.getHeader("Proxy-Client-IP")).andReturn("unknown");
+    expect(mockedRequest.getHeader("WL-Proxy-Client-IP")).andReturn("");
+    expect(mockedRequest.getHeader("HTTP_CLIENT_IP")).andReturn("unknown");
+    expect(mockedRequest.getHeader("HTTP_X_FORWARDED_FOR")).andReturn(REMOTE_ADDRESS_MULTIPLE);
+    replay(mockedRequest);
+    // WHEN
+    String remoteAddress = RequestUtils.getRemoteAddress(mockedRequest);
+    // THEN
+    assertEquals(REMOTE_ADDRESS, remoteAddress);
+    verify(mockedRequest);
+  }
+
+  @Test
   public void testGetRemoteAddressFoundFirstHeader() {
     // GIVEN
     HttpServletRequest mockedRequest = createMock(HttpServletRequest.class);

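The new testGetMultipleRemoteAddress case asserts that when the proxy headers finally yield a comma-separated address chain ("12.13.14.15,12.13.14.16"), getRemoteAddress reports only the first entry. Purely as an illustration of that behavior (not Ambari's RequestUtils implementation), a self-contained sketch of taking the first address from such a header value:

    // Standalone illustration: the first entry of a comma-separated forwarded-for
    // chain is treated as the originating client address.
    public final class ForwardedAddressExample {
      static String firstAddress(String headerValue) {
        if (headerValue == null || headerValue.trim().isEmpty()) {
          return null;
        }
        return headerValue.split(",")[0].trim();
      }

      public static void main(String[] args) {
        // Prints 12.13.14.15, matching the value the test expects.
        System.out.println(firstAddress("12.13.14.15,12.13.14.16"));
      }
    }
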
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java b/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
index 29f40fb..5c77831 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
@@ -65,6 +65,9 @@ import org.apache.ambari.server.security.encryption.CredentialStoreService;
 import org.apache.ambari.server.stack.StackManagerFactory;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigFactory;
+import org.apache.ambari.server.state.ConfigImpl;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.HostComponentAdminState;
 import org.apache.ambari.server.state.Service;
@@ -126,6 +129,7 @@ public class StageUtilsTest extends EasyMockSupport {
         bind(HostRoleCommandDAO.class).toInstance(createNiceMock(HostRoleCommandDAO.class));
 
         install(new FactoryModuleBuilder().build(ExecutionCommandWrapperFactory.class));
+        install(new FactoryModuleBuilder().implement(Config.class, ConfigImpl.class).build(ConfigFactory.class));
       }
     });
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/python/TestMpacks.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/TestMpacks.py b/ambari-server/src/test/python/TestMpacks.py
index 4b54415..ef81945 100644
--- a/ambari-server/src/test/python/TestMpacks.py
+++ b/ambari-server/src/test/python/TestMpacks.py
@@ -64,7 +64,7 @@ def get_configs():
     serverConfiguration.STACK_LOCATION_KEY : "/var/lib/ambari-server/resources/stacks",
     serverConfiguration.COMMON_SERVICES_PATH_PROPERTY : "/var/lib/ambari-server/resources/common-services",
     serverConfiguration.EXTENSION_PATH_PROPERTY : "/var/lib/ambari-server/resources/extensions",
-    serverConfiguration.DASHBOARD_PATH_PROPERTY : "/var/lib/ambari-server/resources/dashboards",
+    serverConfiguration.RESOURCES_DIR_PROPERTY : "/var/lib/ambari-server/resources",
     serverConfiguration.MPACKS_STAGING_PATH_PROPERTY : mpacks_directory,
     serverConfiguration.SERVER_TMP_DIR_PROPERTY : "/tmp",
     serverConfiguration.JDBC_DATABASE_PROPERTY: "postgres"
@@ -100,9 +100,11 @@ class TestMpacks(TestCase):
       fail = True
     self.assertTrue(fail)
 
+  @patch("os.path.exists")
   @patch("ambari_server.setupMpacks.get_YN_input")
   @patch("ambari_server.setupMpacks.run_mpack_install_checker")
-  def test_validate_purge(self, run_mpack_install_checker_mock, get_YN_input_mock):
+  @patch("ambari_server.setupMpacks.get_ambari_properties")
+  def test_validate_purge(self, get_ambari_properties_mock, run_mpack_install_checker_mock, get_YN_input_mock, os_path_exists_mock):
     options = self._create_empty_options_mock()
     options.purge = True
     purge_list = options.purge_list.split(',')
@@ -112,6 +114,7 @@ class TestMpacks(TestCase):
     replay_mode = False
     run_mpack_install_checker_mock.return_value = (0, "No errors found", "")
     get_YN_input_mock.return_value = True
+    os_path_exists_mock.return_value = True
 
     fail = False
     try:
@@ -160,22 +163,28 @@ class TestMpacks(TestCase):
     extensions_directory = configs[serverConfiguration.EXTENSION_PATH_PROPERTY]
     common_services_directory = configs[serverConfiguration.COMMON_SERVICES_PATH_PROPERTY]
     mpacks_directory = configs[serverConfiguration.MPACKS_STAGING_PATH_PROPERTY]
-    os_path_exists_mock.return_value = False
+    os_path_exists_mock.side_effect = [True]
 
     purge_stacks_and_mpacks(None)
-    os_path_exists_calls = []
+    os_path_exists_calls = [
+      call('/var/lib/ambari-server/resources'),
+    ]
     os_path_exists_mock.assert_has_calls(os_path_exists_calls)
 
+    os_path_exists_mock.side_effect = [True, False, False]
     purge_stacks_and_mpacks(options.purge_list.split(","))
     os_path_exists_calls = [
+      call('/var/lib/ambari-server/resources'),
       call(stacks_directory),
       call(mpacks_directory)
     ]
     os_path_exists_mock.assert_has_calls(os_path_exists_calls)
 
     options.purge_list = ",".join([STACK_DEFINITIONS_RESOURCE_NAME, SERVICE_DEFINITIONS_RESOURCE_NAME, MPACKS_RESOURCE_NAME])
+    os_path_exists_mock.side_effect = [True, False, False, False]
     purge_stacks_and_mpacks(options.purge_list.split(","))
     os_path_exists_calls = [
+      call('/var/lib/ambari-server/resources'),
       call(stacks_directory),
       call(common_services_directory),
       call(mpacks_directory)
@@ -183,8 +192,10 @@ class TestMpacks(TestCase):
     os_path_exists_mock.assert_has_calls(os_path_exists_calls)
 
     options.purge_list = ",".join([STACK_DEFINITIONS_RESOURCE_NAME, EXTENSION_DEFINITIONS_RESOURCE_NAME, MPACKS_RESOURCE_NAME])
+    os_path_exists_mock.side_effect = [True, False, False, False]
     purge_stacks_and_mpacks(options.purge_list.split(","))
     os_path_exists_calls = [
+      call('/var/lib/ambari-server/resources'),
       call(stacks_directory),
       call(extensions_directory),
       call(mpacks_directory)
@@ -193,10 +204,13 @@ class TestMpacks(TestCase):
 
     options.purge_list = ",".join([STACK_DEFINITIONS_RESOURCE_NAME, SERVICE_DEFINITIONS_RESOURCE_NAME, MPACKS_RESOURCE_NAME])
     options.replay_mode = True
+    os_path_exists_mock.side_effect = [True, False, False, False]
     purge_stacks_and_mpacks(options.purge_list.split(","))
     os_path_exists_calls = [
+      call('/var/lib/ambari-server/resources'),
       call(stacks_directory),
-      call(common_services_directory)
+      call(common_services_directory),
+      call(mpacks_directory)
     ]
     os_path_exists_mock.assert_has_calls(os_path_exists_calls)
 
@@ -266,10 +280,11 @@ class TestMpacks(TestCase):
     get_ambari_version_mock.return_value = "2.4.0.0"
     run_os_command_mock.return_value = (0, "", "")
     mpacks_directory = configs[serverConfiguration.MPACKS_STAGING_PATH_PROPERTY]
-    """
+
     os_path_exists_calls = [call('/tmp/mystack.tar.gz'),
                             call('mpacks/mystack-ambari-mpack-1.0.0.0/mpack.json'),
                             call('mpacks/mystack-ambari-mpack-1.0.0.0/hooks/before_install.py'),
+                            call('/var/lib/ambari-server/resources'),
                             call('/var/lib/ambari-server/resources/stacks'),
                             call('/var/lib/ambari-server/resources/extensions'),
                             call('/var/lib/ambari-server/resources/common-services'),
@@ -277,12 +292,14 @@ class TestMpacks(TestCase):
                             call(mpacks_directory + '/cache'),
                             call('/var/lib/ambari-server/resources/dashboards'),
                             call(mpacks_directory + '/mystack-ambari-mpack-1.0.0.0'),
+                            call('/var/lib/ambari-server/resources'),
                             call('/var/lib/ambari-server/resources/common-services/SERVICEA'),
                             call(mpacks_directory + '/mystack-ambari-mpack-1.0.0.0/common-services/SERVICEA/1.0/dashboards'),
                             call(mpacks_directory + '/mystack-ambari-mpack-1.0.0.0/common-services/SERVICEA/2.0/dashboards'),
                             call('/var/lib/ambari-server/resources/common-services/SERVICEB'),
                             call(mpacks_directory + '/mystack-ambari-mpack-1.0.0.0/common-services/SERVICEB/1.0.0/dashboards'),
                             call(mpacks_directory + '/mystack-ambari-mpack-1.0.0.0/common-services/SERVICEB/2.0.0/dashboards'),
+                            call('/var/lib/ambari-server/resources'),
                             call('/var/lib/ambari-server/resources/stacks/MYSTACK'),
                             call('/var/lib/ambari-server/resources/stacks/MYSTACK/1.0'),
                             call('/var/lib/ambari-server/resources/stacks/MYSTACK/1.0/services'),
@@ -295,22 +312,26 @@ class TestMpacks(TestCase):
                             call(mpacks_directory + '/mystack-ambari-mpack-1.0.0.0/stacks/MYSTACK/2.0/services/SERVICEA/dashboards'),
                             call(mpacks_directory + '/mystack-ambari-mpack-1.0.0.0/stacks/MYSTACK/2.0/services/SERVICEB/dashboards'),
                             call(mpacks_directory + '/mystack-ambari-mpack-1.0.0.0/hooks/after_install.py')]
-    """
-    os_path_exists_mock.side_effect = [True, True, True, False, True, False, False, False, False,
-                                       False, False, False, False, False, False,
-                                       False, False, False, False, False, False, False, False,
-                                       False, False, False, False, True]
+
+    os_path_exists_mock.side_effect = [True, True, True, True, False, True, False, False, False, False,
+                                       False, True, False, False, False, False, False, False, True, False,
+                                       False, False, False, False, False, False, False, False, False, False,
+                                       True]
     get_ambari_properties_mock.return_value = configs
     shutil_move_mock.return_value = True
 
-    install_mpack(options)
+    try:
+      install_mpack(options)
+    except Exception as e:
+      print e
 
     stacks_directory = configs[serverConfiguration.STACK_LOCATION_KEY]
     common_services_directory = configs[serverConfiguration.COMMON_SERVICES_PATH_PROPERTY]
     extensions_directory = configs[serverConfiguration.EXTENSION_PATH_PROPERTY]
     mpacks_directory = configs[serverConfiguration.MPACKS_STAGING_PATH_PROPERTY]
-    dashboards_directory = serverConfiguration.get_dashboard_location(configs)
     mpacks_staging_directory = os.path.join(mpacks_directory, "mystack-ambari-mpack-1.0.0.0")
+    resources_directory = configs[serverConfiguration.RESOURCES_DIR_PROPERTY]
+    dashboards_directory = os.path.join(resources_directory, "dashboards")
 
     run_os_command_calls = [
       call([
@@ -377,6 +398,7 @@ class TestMpacks(TestCase):
            "SERVICEB", None)
     ]
 
+    os_path_exists_mock.assert_has_calls(os_path_exists_calls)
     self.assertTrue(purge_stacks_and_mpacks_mock.called)
     run_os_command_mock.assert_has_calls(run_os_command_calls)
     os_mkdir_mock.assert_has_calls(os_mkdir_calls)
@@ -403,8 +425,8 @@ class TestMpacks(TestCase):
     expand_mpack_mock.return_value = "mpacks/myextension-ambari-mpack-1.0.0.0"
     get_ambari_version_mock.return_value = "2.4.0.0"
 
-    os_path_exists_mock.side_effect = [True, True, True, False, True, False, False, False,
-                                       False, True, False, False, False]
+    os_path_exists_mock.side_effect = [True, True, True, True, False, True, False, False, False,
+                                       False, True, True, False, False, False]
     get_ambari_properties_mock.return_value = configs
     shutil_move_mock.return_value = True
 
@@ -413,10 +435,12 @@ class TestMpacks(TestCase):
     extensions_directory = configs[serverConfiguration.EXTENSION_PATH_PROPERTY]
     mpacks_directory = configs[serverConfiguration.MPACKS_STAGING_PATH_PROPERTY]
     mpacks_staging_directory = os.path.join(mpacks_directory, "myextension-ambari-mpack-1.0.0.0")
-    dashboards_directory = serverConfiguration.get_dashboard_location(configs)
+    resources_directory = configs[serverConfiguration.RESOURCES_DIR_PROPERTY]
+    dashboards_directory = os.path.join(resources_directory, "dashboards")
 
     os_path_exists_calls = [call('/tmp/myextension.tar.gz'),
                             call('mpacks/myextension-ambari-mpack-1.0.0.0/mpack.json'),
+                            call('/var/lib/ambari-server/resources'),
                             call('/var/lib/ambari-server/resources/stacks'),
                             call('/var/lib/ambari-server/resources/extensions'),
                             call('/var/lib/ambari-server/resources/common-services'),
@@ -424,6 +448,7 @@ class TestMpacks(TestCase):
                             call(mpacks_directory + '/cache'),
                             call('/var/lib/ambari-server/resources/dashboards'),
                             call(mpacks_directory + '/myextension-ambari-mpack-1.0.0.0'),
+                            call('/var/lib/ambari-server/resources'),
                             call('/var/lib/ambari-server/resources/extensions'),
                             call('/var/lib/ambari-server/resources/extensions/MYEXTENSION'),
                             call(mpacks_directory + '/myextension-ambari-mpack-1.0.0.0/extensions/MYEXTENSION/1.0/services'),
@@ -476,9 +501,9 @@ class TestMpacks(TestCase):
     expand_mpack_mock.return_value = "mpacks/myservice-ambari-mpack-1.0.0.0"
     get_ambari_version_mock.return_value = "2.4.0.0"
 
-    os_path_exists_mock.side_effect = [True, True, True, True, True, True,
-                                       True, True, False, False, False, False,
-                                       True, True, True, False, True, True,
+    os_path_exists_mock.side_effect = [True, True, True, True, True, True, True,
+                                       True, True, False, False, True, False, False,
+                                       True, True, True, True, False, True, True,
                                        True, False]
 
     get_ambari_properties_mock.return_value = configs
@@ -491,11 +516,13 @@ class TestMpacks(TestCase):
     common_services_directory = configs[serverConfiguration.COMMON_SERVICES_PATH_PROPERTY]
     mpacks_directory = configs[serverConfiguration.MPACKS_STAGING_PATH_PROPERTY]
     mpacks_staging_directory = os.path.join(mpacks_directory, "myservice-ambari-mpack-1.0.0.0")
-    dashboards_directory = serverConfiguration.get_dashboard_location(configs)
+    resources_directory = configs[serverConfiguration.RESOURCES_DIR_PROPERTY]
+    dashboards_directory = os.path.join(resources_directory, "dashboards")
 
     os_path_exists_calls = [call('/tmp/myservice.tar.gz'),
                             call('mpacks/myservice-ambari-mpack-1.0.0.0/mpack.json'),
                             call('/var/lib/ambari-server/resources/stacks/MYSTACK/1.0'),
+                            call('/var/lib/ambari-server/resources'),
                             call('/var/lib/ambari-server/resources/stacks'),
                             call('/var/lib/ambari-server/resources/extensions'),
                             call('/var/lib/ambari-server/resources/common-services'),
@@ -503,8 +530,10 @@ class TestMpacks(TestCase):
                             call(mpacks_directory + '/cache'),
                             call('/var/lib/ambari-server/resources/dashboards'),
                             call(mpacks_directory + '/myservice-ambari-mpack-1.0.0.0'),
+                            call('/var/lib/ambari-server/resources'),
                             call('/var/lib/ambari-server/resources/common-services/MYSERVICE'),
                             call(mpacks_directory + '/myservice-ambari-mpack-1.0.0.0/common-services/MYSERVICE/1.0.0/dashboards'),
+                            call('/var/lib/ambari-server/resources'),
                             call('/var/lib/ambari-server/resources/stacks/MYSTACK'),
                             call('/var/lib/ambari-server/resources/stacks/MYSTACK/1.0'),
                             call('/var/lib/ambari-server/resources/stacks/MYSTACK/1.0/services'),
@@ -562,14 +591,14 @@ class TestMpacks(TestCase):
     get_ambari_version_mock.return_value = "2.4.0.0"
     run_os_command_mock.return_value = (0, "", "")
     mpacks_directory = configs[serverConfiguration.MPACKS_STAGING_PATH_PROPERTY]
-    os_path_exists_mock.side_effect = [True, True, True, True, True, True, True, True,
-                                       True, True, True, True, True, True, True, False,
-                                       False, True, False, False, True, False, False,
-                                       False, False, False, True, True, True, False,
-                                       True, True, False, True, True, False, False,
-                                       False, False, False, True, True, True, True,
-                                       True, True, True, False, True, False, True, True,
-                                       True, True, True, True]
+    os_path_exists_mock.side_effect = [True, True, True, True, True, True, True, True, True, True,
+                                       True, True, True, True, True, True, True, False, False, True,
+                                       True, False, False, True, False, False, False, False, False, True,
+                                       True, True, True, False, True, True, False, True, True, False,
+                                       False, False, False, False, True, True, True, True, True, True,
+                                       True, False, True, False, True, True, True, True, True, True,
+                                       True]
+
     get_ambari_properties_mock.return_value = configs
     shutil_move_mock.return_value = True
 
@@ -579,10 +608,12 @@ class TestMpacks(TestCase):
     common_services_directory = configs[serverConfiguration.COMMON_SERVICES_PATH_PROPERTY]
     mpacks_directory = configs[serverConfiguration.MPACKS_STAGING_PATH_PROPERTY]
     mpacks_staging_directory = os.path.join(mpacks_directory, "mystack-ambari-mpack-1.0.0.1")
-    dashboards_directory = serverConfiguration.get_dashboard_location(configs)
+    resources_directory = configs[serverConfiguration.RESOURCES_DIR_PROPERTY]
+    dashboards_directory = os.path.join(resources_directory, "dashboards")
 
     os_path_exists_calls = [call('/tmp/mystack-1.0.0.1.tar.gz'),
                             call('mpacks/mystack-ambari-mpack-1.0.0.1/mpack.json'),
+                            call('/var/lib/ambari-server/resources'),
                             call(mpacks_directory),
                             call(mpacks_directory + '/myextension-ambari-mpack-1.0.0.0/mpack.json'),
                             call(mpacks_directory + '/myservice-ambari-mpack-1.0.0.0/mpack.json'),
@@ -591,6 +622,7 @@ class TestMpacks(TestCase):
                             call('/tmp/mystack-1.0.0.1.tar.gz'),
                             call('mpacks/mystack-ambari-mpack-1.0.0.1/mpack.json'),
                             call('mpacks/mystack-ambari-mpack-1.0.0.1/hooks/before_upgrade.py'),
+                            call('/var/lib/ambari-server/resources'),
                             call('/var/lib/ambari-server/resources/stacks'),
                             call('/var/lib/ambari-server/resources/extensions'),
                             call('/var/lib/ambari-server/resources/common-services'),
@@ -598,6 +630,7 @@ class TestMpacks(TestCase):
                             call(mpacks_directory + '/cache'),
                             call('/var/lib/ambari-server/resources/dashboards'),
                             call(mpacks_directory + '/mystack-ambari-mpack-1.0.0.1'),
+                            call('/var/lib/ambari-server/resources'),
                             call('/var/lib/ambari-server/resources/common-services/SERVICEA'),
                             call(mpacks_directory + '/mystack-ambari-mpack-1.0.0.1/common-services/SERVICEA/1.0/dashboards'),
                             call(mpacks_directory + '/mystack-ambari-mpack-1.0.0.1/common-services/SERVICEA/2.0/dashboards'),
@@ -607,6 +640,7 @@ class TestMpacks(TestCase):
                             call('/var/lib/ambari-server/resources/common-services/SERVICEC'),
                             call(mpacks_directory + '/mystack-ambari-mpack-1.0.0.1/common-services/SERVICEC/1.0.0/dashboards'),
                             call(mpacks_directory + '/mystack-ambari-mpack-1.0.0.1/common-services/SERVICEC/2.0.0/dashboards'),
+                            call('/var/lib/ambari-server/resources'),
                             call('/var/lib/ambari-server/resources/stacks/MYSTACK'),
                             call('/var/lib/ambari-server/resources/stacks/MYSTACK/1.0'),
                             call('/var/lib/ambari-server/resources/stacks/MYSTACK/1.0/services'),
@@ -631,6 +665,7 @@ class TestMpacks(TestCase):
                             call('/var/lib/ambari-server/resources/dashboards/grafana-dashboards/SERVICEC'),
                             call(mpacks_directory + '/mystack-ambari-mpack-1.0.0.1/stacks/MYSTACK/3.0/services/SERVICEC/dashboards/service-metrics/SERVICEC.txt'),
                             call('/var/lib/ambari-server/resources/dashboards/service-metrics/SERVICEC.txt'),
+                            call('/var/lib/ambari-server/resources'),
                             call(mpacks_directory),
                             call(mpacks_directory + '/myextension-ambari-mpack-1.0.0.0/mpack.json'),
                             call(mpacks_directory + '/myservice-ambari-mpack-1.0.0.0/mpack.json'),
@@ -775,7 +810,7 @@ class TestMpacks(TestCase):
       serverConfiguration.COMMON_SERVICES_PATH_PROPERTY : os.path.join(uninstall_directory, "common-services"),
       serverConfiguration.EXTENSION_PATH_PROPERTY : os.path.join(uninstall_directory, "extensions"),
       serverConfiguration.MPACKS_STAGING_PATH_PROPERTY : mpacks_directory,
-      serverConfiguration.DASHBOARD_PATH_PROPERTY : os.path.join(uninstall_directory, "dashboards"),
+      serverConfiguration.RESOURCES_DIR_PROPERTY : uninstall_directory,
       serverConfiguration.SERVER_TMP_DIR_PROPERTY : "/tmp"
     }
 
@@ -784,7 +819,8 @@ class TestMpacks(TestCase):
     stacks_directory = fake_configs[serverConfiguration.STACK_LOCATION_KEY]
     extension_directory = fake_configs[serverConfiguration.EXTENSION_PATH_PROPERTY]
     common_services_directory = fake_configs[serverConfiguration.COMMON_SERVICES_PATH_PROPERTY]
-    dashboard_directory = fake_configs[serverConfiguration.DASHBOARD_PATH_PROPERTY]
+    resources_directory = fake_configs[serverConfiguration.RESOURCES_DIR_PROPERTY]
+    dashboards_directory = os.path.join(resources_directory, "dashboards")
 
     _uninstall_mpack("mystack-ambari-mpack", "1.0.0.1")
 
@@ -794,8 +830,8 @@ class TestMpacks(TestCase):
                               call(os.path.join(stacks_directory, "2.0/files/metainfo2.xml")),
                               call(os.path.join(extension_directory, "SERVICEB")),
                               call(os.path.join(common_services_directory, "SERVICEB")),
-                              call(os.path.join(dashboard_directory, "SERVICEB")),
-                              call(os.path.join(dashboard_directory, "files/STORM.txt"))]
+                              call(os.path.join(dashboards_directory, "SERVICEB")),
+                              call(os.path.join(dashboards_directory, "files/STORM.txt"))]
 
   def _create_empty_options_mock(self):
     options = MagicMock()
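
A note on the mocking idiom the updated purge and install tests lean on: assigning a list to a mock's side_effect makes the patched callable hand back the queued values one per call, in order, and assert_has_calls then verifies an ordered subset of the recorded calls. A minimal, self-contained sketch of the same pattern (using Python 3's unittest.mock and a stand-in purge helper, not the Ambari code itself):

from unittest.mock import patch, call
import os

def purge(paths):
    # hypothetical stand-in for purge_stacks_and_mpacks: act only on paths that exist
    return [p for p in paths if os.path.exists(p)]

with patch("os.path.exists", side_effect=[True, False, True]) as exists_mock:
    kept = purge(["/var/lib/ambari-server/resources",
                  "/var/lib/ambari-server/resources/stacks",
                  "/var/lib/ambari-server/resources/dashboards"])
    # three queued answers were consumed, one per call, in order
    exists_mock.assert_has_calls([
        call("/var/lib/ambari-server/resources"),
        call("/var/lib/ambari-server/resources/stacks"),
        call("/var/lib/ambari-server/resources/dashboards"),
    ])

assert kept == ["/var/lib/ambari-server/resources",
                "/var/lib/ambari-server/resources/dashboards"]

This is also why each purge_stacks_and_mpacks invocation in the test above re-seeds side_effect: every call consumes entries from the queue, so the list must account for the new leading existence check of the resources directory.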

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_service_check.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_service_check.py
index bb671aa..fe7456d 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_service_check.py
@@ -22,11 +22,11 @@ import re
 from mock.mock import MagicMock, call, patch
 from stacks.utils.RMFTestCase import *
 
-curl_call = MagicMock(return_value=(0, "{ \"app\": {\"state\": \"FINISHED\",\"finalStatus\": \"SUCCEEDED\"}}",''))
+curl_returns = [(0, "{\"clusterInfo\":{\"id\": \"1471586271500\",\"haState\": \"ACTIVE\"}}",''),
+                         (0, "{\"app\":{\"state\": \"FINISHED\",\"finalStatus\":\"SUCCEEDED\"}}",'')]
 
 @patch("platform.linux_distribution", new = MagicMock(return_value="Linux"))
 @patch("sys.executable", new = '/usr/bin/python2.6')
-@patch("resource_management.libraries.functions.get_user_call_output.get_user_call_output", new = curl_call)
 class TestServiceCheck(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "YARN/2.1.0.2.0/package"
   STACK_VERSION = "2.0.6"
@@ -38,32 +38,32 @@ class TestServiceCheck(RMFTestCase):
     re_search_mock.return_value = m
     m.group.return_value = "http://c6402.ambari.apache.org:8088/proxy/application_1429699682952_0010/"
 
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/service_check.py",
-                          classname="ServiceCheck",
-                          command="service_check",
-                          config_file="default.json",
-                          stack_version = self.STACK_VERSION,
-                          target = RMFTestCase.TARGET_COMMON_SERVICES,
-                          checked_call_mocks = [(0, "some test text, appTrackingUrl=http:"
-                                "//c6402.ambari.apache.org:8088/proxy/application_1429885383763_0001/, some test text")]
-    )
-    self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
-                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
-                              security_enabled = False,
-                              hadoop_bin_dir = '/usr/bin',
-                              keytab = UnknownConfigurationMock(),
-                              kinit_path_local = '/usr/bin/kinit',
-                              user = 'hdfs',
-                              dfs_type = '',
-                              mode = 0770,
-                              owner = 'ambari-qa',
-                              action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
-                              hadoop_conf_dir = '/etc/hadoop/conf',
-                              type = 'directory',
-                              )
-    self.assertCurlCallForwardsCredentialsOnRedirect()
-    self.assertNoMoreResources()
-
+    with patch("resource_management.libraries.functions.get_user_call_output.get_user_call_output", side_effect = curl_returns) as mock_curl:
+        self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/service_check.py",
+                           classname="ServiceCheck",
+                           command="service_check",
+                           config_file="default.json",
+                           stack_version = self.STACK_VERSION,
+                           target = RMFTestCase.TARGET_COMMON_SERVICES,
+                           checked_call_mocks = [(0, "some test text, appTrackingUrl=http:"
+                                                  "//c6402.ambari.apache.org:8088/proxy/application_1429885383763_0001/, some test text")]
+                           )
+        self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
+                                  immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
+                                  security_enabled = False,
+                                  hadoop_bin_dir = '/usr/bin',
+                                  keytab = UnknownConfigurationMock(),
+                                  kinit_path_local = '/usr/bin/kinit',
+                                  user = 'hdfs',
+                                  dfs_type = '',
+                                  mode = 0770,
+                                  owner = 'ambari-qa',
+                                  action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
+                                  hadoop_conf_dir = '/etc/hadoop/conf',
+                                  type = 'directory',
+                                  )
+        self.assertCurlCallForwardsCredentialsOnRedirect(mock_curl_call = mock_curl)
+        self.assertNoMoreResources()
 
   @patch("re.search")
   def test_service_check_secured(self, re_search_mock):
@@ -71,31 +71,32 @@ class TestServiceCheck(RMFTestCase):
     re_search_mock.return_value = m
     m.group.return_value = "http://c6402.ambari.apache.org:8088/proxy/application_1429699682952_0010/"
 
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/service_check.py",
-                          classname="ServiceCheck",
-                          command="service_check",
-                          config_file="secured.json",
-                          stack_version = self.STACK_VERSION,
-                          target = RMFTestCase.TARGET_COMMON_SERVICES,
-                          checked_call_mocks = [(0, "some test text, appTrackingUrl=http:"
-                               "//c6402.ambari.apache.org:8088/proxy/application_1429885383763_0001/, some test text")]
-    )
-    self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
-                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
-                              security_enabled = True,
-                              hadoop_bin_dir = '/usr/bin',
-                              keytab = '/etc/security/keytabs/hdfs.headless.keytab',
-                              kinit_path_local = '/usr/bin/kinit',
-                              user = 'hdfs',
-                              dfs_type = '',
-                              mode = 0770,
-                              owner = 'ambari-qa',
-                              action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
-                              hadoop_conf_dir = '/etc/hadoop/conf',
-                              type = 'directory',
-                              )
-    self.assertCurlCallForwardsCredentialsOnRedirect()
-    self.assertNoMoreResources()
+    with patch("resource_management.libraries.functions.get_user_call_output.get_user_call_output", side_effect = curl_returns) as mock_curl:
+        self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/service_check.py",
+                           classname="ServiceCheck",
+                           command="service_check",
+                           config_file="secured.json",
+                           stack_version = self.STACK_VERSION,
+                           target = RMFTestCase.TARGET_COMMON_SERVICES,
+                           checked_call_mocks = [(0, "some test text, appTrackingUrl=http:"
+                                                  "//c6402.ambari.apache.org:8088/proxy/application_1429885383763_0001/, some test text")]
+                           )
+        self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
+                                  immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
+                                  security_enabled = True,
+                                  hadoop_bin_dir = '/usr/bin',
+                                  keytab = '/etc/security/keytabs/hdfs.headless.keytab',
+                                  kinit_path_local = '/usr/bin/kinit',
+                                  user = 'hdfs',
+                                  dfs_type = '',
+                                  mode = 0770,
+                                  owner = 'ambari-qa',
+                                  action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
+                                  hadoop_conf_dir = '/etc/hadoop/conf',
+                                  type = 'directory',
+                                  )
+        self.assertCurlCallForwardsCredentialsOnRedirect(mock_curl_call = mock_curl)
+        self.assertNoMoreResources()
 
-  def assertCurlCallForwardsCredentialsOnRedirect(self):
-    self.assertIn('--location-trusted', curl_call.call_args[0][0])
\ No newline at end of file
+  def assertCurlCallForwardsCredentialsOnRedirect(self, mock_curl_call):
+    self.assertIn('--location-trusted', mock_curl_call.call_args[0][0])
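
For context on the restructuring above: a module-level MagicMock(return_value=...) applied through a class decorator returns the same canned tuple for every call and accumulates call history across tests, whereas patching inside each test with side_effect=curl_returns yields one response per call (first the ResourceManager cluster-info probe, then the application report) and gives each test a fresh mock to hand to the assertion helper. A small, self-contained sketch of the idiom under assumed names (a stand-in namespace and hypothetical URLs, not the real resource_management module):

import types
from unittest.mock import patch

# stand-in module holding the callable we want to patch
yarn_check = types.SimpleNamespace(get_user_call_output=None)

curl_returns = [(0, '{"clusterInfo": {"haState": "ACTIVE"}}', ''),
                (0, '{"app": {"finalStatus": "SUCCEEDED"}}', '')]

def run_check():
    # first call: RM HA state; second call: application report
    yarn_check.get_user_call_output(['curl', '--location-trusted', 'http://rm:8088/ws/v1/cluster/info'])
    yarn_check.get_user_call_output(['curl', '--location-trusted', 'http://rm:8088/ws/v1/cluster/apps/app_1'])

with patch.object(yarn_check, "get_user_call_output", side_effect=curl_returns) as mock_curl:
    run_check()
    assert mock_curl.call_count == 2
    # call_args holds the last call; its first positional argument is the command
    assert '--location-trusted' in mock_curl.call_args[0][0]

Passing the with-block's mock into the helper, as the rewritten assertCurlCallForwardsCredentialsOnRedirect does, keeps the credential-forwarding assertion tied to the calls made by that specific test.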

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
index 4c6c2a3..4fa9e02 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
@@ -1195,8 +1195,10 @@ class TestHDP206StackAdvisor(TestCase):
                   {'properties':
                      {'falcon_user': 'falcon'}},
                 'hdfs-site':
-                  {'properties': 
+                  {'properties':
                      {'dfs.datanode.data.dir': '/hadoop/hdfs/data',
+                      'dfs.namenode.name.dir': '/hadoop/hdfs/namenode',
+                      'dfs.namenode.checkpoint.dir': '/hadoop/hdfs/namesecondary',
                       'dfs.datanode.du.reserved': '10240000000'}},
                 'hive-env':
                   {'properties':
@@ -1330,6 +1332,8 @@ class TestHDP206StackAdvisor(TestCase):
                 'hdfs-site':
                   {'properties':
                      {'dfs.datanode.data.dir': '/hadoop/hdfs/data',
+                      'dfs.namenode.name.dir': '/hadoop/hdfs/namenode',
+                      'dfs.namenode.checkpoint.dir': '/hadoop/hdfs/namesecondary',
                       'dfs.datanode.du.reserved': '10240000000'}},
                 'hive-env':
                   {'properties':
@@ -1463,8 +1467,10 @@ class TestHDP206StackAdvisor(TestCase):
                      {'hive_user': 'hive',
                       'webhcat_user': 'webhcat'}},
                 'hdfs-site':
-                  {'properties': 
+                  {'properties':
                      {'dfs.datanode.data.dir': '/hadoop/hdfs/data',
+                      'dfs.namenode.name.dir': '/hadoop/hdfs/namenode',
+                      'dfs.namenode.checkpoint.dir': '/hadoop/hdfs/namesecondary',
                       'dfs.datanode.du.reserved': '10240000000'}},
                 'hadoop-env':
                   {'properties':
@@ -1484,10 +1490,12 @@ class TestHDP206StackAdvisor(TestCase):
 
     expected["hdfs-site"] = {
       'properties': {
-        'dfs.datanode.data.dir': '/hadoop/hdfs/data',
         'dfs.datanode.du.reserved': '10240000000',
         'dfs.internal.nameservices': 'mycluster',
-        'dfs.ha.namenodes.mycluster': 'nn1,nn2'
+        'dfs.ha.namenodes.mycluster': 'nn1,nn2',
+        'dfs.datanode.data.dir': '/hadoop/hdfs/data',
+        'dfs.namenode.name.dir': '/hadoop/hdfs/namenode',
+        'dfs.namenode.checkpoint.dir': '/hadoop/hdfs/namesecondary',
       },
       'property_attributes': {
         'dfs.namenode.rpc-address': {
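
The two directory properties added to the expected hdfs-site recommendations above (dfs.namenode.name.dir and dfs.namenode.checkpoint.dir) follow the same mount-point-derived convention as dfs.datanode.data.dir. Purely as an illustration of that mapping, and not Ambari's actual stack-advisor code, a hypothetical helper could produce the values the test expects like this:

def recommend_hdfs_dirs(mount_points):
    # hypothetical sketch: derive HDFS directory recommendations from host mount points
    data_dirs = ",".join(m.rstrip("/") + "/hadoop/hdfs/data" for m in mount_points)
    root = mount_points[0].rstrip("/") if mount_points else ""
    return {
        "dfs.datanode.data.dir": data_dirs,
        "dfs.namenode.name.dir": root + "/hadoop/hdfs/namenode",
        "dfs.namenode.checkpoint.dir": root + "/hadoop/hdfs/namesecondary",
    }

print(recommend_hdfs_dirs(["/"]))
# -> {'dfs.datanode.data.dir': '/hadoop/hdfs/data',
#     'dfs.namenode.name.dir': '/hadoop/hdfs/namenode',
#     'dfs.namenode.checkpoint.dir': '/hadoop/hdfs/namesecondary'}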


[10/25] ambari git commit: Merge from branch-2.5

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java
index ffca51d..62ce93b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java
@@ -36,7 +36,6 @@ import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.StackId;
@@ -128,24 +127,16 @@ public class ExecutionCommandWrapperTest {
     CONFIG_ATTRIBUTES = new HashMap<String, Map<String,String>>();
 
     //Cluster level global config
-    Config globalConfig = configFactory.createNew(cluster1, GLOBAL_CONFIG, GLOBAL_CLUSTER, CONFIG_ATTRIBUTES);
-    globalConfig.setTag(CLUSTER_VERSION_TAG);
-    cluster1.addConfig(globalConfig);
+    configFactory.createNew(cluster1, GLOBAL_CONFIG, CLUSTER_VERSION_TAG, GLOBAL_CLUSTER, CONFIG_ATTRIBUTES);
 
     //Cluster level service config
-    Config serviceSiteConfigCluster = configFactory.createNew(cluster1, SERVICE_SITE_CONFIG, SERVICE_SITE_CLUSTER, CONFIG_ATTRIBUTES);
-    serviceSiteConfigCluster.setTag(CLUSTER_VERSION_TAG);
-    cluster1.addConfig(serviceSiteConfigCluster);
+    configFactory.createNew(cluster1, SERVICE_SITE_CONFIG, CLUSTER_VERSION_TAG, SERVICE_SITE_CLUSTER, CONFIG_ATTRIBUTES);
 
     //Service level service config
-    Config serviceSiteConfigService = configFactory.createNew(cluster1, SERVICE_SITE_CONFIG, SERVICE_SITE_SERVICE, CONFIG_ATTRIBUTES);
-    serviceSiteConfigService.setTag(SERVICE_VERSION_TAG);
-    cluster1.addConfig(serviceSiteConfigService);
+    configFactory.createNew(cluster1, SERVICE_SITE_CONFIG, SERVICE_VERSION_TAG, SERVICE_SITE_SERVICE, CONFIG_ATTRIBUTES);
 
     //Host level service config
-    Config serviceSiteConfigHost = configFactory.createNew(cluster1, SERVICE_SITE_CONFIG, SERVICE_SITE_HOST, CONFIG_ATTRIBUTES);
-    serviceSiteConfigHost.setTag(HOST_VERSION_TAG);
-    cluster1.addConfig(serviceSiteConfigHost);
+    configFactory.createNew(cluster1, SERVICE_SITE_CONFIG, HOST_VERSION_TAG, SERVICE_SITE_HOST, CONFIG_ATTRIBUTES);
 
     ActionDBAccessor db = injector.getInstance(ActionDBAccessorImpl.class);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionSchedulerThreading.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionSchedulerThreading.java b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionSchedulerThreading.java
index 90a4421..246c8b3 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionSchedulerThreading.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionSchedulerThreading.java
@@ -34,8 +34,8 @@ import org.apache.ambari.server.orm.OrmTestHelper;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.StackId;
 import org.junit.After;
@@ -103,15 +103,11 @@ public class TestActionSchedulerThreading {
     Map<String, String> properties = new HashMap<String, String>();
     Map<String, Map<String, String>> propertiesAttributes = new HashMap<String, Map<String, String>>();
 
+    ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
+
     // foo-type for v1 on current stack
     properties.put("foo-property-1", "foo-value-1");
-    Config c1 = new ConfigImpl(cluster, "foo-type", properties, propertiesAttributes, injector);
-    c1.setTag("version-1");
-    c1.setStackId(stackId);
-    c1.setVersion(1L);
-
-    cluster.addConfig(c1);
-    c1.persist();
+    Config c1 = configFactory.createNew(cluster, "foo-type", "version-1", properties, propertiesAttributes);
 
     // make v1 "current"
     cluster.addDesiredConfig("admin", Sets.newHashSet(c1), "note-1");
@@ -122,12 +118,7 @@ public class TestActionSchedulerThreading {
     // save v2
     // foo-type for v2 on new stack
     properties.put("foo-property-2", "foo-value-2");
-    Config c2 = new ConfigImpl(cluster, "foo-type", properties, propertiesAttributes, injector);
-    c2.setTag("version-2");
-    c2.setStackId(newStackId);
-    c2.setVersion(2L);
-    cluster.addConfig(c2);
-    c2.persist();
+    Config c2 = configFactory.createNew(cluster, "foo-type", "version-2", properties, propertiesAttributes);
 
     // make v2 "current"
     cluster.addDesiredConfig("admin", Sets.newHashSet(c2), "note-2");

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
index 43503fa..fc2bca5 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
@@ -193,11 +193,7 @@ public class HeartbeatTestHelper {
     cluster.setCurrentStackVersion(stackId);
 
     ConfigFactory cf = injector.getInstance(ConfigFactory.class);
-    Config config = cf.createNew(cluster, "cluster-env", configProperties, new HashMap<String, Map<String, String>>());
-    config.setTag("version1");
-    config.persist();
-
-    cluster.addConfig(config);
+    Config config = cf.createNew(cluster, "cluster-env", "version1", configProperties, new HashMap<String, Map<String, String>>());
     cluster.addDesiredConfig("user", Collections.singleton(config));
 
     helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
index 76ab45c..68e9993 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
@@ -159,10 +159,8 @@ public class TestHeartbeatMonitor {
     }};
 
     ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
-    Config config = configFactory.createNew(cluster, "hadoop-env",
+    Config config = configFactory.createNew(cluster, "hadoop-env", "version1",
         new HashMap<String,String>() {{ put("a", "b"); }}, new HashMap<String, Map<String,String>>());
-    config.setTag("version1");
-    cluster.addConfig(config);
     cluster.addDesiredConfig("_test", Collections.singleton(config));
 
 
@@ -243,18 +241,15 @@ public class TestHeartbeatMonitor {
     }};
 
     ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
-    Config hadoopEnvConfig = configFactory.createNew(cluster, "hadoop-env",
+    Config hadoopEnvConfig = configFactory.createNew(cluster, "hadoop-env", "version1",
       new HashMap<String, String>() {{
         put("a", "b");
       }}, new HashMap<String, Map<String,String>>());
-    Config hbaseEnvConfig = configFactory.createNew(cluster, "hbase-env",
+    Config hbaseEnvConfig = configFactory.createNew(cluster, "hbase-env", "version1",
             new HashMap<String, String>() {{
               put("a", "b");
             }}, new HashMap<String, Map<String,String>>());
-    hadoopEnvConfig.setTag("version1");
-    cluster.addConfig(hadoopEnvConfig);
-    hbaseEnvConfig.setTag("version1");
-    cluster.addConfig(hbaseEnvConfig);
+
     cluster.addDesiredConfig("_test", Collections.singleton(hadoopEnvConfig));
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
index 6533e1c..6640837 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
@@ -218,7 +218,7 @@ public class RecoveryConfigHelperTest {
     config.updateProperties(new HashMap<String, String>() {{
       put(RecoveryConfigHelper.RECOVERY_ENABLED_KEY, "false");
     }});
-    config.persist(false);
+    config.save();
 
     // Recovery config should be stale because of the above change.
     boolean isConfigStale = recoveryConfigHelper.isConfigStale(cluster.getClusterName(), DummyHostname1,

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
index e54a117..78b804c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
@@ -87,8 +87,8 @@ import org.apache.ambari.server.security.ldap.LdapBatchDto;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.ComponentInfo;
+import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.MaintenanceState;
@@ -610,6 +610,7 @@ public class AmbariManagementControllerImplTest {
     Cluster cluster = createNiceMock(Cluster.class);
     ActionManager actionManager = createNiceMock(ActionManager.class);
     ClusterRequest clusterRequest = createNiceMock(ClusterRequest.class);
+    Config config = createNiceMock(Config.class);
 
     // requests
     Set<ClusterRequest> setRequests = Collections.singleton(clusterRequest);
@@ -632,18 +633,11 @@ public class AmbariManagementControllerImplTest {
     expect(clusters.getClusterById(1L)).andReturn(cluster).anyTimes();
     expect(cluster.getClusterName()).andReturn("clusterOld").anyTimes();
     expect(cluster.getConfigPropertiesTypes(anyObject(String.class))).andReturn(Maps.<PropertyInfo.PropertyType, Set<String>>newHashMap()).anyTimes();
-    expect(cluster.getDesiredConfigByType(anyObject(String.class))).andReturn(new ConfigImpl("config-type") {
-      @Override
-      public Map<String, Map<String, String>> getPropertiesAttributes() {
-        return Maps.newHashMap();
-      }
-
-      @Override
-      public Map<String, String> getProperties() {
-        return configReqProps;
-      }
 
-    }).anyTimes();
+    expect(config.getType()).andReturn("config-type").anyTimes();
+    expect(config.getProperties()).andReturn(configReqProps).anyTimes();
+    expect(config.getPropertiesAttributes()).andReturn(new HashMap<String,Map<String,String>>()).anyTimes();
+    expect(cluster.getDesiredConfigByType(anyObject(String.class))).andReturn(config).anyTimes();
 
     cluster.addSessionAttributes(anyObject(Map.class));
     expectLastCall().once();
@@ -652,7 +646,7 @@ public class AmbariManagementControllerImplTest {
     expectLastCall();
 
     // replay mocks
-    replay(actionManager, cluster, clusters, injector, clusterRequest, sessionManager);
+    replay(actionManager, cluster, clusters, config, injector, clusterRequest, sessionManager);
 
     // test
     AmbariManagementController controller = new AmbariManagementControllerImpl(actionManager, clusters, injector);
@@ -660,7 +654,7 @@ public class AmbariManagementControllerImplTest {
 
     // assert and verify
     assertSame(controller, controllerCapture.getValue());
-    verify(actionManager, cluster, clusters, injector, clusterRequest, sessionManager);
+    verify(actionManager, cluster, clusters, config, injector, clusterRequest, sessionManager);
   }
 
   /**
@@ -1224,6 +1218,80 @@ public class AmbariManagementControllerImplTest {
   }
 
   @Test
+  public void testGetHostComponents___ServiceComponentHostFilteredByMaintenanceState() throws Exception {
+    // member state mocks
+    Injector injector = createStrictMock(Injector.class);
+    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
+    StackId stack = createNiceMock(StackId.class);
+
+    Cluster cluster = createNiceMock(Cluster.class);
+    final Host host = createNiceMock(Host.class);
+    Service service = createNiceMock(Service.class);
+    ServiceComponent component = createNiceMock(ServiceComponent.class);
+    MaintenanceStateHelper maintHelper = createNiceMock(MaintenanceStateHelper.class);
+    final ServiceComponentHost componentHost1 = createNiceMock(ServiceComponentHost.class);
+    ServiceComponentHostResponse response1 = createNiceMock(ServiceComponentHostResponse.class);
+
+    // requests
+    ServiceComponentHostRequest request1 = new ServiceComponentHostRequest(
+        "cluster1", null, "component1", "host1", null);
+    request1.setMaintenanceState("ON");
+
+
+    Set<ServiceComponentHostRequest> setRequests = new HashSet<ServiceComponentHostRequest>();
+    setRequests.add(request1);
+
+    // expectations
+    // constructor init
+    injector.injectMembers(capture(controllerCapture));
+    expect(injector.getInstance(Gson.class)).andReturn(null);
+    expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(maintHelper);
+    expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class));
+    expect(maintHelper.getEffectiveState(
+        anyObject(ServiceComponentHost.class),
+        anyObject(Host.class))).andReturn(MaintenanceState.IMPLIED_FROM_SERVICE).anyTimes();
+
+    // getHostComponent
+    expect(clusters.getCluster("cluster1")).andReturn(cluster);
+    expect(clusters.getClustersForHost("host1")).andReturn(Collections.singleton(cluster));
+    expect(clusters.getHostsForCluster((String) anyObject())).andReturn(
+        new HashMap<String, Host>() {{
+          put("host1", host);
+        }}).anyTimes();
+
+    expect(cluster.getDesiredStackVersion()).andReturn(stack);
+    expect(cluster.getClusterName()).andReturn("cl1");
+    expect(stack.getStackName()).andReturn("stackName");
+    expect(stack.getStackVersion()).andReturn("stackVersion");
+
+    expect(ambariMetaInfo.getComponentToService("stackName", "stackVersion", "component1")).andReturn("service1");
+    expect(cluster.getService("service1")).andReturn(service);
+    expect(service.getServiceComponent("component1")).andReturn(component);
+    expect(component.getName()).andReturn("component1").anyTimes();
+    expect(component.getServiceComponentHosts()).andReturn(new HashMap<String, ServiceComponentHost>() {{
+      put("host1", componentHost1);
+    }});
+
+    expect(componentHost1.convertToResponse(null)).andReturn(response1);
+    expect(componentHost1.getHostName()).andReturn("host1");
+
+    // replay mocks
+    replay(maintHelper, injector, clusters, cluster, host, stack, ambariMetaInfo,
+        service, component, componentHost1, response1);
+
+    //test
+    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
+    setAmbariMetaInfo(ambariMetaInfo, controller);
+
+    Set<ServiceComponentHostResponse> responses = controller.getHostComponents(setRequests);
+
+    // assert and verify
+    assertSame(controller, controllerCapture.getValue());
+    assertTrue(responses.size() == 1);
+    verify(injector, clusters, cluster, host, stack, ambariMetaInfo, service, component, componentHost1, response1);
+  }
+
+  @Test
   public void testGetHostComponents___OR_Predicate_ServiceComponentHostNotFoundException() throws Exception {
     // member state mocks
     Injector injector = createStrictMock(Injector.class);

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index 66c0f181..298a85f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -122,7 +122,6 @@ import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.HostComponentAdminState;
 import org.apache.ambari.server.state.HostState;
@@ -408,7 +407,6 @@ public class AmbariManagementControllerTest {
     ConfigGroup configGroup = configGroupFactory.createNew(cluster, name,
       tag, "", configMap, hostMap);
 
-    configGroup.persist();
     cluster.addConfigGroup(configGroup);
 
     return configGroup.getId();
@@ -1281,6 +1279,15 @@ public class AmbariManagementControllerTest {
     assertEquals("false", ec.getCommandParams().get("command_retry_enabled"));
     Map<String, Set<String>> chInfo = ec.getClusterHostInfo();
     assertTrue(chInfo.containsKey("namenode_host"));
+    assertFalse(ec.getCommandParams().containsKey("custom_folder"));
+
+    ec = controller.getExecutionCommand(cluster,
+                                        s1.getServiceComponent("DATANODE").getServiceComponentHost(host1),
+                                        RoleCommand.START);
+    assertEquals(cluster1, ec.getClusterName());
+    assertNotNull(ec.getCommandParams());
+    assertTrue(ec.getCommandParams().containsKey("custom_folder"));
+    assertEquals("dashboards", ec.getCommandParams().get("custom_folder"));
   }
 
   @Test
@@ -1931,10 +1938,8 @@ public class AmbariManagementControllerTest {
     Map<String, String> properties = new HashMap<String, String>();
     Map<String, Map<String, String>> propertiesAttributes = new HashMap<String, Map<String,String>>();
 
-    Config c1 = new ConfigImpl(cluster, "hdfs-site", properties, propertiesAttributes, injector);
-    c1.setTag("v1");
-    cluster.addConfig(c1);
-    c1.persist();
+    ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
+    Config c1 = configFactory.createNew(cluster, "hdfs-site", "v1",  properties, propertiesAttributes);
     configs.put(c1.getType(), c1);
 
     ServiceRequest r = new ServiceRequest(cluster1, serviceName, State.INSTALLED.toString());
@@ -1974,26 +1979,17 @@ public class AmbariManagementControllerTest {
     properties.put("a", "a1");
     properties.put("b", "b1");
 
-    Config c1 = new ConfigImpl(cluster, "hdfs-site", properties, propertiesAttributes, injector);
+    ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
+    Config c1 = configFactory.createNew(cluster, "hdfs-site", "v1", properties, propertiesAttributes);
     properties.put("c", cluster1);
     properties.put("d", "d1");
-    Config c2 = new ConfigImpl(cluster, "core-site", properties, propertiesAttributes, injector);
-    Config c3 = new ConfigImpl(cluster, "foo-site", properties, propertiesAttributes, injector);
+
+    Config c2 = configFactory.createNew(cluster, "core-site", "v1", properties, propertiesAttributes);
+    Config c3 = configFactory.createNew(cluster, "foo-site", "v1", properties, propertiesAttributes);
 
     Map<String, String> mapRequestProps = new HashMap<String, String>();
     mapRequestProps.put("context", "Called from a test");
 
-    c1.setTag("v1");
-    c2.setTag("v1");
-    c3.setTag("v1");
-
-    cluster.addConfig(c1);
-    cluster.addConfig(c2);
-    cluster.addConfig(c3);
-    c1.persist();
-    c2.persist();
-    c3.persist();
-
     configs.put(c1.getType(), c1);
     configs.put(c2.getType(), c2);
 
@@ -4201,27 +4197,20 @@ public class AmbariManagementControllerTest {
     cluster.setCurrentStackVersion(new StackId("HDP-2.0.6"));
 
     ConfigFactory cf = injector.getInstance(ConfigFactory.class);
-    Config config1 = cf.createNew(cluster, "global",
+    Config config1 = cf.createNew(cluster, "global", "version1",
         new HashMap<String, String>() {{
           put("key1", "value1");
         }}, new HashMap<String, Map<String, String>>());
-    config1.setTag("version1");
 
-    Config config2 = cf.createNew(cluster, "core-site",
+    Config config2 = cf.createNew(cluster, "core-site", "version1",
         new HashMap<String, String>() {{
           put("key1", "value1");
         }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("version1");
 
-    Config config3 = cf.createNew(cluster, "yarn-site",
+    Config config3 = cf.createNew(cluster, "yarn-site", "version1",
         new HashMap<String, String>() {{
           put("test.password", "supersecret");
         }}, new HashMap<String, Map<String,String>>());
-    config3.setTag("version1");
-
-    cluster.addConfig(config1);
-    cluster.addConfig(config2);
-    cluster.addConfig(config3);
 
     Service hdfs = cluster.addService("HDFS");
     Service mapred = cluster.addService("YARN");
@@ -4374,20 +4363,15 @@ public class AmbariManagementControllerTest {
     cluster.setCurrentStackVersion(new StackId("HDP-2.0.7"));
 
     ConfigFactory cf = injector.getInstance(ConfigFactory.class);
-    Config config1 = cf.createNew(cluster, "global",
+    Config config1 = cf.createNew(cluster, "global", "version1",
       new HashMap<String, String>() {{
         put("key1", "value1");
       }}, new HashMap<String, Map<String,String>>());
-    config1.setTag("version1");
 
-    Config config2 = cf.createNew(cluster, "core-site",
+    Config config2 = cf.createNew(cluster, "core-site", "version1",
       new HashMap<String, String>() {{
         put("key1", "value1");
       }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("version1");
-
-    cluster.addConfig(config1);
-    cluster.addConfig(config2);
 
     Service hdfs = cluster.addService("HDFS");
 
@@ -4479,19 +4463,15 @@ public class AmbariManagementControllerTest {
     cluster.setCurrentStackVersion(new StackId("HDP-2.0.7"));
 
     ConfigFactory cf = injector.getInstance(ConfigFactory.class);
-    Config config1 = cf.createNew(cluster, "global",
+    Config config1 = cf.createNew(cluster, "global", "version1",
         new HashMap<String, String>() {{
           put("key1", "value1");
         }}, new HashMap<String, Map<String,String>>());
-    config1.setTag("version1");
 
-    Config config2 = cf.createNew(cluster, "core-site",
+    Config config2 = cf.createNew(cluster, "core-site", "version1",
         new HashMap<String, String>() {{
           put("key1", "value1");
         }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("version1");
-    config1.persist();
-    config2.persist();
 
     cluster.addConfig(config1);
     cluster.addConfig(config2);
@@ -4767,18 +4747,14 @@ public class AmbariManagementControllerTest {
     cluster.setCurrentStackVersion(new StackId("HDP-0.1"));
 
     ConfigFactory cf = injector.getInstance(ConfigFactory.class);
-    Config config1 = cf.createNew(cluster, "global",
+    Config config1 = cf.createNew(cluster, "global", "version1",
         new HashMap<String, String>(){{ put("key1", "value1"); }}, new HashMap<String, Map<String,String>>());
-    config1.setTag("version1");
     config1.setPropertiesAttributes(new HashMap<String, Map<String, String>>(){{ put("attr1", new HashMap<String, String>()); }});
 
-    Config config2 = cf.createNew(cluster, "core-site",
+    Config config2 = cf.createNew(cluster, "core-site", "version1",
         new HashMap<String, String>(){{ put("key1", "value1"); }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("version1");
     config2.setPropertiesAttributes(new HashMap<String, Map<String, String>>(){{ put("attr2", new HashMap<String, String>()); }});
 
-    cluster.addConfig(config1);
-    cluster.addConfig(config2);
     cluster.addDesiredConfig("_test", Collections.singleton(config1));
     cluster.addDesiredConfig("_test", Collections.singleton(config2));
 
@@ -5513,11 +5489,8 @@ public class AmbariManagementControllerTest {
       configs3, null);
 
     ConfigFactory cf = injector.getInstance(ConfigFactory.class);
-    Config config1 = cf.createNew(cluster, "kerberos-env",
+    Config config1 = cf.createNew(cluster, "kerberos-env", "version1",
         new HashMap<String, String>(), new HashMap<String, Map<String,String>>());
-    config1.setTag("version1");
-
-    cluster.addConfig(config1);
 
     ClusterRequest crReq = new ClusterRequest(cluster.getClusterId(), cluster1, null, null);
     crReq.setDesiredConfig(Collections.singletonList(cr1));
@@ -6439,20 +6412,15 @@ public class AmbariManagementControllerTest {
     cluster.setCurrentStackVersion(new StackId("HDP-2.0.6"));
 
     ConfigFactory cf = injector.getInstance(ConfigFactory.class);
-    Config config1 = cf.createNew(cluster, "global",
+    Config config1 = cf.createNew(cluster, "global", "version1",
       new HashMap<String, String>() {{
         put("key1", "value1");
       }}, new HashMap<String, Map<String,String>>());
-    config1.setTag("version1");
 
-    Config config2 = cf.createNew(cluster, "core-site",
+    Config config2 = cf.createNew(cluster, "core-site", "version1",
       new HashMap<String, String>() {{
         put("key1", "value1");
       }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("version1");
-
-    cluster.addConfig(config1);
-    cluster.addConfig(config2);
 
     Service hdfs = cluster.addService("HDFS");
     Service mapred = cluster.addService("YARN");
@@ -6545,20 +6513,15 @@ public class AmbariManagementControllerTest {
     cluster.setCurrentStackVersion(new StackId("HDP-2.0.6"));
 
     ConfigFactory cf = injector.getInstance(ConfigFactory.class);
-    Config config1 = cf.createNew(cluster, "global",
+    Config config1 = cf.createNew(cluster, "global", "version1",
       new HashMap<String, String>() {{
         put("key1", "value1");
       }}, new HashMap<String, Map<String,String>>());
-    config1.setTag("version1");
 
-    Config config2 = cf.createNew(cluster, "core-site",
+    Config config2 = cf.createNew(cluster, "core-site", "version1",
       new HashMap<String, String>() {{
         put("key1", "value1");
       }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("version1");
-
-    cluster.addConfig(config1);
-    cluster.addConfig(config2);
 
     Service hdfs = cluster.addService("HDFS");
     Service mapred = cluster.addService("YARN");
@@ -6972,13 +6935,13 @@ public class AmbariManagementControllerTest {
     String group2 = getUniqueName();
     String tag2 = getUniqueName();
 
+    ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
+
     // Create Config group for core-site
     configs = new HashMap<String, String>();
     configs.put("a", "c");
     cluster = clusters.getCluster(cluster1);
-    final Config config = new ConfigImpl("core-site");
-    config.setProperties(configs);
-    config.setTag("version122");
+    final Config config =  configFactory.createReadOnly("core-site", "version122", configs, null);
     Long groupId = createConfigGroup(cluster, group1, tag1,
       new ArrayList<String>() {{ add(host1); }},
       new ArrayList<Config>() {{ add(config); }});
@@ -6989,9 +6952,7 @@ public class AmbariManagementControllerTest {
     configs = new HashMap<String, String>();
     configs.put("a", "c");
 
-    final Config config2 = new ConfigImpl("mapred-site");
-    config2.setProperties(configs);
-    config2.setTag("version122");
+    final Config config2 =  configFactory.createReadOnly("mapred-site", "version122", configs, null);
     groupId = createConfigGroup(cluster, group2, tag2,
       new ArrayList<String>() {{ add(host1); }},
       new ArrayList<Config>() {{ add(config2); }});
@@ -7056,7 +7017,6 @@ public class AmbariManagementControllerTest {
     ConfigGroup configGroup = cluster.getConfigGroups().get(groupId);
     configGroup.setHosts(new HashMap<Long, Host>() {{ put(3L,
       clusters.getHost(host3)); }});
-    configGroup.persist();
 
     requestId = startService(cluster1, serviceName2, false, false);
     mapredInstall = null;
@@ -7134,9 +7094,8 @@ public class AmbariManagementControllerTest {
     String group1 = getUniqueName();
     String tag1 = getUniqueName();
 
-    final Config config = new ConfigImpl("hdfs-site");
-    config.setProperties(configs);
-    config.setTag("version122");
+    ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
+    final Config config = configFactory.createReadOnly("hdfs-site", "version122", configs, null);
     Long groupId = createConfigGroup(clusters.getCluster(cluster1), group1, tag1,
         new ArrayList<String>() {{
           add(host1);
@@ -7244,9 +7203,8 @@ public class AmbariManagementControllerTest {
     configs = new HashMap<String, String>();
     configs.put("a", "c");
 
-    final Config config = new ConfigImpl("hdfs-site");
-    config.setProperties(configs);
-    config.setTag("version122");
+    ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
+    final Config config = configFactory.createReadOnly("hdfs-site", "version122", configs, null);
     Long groupId = createConfigGroup(clusters.getCluster(cluster1), group1, tag1,
       new ArrayList<String>() {{ add(host1); add(host2); }},
       new ArrayList<Config>() {{ add(config); }});

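A note on the recurring pattern in the hunks above: test configs are now created through the injected ConfigFactory instead of instantiating ConfigImpl and wiring it up by hand with setTag()/persist()/cluster.addConfig(). A minimal sketch, using only the factory calls visible in this diff (the dropped addConfig/persist calls suggest createNew attaches and persists the config as part of creation, while createReadOnly builds a detached config for config-group fixtures):

    ConfigFactory cf = injector.getInstance(ConfigFactory.class);

    // cluster-attached config: cluster, type, tag, properties, property attributes
    Config coreSite = cf.createNew(cluster, "core-site", "version1",
        Collections.singletonMap("key1", "value1"),
        new HashMap<String, Map<String, String>>());
    cluster.addDesiredConfig("_test", Collections.singleton(coreSite));

    // detached config used when building config groups: type, tag, properties, attributes
    Config groupConfig = cf.createReadOnly("core-site", "version122",
        Collections.singletonMap("a", "c"), null);
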
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
index f2dd099..02a2851 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
@@ -5919,6 +5919,10 @@ public class BlueprintConfigurationProcessorTest {
     hiveProperties.put("hive.exec.post.hooks", "");
     properties.put("hive-site", hiveProperties);
 
+    Map<String, String> hiveEnv = new HashMap<String, String>();
+    hiveEnv.put("hive.atlas.hook", "false");
+    properties.put("hive-env", hiveEnv);
+
 
     Map<String, Map<String, String>> parentProperties = new HashMap<String, Map<String, String>>();
     Configuration parentClusterConfig = new Configuration(parentProperties,
@@ -5996,6 +6000,9 @@ public class BlueprintConfigurationProcessorTest {
     hiveProperties.put("hive.exec.post.hooks", "");
     properties.put("hive-site", hiveProperties);
 
+    Map<String, String> hiveEnv = new HashMap<String, String>();
+    properties.put("hive-env", hiveEnv);
+
     return properties;
   }
 
@@ -6047,6 +6054,10 @@ public class BlueprintConfigurationProcessorTest {
     hiveProperties.put("hive.exec.post.hooks", "foo");
     properties.put("hive-site", hiveProperties);
 
+    Map<String, String> hiveEnv = new HashMap<String, String>();
+    hiveEnv.put("hive.atlas.hook", "false");
+    properties.put("hive-env", hiveEnv);
+
 
     Map<String, Map<String, String>> parentProperties = new HashMap<String, Map<String, String>>();
     Configuration parentClusterConfig = new Configuration(parentProperties,

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/CalculatedStatusTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/CalculatedStatusTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/CalculatedStatusTest.java
index 6f592cd..a96f395 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/CalculatedStatusTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/CalculatedStatusTest.java
@@ -657,6 +657,37 @@ public class CalculatedStatusTest {
     assertEquals(HostRoleStatus.IN_PROGRESS, calc.getStatus());
   }
 
+  /**
+   * Tests that when there are no tasks and all counts are 0, the returned
+   * status is {@link HostRoleStatus#COMPLETED}.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testGetCompletedStatusForNoTasks() throws Exception {
+    // no status / no tasks
+    CalculatedStatus status = CalculatedStatus.statusFromTaskEntities(
+        new ArrayList<HostRoleCommandEntity>(), false);
+
+    assertEquals(HostRoleStatus.COMPLETED, status.getStatus());
+
+    // empty summaries
+    status = CalculatedStatus.statusFromStageSummary(
+        new HashMap<Long, HostRoleCommandStatusSummaryDTO>(), new HashSet<Long>());
+
+    assertEquals(HostRoleStatus.COMPLETED, status.getStatus());
+
+    // generate a map of 0's - COMPLETED=0, IN_PROGRESS=0, etc
+    Map<HostRoleStatus, Integer> counts = CalculatedStatus.calculateStatusCounts(new ArrayList<HostRoleStatus>());
+    Map<HostRoleStatus, Integer> displayCounts = CalculatedStatus.calculateStatusCounts(new ArrayList<HostRoleStatus>());
+
+    HostRoleStatus hostRoleStatus = CalculatedStatus.calculateSummaryStatusOfUpgrade(counts, 0);
+    HostRoleStatus hostRoleDisplayStatus = CalculatedStatus.calculateSummaryDisplayStatus(displayCounts, 0, false);
+
+    assertEquals(HostRoleStatus.COMPLETED, hostRoleStatus);
+    assertEquals(HostRoleStatus.COMPLETED, hostRoleDisplayStatus);
+  }
+
   private Collection<HostRoleCommandEntity> getTaskEntities(HostRoleStatus... statuses) {
     Collection<HostRoleCommandEntity> entities = new LinkedList<HostRoleCommandEntity>();
 

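The new test pins down the zero-task edge case: a request with no tasks and all status counts at zero reports COMPLETED instead of lingering in an in-progress-looking state. (The RequestResourceProviderTest change further below covers the one exception: topology/logical requests that have not generated tasks yet stay PENDING.) The invariant, condensed to the two helpers exercised above:

    // an empty task list yields a count map of all zeros
    Map<HostRoleStatus, Integer> counts =
        CalculatedStatus.calculateStatusCounts(new ArrayList<HostRoleStatus>());

    // with a total task count of 0, summary and display summary are both COMPLETED
    assertEquals(HostRoleStatus.COMPLETED,
        CalculatedStatus.calculateSummaryStatusOfUpgrade(counts, 0));
    assertEquals(HostRoleStatus.COMPLETED,
        CalculatedStatus.calculateSummaryDisplayStatus(counts, 0, false));
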
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java
index 5dfc74d..f7dff11 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java
@@ -20,11 +20,10 @@ package org.apache.ambari.server.controller.internal;
 
 
 import static org.apache.ambari.server.controller.internal.HostComponentResourceProvider.HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID;
-import org.apache.ambari.server.topology.Blueprint;
 import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.capture;
-import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.eq;
+import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.newCapture;
 import static org.powermock.api.easymock.PowerMock.createMock;
 import static org.powermock.api.easymock.PowerMock.createNiceMock;
@@ -77,6 +76,7 @@ import org.apache.ambari.server.security.authorization.AuthorizationHelperInitia
 import org.apache.ambari.server.security.authorization.RoleAuthorization;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.topology.Blueprint;
 import org.apache.ambari.server.topology.ClusterTopology;
 import org.apache.ambari.server.topology.HostGroupInfo;
 import org.apache.ambari.server.topology.LogicalRequest;
@@ -1200,7 +1200,7 @@ public class RequestResourceProviderTest {
       }
     }
     Assert.assertNotNull(propertyIdToAssert);
-    Assert.assertEquals("true", (String) propertyValueToAssert);
+    Assert.assertEquals("true", propertyValueToAssert);
   }
 
   @Test
@@ -1624,6 +1624,12 @@ public class RequestResourceProviderTest {
     verify(managementController, actionManager, clusters, requestMock, requestDAO, hrcDAO);
   }
 
+  /**
+   * Tests that topology requests return a different status (PENDING) if there are
+   * no tasks. Normal requests should return COMPLETED.
+   *
+   * @throws Exception
+   */
   @Test
   @PrepareForTest(AmbariServer.class)
   public void testGetLogicalRequestStatusWithNoTasks() throws Exception {

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java
index 96810cf..1747b28 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java
@@ -66,7 +66,7 @@ import org.apache.ambari.server.security.authorization.RoleAuthorization;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
+import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.HostState;
 import org.apache.ambari.server.state.RepositoryVersionState;
@@ -108,6 +108,7 @@ public class UpgradeResourceProviderHDP22Test {
   private AmbariManagementController amc;
   private StackDAO stackDAO;
   private TopologyManager topologyManager;
+  private ConfigFactory configFactory;
 
   private static final String configTagVersion1 = "version1";
   private static final String configTagVersion2 = "version2";
@@ -136,6 +137,7 @@ public class UpgradeResourceProviderHDP22Test {
     stackDAO = injector.getInstance(StackDAO.class);
     upgradeDao = injector.getInstance(UpgradeDAO.class);
     repoVersionDao = injector.getInstance(RepositoryVersionDAO.class);
+    configFactory = injector.getInstance(ConfigFactory.class);
 
     AmbariEventPublisher publisher = createNiceMock(AmbariEventPublisher.class);
     replay(publisher);
@@ -233,11 +235,7 @@ public class UpgradeResourceProviderHDP22Test {
       }
     }
 
-    Config config = new ConfigImpl("hive-site");
-    config.setProperties(configTagVersion1Properties);
-    config.setTag(configTagVersion1);
-
-    cluster.addConfig(config);
+    Config config = configFactory.createNew(cluster, "hive-site", configTagVersion1, configTagVersion1Properties, null);
     cluster.addDesiredConfig("admin", Collections.singleton(config));
 
     Map<String, Object> requestProps = new HashMap<String, Object>();
@@ -286,9 +284,7 @@ public class UpgradeResourceProviderHDP22Test {
     // Hive service checks have generated the ExecutionCommands by now.
     // Change the new desired config tag and verify execution command picks up new tag
     assertEquals(configTagVersion1, cluster.getDesiredConfigByType("hive-site").getTag());
-    final Config newConfig = new ConfigImpl("hive-site");
-    newConfig.setProperties(configTagVersion2Properties);
-    newConfig.setTag(configTagVersion2);
+    final Config newConfig = configFactory.createNew(cluster, "hive-site", configTagVersion2, configTagVersion2Properties, null);
     Set<Config> desiredConfigs = new HashSet<Config>() {
       {
         add(newConfig);

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
index d69bdbe..844331e 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
@@ -85,8 +85,8 @@ import org.apache.ambari.server.serveraction.upgrades.AutoSkipFailedSummaryActio
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.HostState;
@@ -144,6 +144,7 @@ public class UpgradeResourceProviderTest {
   private StackDAO stackDAO;
   private AmbariMetaInfo ambariMetaInfo;
   private TopologyManager topologyManager;
+  private ConfigFactory configFactory;
 
   @Before
   public void before() throws Exception {
@@ -174,6 +175,7 @@ public class UpgradeResourceProviderTest {
 
     amc = injector.getInstance(AmbariManagementController.class);
     ambariMetaInfo = injector.getInstance(AmbariMetaInfo.class);
+    configFactory = injector.getInstance(ConfigFactory.class);
 
     Field field = AmbariServer.class.getDeclaredField("clusterController");
     field.setAccessible(true);
@@ -1046,16 +1048,9 @@ public class UpgradeResourceProviderTest {
     }
 
 
-    Config config = new ConfigImpl("zoo.cfg");
-    config.setProperties(new HashMap<String, String>() {{
-      put("a", "b");
-    }});
-    config.setTag("abcdefg");
-
-    cluster.addConfig(config);
+    Config config = configFactory.createNew(cluster, "zoo.cfg", "abcdefg", Collections.singletonMap("a", "b"), null);
     cluster.addDesiredConfig("admin", Collections.singleton(config));
 
-
     Map<String, Object> requestProps = new HashMap<String, Object>();
     requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1");
     requestProps.put(UpgradeResourceProvider.UPGRADE_VERSION, "2.2.0.0");

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/controller/logging/LogSearchDataRetrievalServiceTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/logging/LogSearchDataRetrievalServiceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/logging/LogSearchDataRetrievalServiceTest.java
index b58350b..a36ce0d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/logging/LogSearchDataRetrievalServiceTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/logging/LogSearchDataRetrievalServiceTest.java
@@ -23,7 +23,6 @@ import org.easymock.EasyMockSupport;
 import org.junit.Test;
 
 import java.util.Collections;
-import java.util.HashSet;
 import java.util.Set;
 import java.util.concurrent.Executor;
 
@@ -51,8 +50,7 @@ public class LogSearchDataRetrievalServiceTest {
 
     EasyMockSupport mockSupport = new EasyMockSupport();
 
-    LoggingRequestHelperFactory helperFactoryMock =
-      mockSupport.createMock(LoggingRequestHelperFactory.class);
+    LoggingRequestHelperFactory helperFactoryMock = mockSupport.createMock(LoggingRequestHelperFactory.class);
 
     LoggingRequestHelper helperMock =
       mockSupport.createMock(LoggingRequestHelper.class);
@@ -62,8 +60,7 @@ public class LogSearchDataRetrievalServiceTest {
 
     mockSupport.replayAll();
 
-    LogSearchDataRetrievalService retrievalService =
-      new LogSearchDataRetrievalService();
+    LogSearchDataRetrievalService retrievalService = new LogSearchDataRetrievalService();
     retrievalService.setLoggingRequestHelperFactory(helperFactoryMock);
     // call the initialization routine called by the Google framework
     retrievalService.doStart();
@@ -71,8 +68,7 @@ public class LogSearchDataRetrievalServiceTest {
     String resultTailFileURI =
       retrievalService.getLogFileTailURI("http://localhost", expectedComponentName, expectedHostName, expectedClusterName);
 
-    assertEquals("TailFileURI was not returned as expected",
-                 expectedResultURI, resultTailFileURI);
+    assertEquals("TailFileURI was not returned as expected", expectedResultURI, resultTailFileURI);
 
     mockSupport.verifyAll();
   }
@@ -85,8 +81,7 @@ public class LogSearchDataRetrievalServiceTest {
 
     EasyMockSupport mockSupport = new EasyMockSupport();
 
-    LoggingRequestHelperFactory helperFactoryMock =
-      mockSupport.createMock(LoggingRequestHelperFactory.class);
+    LoggingRequestHelperFactory helperFactoryMock = mockSupport.createMock(LoggingRequestHelperFactory.class);
 
     // return null, to simulate the case where LogSearch Server is
     // not available for some reason
@@ -94,8 +89,7 @@ public class LogSearchDataRetrievalServiceTest {
 
     mockSupport.replayAll();
 
-    LogSearchDataRetrievalService retrievalService =
-      new LogSearchDataRetrievalService();
+    LogSearchDataRetrievalService retrievalService = new LogSearchDataRetrievalService();
     retrievalService.setLoggingRequestHelperFactory(helperFactoryMock);
     // call the initialization routine called by the Google framework
     retrievalService.doStart();
@@ -103,11 +97,9 @@ public class LogSearchDataRetrievalServiceTest {
     String resultTailFileURI =
       retrievalService.getLogFileTailURI("http://localhost", expectedComponentName, expectedHostName, expectedClusterName);
 
-    assertNull("TailFileURI should be null in this case",
-               resultTailFileURI);
+    assertNull("TailFileURI should be null in this case", resultTailFileURI);
 
     mockSupport.verifyAll();
-
   }
 
   @Test
@@ -118,11 +110,9 @@ public class LogSearchDataRetrievalServiceTest {
 
     EasyMockSupport mockSupport = new EasyMockSupport();
 
-    LoggingRequestHelperFactory helperFactoryMock =
-      mockSupport.createMock(LoggingRequestHelperFactory.class);
+    LoggingRequestHelperFactory helperFactoryMock = mockSupport.createMock(LoggingRequestHelperFactory.class);
 
-    Executor executorMock =
-      mockSupport.createMock(Executor.class);
+    Executor executorMock = mockSupport.createMock(Executor.class);
 
     // expect the executor to be called to execute the LogSearch request
     executorMock.execute(isA(LogSearchDataRetrievalService.LogSearchFileNameRequestRunnable.class));
@@ -131,24 +121,19 @@ public class LogSearchDataRetrievalServiceTest {
 
     mockSupport.replayAll();
 
-    LogSearchDataRetrievalService retrievalService =
-      new LogSearchDataRetrievalService();
+    LogSearchDataRetrievalService retrievalService = new LogSearchDataRetrievalService();
     retrievalService.setLoggingRequestHelperFactory(helperFactoryMock);
     // call the initialization routine called by the Google framework
     retrievalService.doStart();
     retrievalService.setExecutor(executorMock);
 
 
-    assertEquals("Default request set should be empty",
-                 0, retrievalService.getCurrentRequests().size());
+    assertEquals("Default request set should be empty", 0, retrievalService.getCurrentRequests().size());
 
-    Set<String> resultSet =
-      retrievalService.getLogFileNames(expectedComponentName, expectedHostName, expectedClusterName);
+    Set<String> resultSet = retrievalService.getLogFileNames(expectedComponentName, expectedHostName, expectedClusterName);
 
-    assertNull("Inital query on the retrieval service should be null, since cache is empty by default",
-                resultSet);
-    assertEquals("Incorrect number of entries in the current request set",
-                 1, retrievalService.getCurrentRequests().size());
+    assertNull("Initial query on the retrieval service should be null, since cache is empty by default", resultSet);
+    assertEquals("Incorrect number of entries in the current request set", 1, retrievalService.getCurrentRequests().size());
     assertTrue("Incorrect HostComponent set on request set",
                 retrievalService.getCurrentRequests().contains(expectedComponentName + "+" + expectedHostName));
 
@@ -163,16 +148,13 @@ public class LogSearchDataRetrievalServiceTest {
 
     EasyMockSupport mockSupport = new EasyMockSupport();
 
-    LoggingRequestHelperFactory helperFactoryMock =
-      mockSupport.createMock(LoggingRequestHelperFactory.class);
+    LoggingRequestHelperFactory helperFactoryMock = mockSupport.createMock(LoggingRequestHelperFactory.class);
 
-    Executor executorMock =
-      mockSupport.createMock(Executor.class);
+    Executor executorMock = mockSupport.createMock(Executor.class);
 
     mockSupport.replayAll();
 
-    LogSearchDataRetrievalService retrievalService =
-      new LogSearchDataRetrievalService();
+    LogSearchDataRetrievalService retrievalService = new LogSearchDataRetrievalService();
     retrievalService.setLoggingRequestHelperFactory(helperFactoryMock);
     // call the initialization routine called by the Google framework
     retrievalService.doStart();
@@ -184,16 +166,15 @@ public class LogSearchDataRetrievalServiceTest {
     // but is not yet completed.
     retrievalService.getCurrentRequests().add(expectedComponentName + "+" + expectedHostName);
 
-    Set<String> resultSet =
-      retrievalService.getLogFileNames(expectedComponentName, expectedHostName, expectedClusterName);
+    Set<String> resultSet = retrievalService.getLogFileNames(expectedComponentName, expectedHostName, expectedClusterName);
 
-    assertNull("Inital query on the retrieval service should be null, since cache is empty by default",
-      resultSet);
+    assertNull("Initial query on the retrieval service should be null, since cache is empty by default", resultSet);
 
     mockSupport.verifyAll();
   }
 
   @Test
+  @SuppressWarnings("unchecked")
   public void testRunnableWithSuccessfulCall() throws Exception {
     final String expectedHostName = "c6401.ambari.apache.org";
     final String expectedComponentName = "DATANODE";
@@ -202,17 +183,12 @@ public class LogSearchDataRetrievalServiceTest {
 
     EasyMockSupport mockSupport = new EasyMockSupport();
 
-    LoggingRequestHelperFactory helperFactoryMock =
-      mockSupport.createMock(LoggingRequestHelperFactory.class);
-    AmbariManagementController controllerMock =
-      mockSupport.createMock(AmbariManagementController.class);
-    LoggingRequestHelper helperMock =
-      mockSupport.createMock(LoggingRequestHelper.class);
+    LoggingRequestHelperFactory helperFactoryMock = mockSupport.createMock(LoggingRequestHelperFactory.class);
+    AmbariManagementController controllerMock = mockSupport.createMock(AmbariManagementController.class);
+    LoggingRequestHelper helperMock = mockSupport.createMock(LoggingRequestHelper.class);
 
-    Cache cacheMock =
-      mockSupport.createMock(Cache.class);
-    Set currentRequestsMock =
-      mockSupport.createMock(Set.class);
+    Cache<String, Set<String>> cacheMock = mockSupport.createMock(Cache.class);
+    Set<String> currentRequestsMock = mockSupport.createMock(Set.class);
 
     expect(helperFactoryMock.getHelper(controllerMock, expectedClusterName)).andReturn(helperMock);
     expect(helperMock.sendGetLogFileNamesRequest(expectedComponentName, expectedHostName)).andReturn(Collections.singleton("/this/is/just/a/test/directory"));
@@ -225,14 +201,14 @@ public class LogSearchDataRetrievalServiceTest {
 
     LogSearchDataRetrievalService.LogSearchFileNameRequestRunnable loggingRunnable =
       new LogSearchDataRetrievalService.LogSearchFileNameRequestRunnable(expectedHostName, expectedComponentName, expectedClusterName,
-                                                                         cacheMock, currentRequestsMock, helperFactoryMock, controllerMock);
+          cacheMock, currentRequestsMock, helperFactoryMock, controllerMock);
     loggingRunnable.run();
 
     mockSupport.verifyAll();
-
   }
 
   @Test
+  @SuppressWarnings("unchecked")
   public void testRunnableWithFailedCallNullHelper() throws Exception {
     final String expectedHostName = "c6401.ambari.apache.org";
     final String expectedComponentName = "DATANODE";
@@ -241,15 +217,11 @@ public class LogSearchDataRetrievalServiceTest {
 
     EasyMockSupport mockSupport = new EasyMockSupport();
 
-    LoggingRequestHelperFactory helperFactoryMock =
-      mockSupport.createMock(LoggingRequestHelperFactory.class);
-    AmbariManagementController controllerMock =
-      mockSupport.createMock(AmbariManagementController.class);
+    LoggingRequestHelperFactory helperFactoryMock = mockSupport.createMock(LoggingRequestHelperFactory.class);
+    AmbariManagementController controllerMock = mockSupport.createMock(AmbariManagementController.class);
 
-    Cache cacheMock =
-      mockSupport.createMock(Cache.class);
-    Set currentRequestsMock =
-      mockSupport.createMock(Set.class);
+    Cache<String, Set<String>> cacheMock = mockSupport.createMock(Cache.class);
+    Set<String> currentRequestsMock = mockSupport.createMock(Set.class);
 
     // return null to simulate an error during helper instance creation
     expect(helperFactoryMock.getHelper(controllerMock, expectedClusterName)).andReturn(null);
@@ -261,7 +233,7 @@ public class LogSearchDataRetrievalServiceTest {
 
     LogSearchDataRetrievalService.LogSearchFileNameRequestRunnable loggingRunnable =
       new LogSearchDataRetrievalService.LogSearchFileNameRequestRunnable(expectedHostName, expectedComponentName, expectedClusterName,
-        cacheMock, currentRequestsMock, helperFactoryMock, controllerMock);
+          cacheMock, currentRequestsMock, helperFactoryMock, controllerMock);
     loggingRunnable.run();
 
     mockSupport.verifyAll();
@@ -269,6 +241,7 @@ public class LogSearchDataRetrievalServiceTest {
   }
 
   @Test
+  @SuppressWarnings("unchecked")
   public void testRunnableWithFailedCallNullResult() throws Exception {
     final String expectedHostName = "c6401.ambari.apache.org";
     final String expectedComponentName = "DATANODE";
@@ -277,17 +250,12 @@ public class LogSearchDataRetrievalServiceTest {
 
     EasyMockSupport mockSupport = new EasyMockSupport();
 
-    LoggingRequestHelperFactory helperFactoryMock =
-      mockSupport.createMock(LoggingRequestHelperFactory.class);
-    AmbariManagementController controllerMock =
-      mockSupport.createMock(AmbariManagementController.class);
-    LoggingRequestHelper helperMock =
-      mockSupport.createMock(LoggingRequestHelper.class);
+    LoggingRequestHelperFactory helperFactoryMock = mockSupport.createMock(LoggingRequestHelperFactory.class);
+    AmbariManagementController controllerMock = mockSupport.createMock(AmbariManagementController.class);
+    LoggingRequestHelper helperMock = mockSupport.createMock(LoggingRequestHelper.class);
 
-    Cache cacheMock =
-      mockSupport.createMock(Cache.class);
-    Set currentRequestsMock =
-      mockSupport.createMock(Set.class);
+    Cache<String, Set<String>> cacheMock = mockSupport.createMock(Cache.class);
+    Set<String> currentRequestsMock = mockSupport.createMock(Set.class);
 
     expect(helperFactoryMock.getHelper(controllerMock, expectedClusterName)).andReturn(helperMock);
     // return null to simulate an error occurring during the LogSearch data request
@@ -300,11 +268,43 @@ public class LogSearchDataRetrievalServiceTest {
 
     LogSearchDataRetrievalService.LogSearchFileNameRequestRunnable loggingRunnable =
       new LogSearchDataRetrievalService.LogSearchFileNameRequestRunnable(expectedHostName, expectedComponentName, expectedClusterName,
-        cacheMock, currentRequestsMock, helperFactoryMock, controllerMock);
+          cacheMock, currentRequestsMock, helperFactoryMock, controllerMock);
     loggingRunnable.run();
 
     mockSupport.verifyAll();
-
   }
 
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testRunnableWithFailedCallEmptyResult() throws Exception {
+    final String expectedHostName = "c6401.ambari.apache.org";
+    final String expectedComponentName = "DATANODE";
+    final String expectedClusterName = "clusterone";
+    final String expectedComponentAndHostName = expectedComponentName + "+" + expectedHostName;
+
+    EasyMockSupport mockSupport = new EasyMockSupport();
+
+    LoggingRequestHelperFactory helperFactoryMock = mockSupport.createMock(LoggingRequestHelperFactory.class);
+    AmbariManagementController controllerMock = mockSupport.createMock(AmbariManagementController.class);
+    LoggingRequestHelper helperMock = mockSupport.createMock(LoggingRequestHelper.class);
+
+    Cache<String, Set<String>> cacheMock = mockSupport.createMock(Cache.class);
+    Set<String> currentRequestsMock = mockSupport.createMock(Set.class);
+
+    expect(helperFactoryMock.getHelper(controllerMock, expectedClusterName)).andReturn(helperMock);
+    // return an empty set to simulate a LogSearch request that finds no log file names
+    expect(helperMock.sendGetLogFileNamesRequest(expectedComponentName, expectedHostName)).andReturn(Collections.EMPTY_SET);
+    // expect that the completed request is removed from the current request set,
+    // even in the event of a failure to obtain the LogSearch data
+    expect(currentRequestsMock.remove(expectedComponentAndHostName)).andReturn(true).once();
+
+    mockSupport.replayAll();
+
+    LogSearchDataRetrievalService.LogSearchFileNameRequestRunnable loggingRunnable =
+      new LogSearchDataRetrievalService.LogSearchFileNameRequestRunnable(expectedHostName, expectedComponentName, expectedClusterName,
+          cacheMock, currentRequestsMock, helperFactoryMock, controllerMock);
+    loggingRunnable.run();
+
+    mockSupport.verifyAll();
+  }
 }

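Aside from the new empty-result test, the changes in this file are mostly mechanical: the Cache and Set mocks are now declared with their generic parameters, and @SuppressWarnings("unchecked") is added on the test methods because EasyMock's createMock(Class) cannot carry type arguments. The resulting idiom, as used throughout the file:

    // the @SuppressWarnings("unchecked") on the test method covers these two lines,
    // since createMock(Cache.class) and createMock(Set.class) return raw types
    Cache<String, Set<String>> cacheMock = mockSupport.createMock(Cache.class);
    Set<String> currentRequestsMock = mockSupport.createMock(Set.class);
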
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/controller/logging/LoggingRequestHelperImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/logging/LoggingRequestHelperImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/logging/LoggingRequestHelperImplTest.java
index b839b64..12b5b69 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/logging/LoggingRequestHelperImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/logging/LoggingRequestHelperImplTest.java
@@ -122,6 +122,7 @@ public class LoggingRequestHelperImplTest {
 
   @Test
   public void testLogQueryRequestBasic() throws Exception {
+    LoggingCookieStore.INSTANCE.getCookiesMap().clear();
     EasyMockSupport mockSupport =
       new EasyMockSupport();
 
@@ -301,6 +302,7 @@ public class LoggingRequestHelperImplTest {
 
   @Test
   public void testLogLevelRequestBasic() throws Exception {
+    LoggingCookieStore.INSTANCE.getCookiesMap().clear();
     EasyMockSupport mockSupport =
       new EasyMockSupport();
 
@@ -391,6 +393,7 @@ public class LoggingRequestHelperImplTest {
 
   @Test
   public void testLogFileNameRequestBasic() throws Exception {
+    LoggingCookieStore.INSTANCE.getCookiesMap().clear();
     final String expectedComponentName = "hdfs_namenode";
 
     EasyMockSupport mockSupport =
@@ -478,6 +481,7 @@ public class LoggingRequestHelperImplTest {
    */
   @Test
   public void testLogQueryRequestBasicCredentialsNotInConfig() throws Exception {
+    LoggingCookieStore.INSTANCE.getCookiesMap().clear();
     final String expectedClusterName = "my-test-cluster";
 
     EasyMockSupport mockSupport =
@@ -656,6 +660,7 @@ public class LoggingRequestHelperImplTest {
 
   @Test
   public void testCreateLogFileTailURI() throws Exception {
+    LoggingCookieStore.INSTANCE.getCookiesMap().clear();
     final String expectedHostName = "c6401.ambari.apache.org";
     final String expectedPort = "61888";
     final String expectedComponentName = "hdfs_namenode";

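Each test in this file now begins by clearing LoggingCookieStore.INSTANCE, so cookies cached by one request-helper test cannot leak into the next. Since every @Test repeats the identical call, an equivalent consolidation would be a single JUnit 4 setup method (a sketch, assuming org.junit.Before is imported; the per-test calls shown in the diff achieve the same isolation):

    @Before
    public void clearSharedCookieStore() {
      // the cookie store is a process-wide singleton shared by all tests
      LoggingCookieStore.INSTANCE.getCookiesMap().clear();
    }
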
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/orm/InMemoryDefaultTestModule.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/InMemoryDefaultTestModule.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/InMemoryDefaultTestModule.java
index 6773176..a4a9e26 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/InMemoryDefaultTestModule.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/InMemoryDefaultTestModule.java
@@ -90,6 +90,7 @@ public class InMemoryDefaultTestModule extends AbstractModule {
     String stacks = "src/test/resources/stacks";
     String version = "src/test/resources/version";
     String sharedResourcesDir = "src/test/resources/";
+    String resourcesDir = "src/test/resources/";
     if (System.getProperty("os.name").contains("Windows")) {
       stacks = ClassLoader.getSystemClassLoader().getResource("stacks").getPath();
       version = new File(new File(ClassLoader.getSystemClassLoader().getResource("").getPath()).getParent(), "version").getPath();
@@ -116,6 +117,10 @@ public class InMemoryDefaultTestModule extends AbstractModule {
       properties.setProperty(Configuration.SHARED_RESOURCES_DIR.getKey(), sharedResourcesDir);
     }
 
+    if (!properties.containsKey(Configuration.RESOURCES_DIR.getKey())) {
+      properties.setProperty(Configuration.RESOURCES_DIR.getKey(), resourcesDir);
+    }
+
     try {
       install(Modules.override(new BeanDefinitionsCachingTestControllerModule(properties)).with(new AbstractModule() {
         @Override

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java
index 2388c11..aafe557 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java
@@ -17,15 +17,11 @@
  */
 package org.apache.ambari.server.orm.dao;
 
-import static org.easymock.EasyMock.createMockBuilder;
-
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Date;
 import java.util.List;
 
-import junit.framework.Assert;
-
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
@@ -40,9 +36,12 @@ import org.apache.ambari.server.orm.entities.ResourceTypeEntity;
 import org.apache.ambari.server.orm.entities.ServiceConfigEntity;
 import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.security.authorization.ResourceType;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.cluster.ClusterImpl;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -393,7 +392,7 @@ public class ServiceConfigDAOTest {
     serviceConfigs = serviceConfigDAO.getLatestServiceConfigs(clusterId, HDP_02);
     Assert.assertEquals(2, serviceConfigs.size());
   }
-  
+
   @Test
   public void testConfiguration() throws Exception{
     initClusterEntities();
@@ -401,17 +400,17 @@ public class ServiceConfigDAOTest {
 
     Assert.assertTrue(!clusterEntity.getClusterConfigEntities().isEmpty());
     Assert.assertTrue(!clusterEntity.getConfigMappingEntities().isEmpty());
-    
+
     Assert.assertEquals(5, clusterEntity.getClusterConfigEntities().size());
     Assert.assertEquals(3, clusterEntity.getConfigMappingEntities().size());
   }
-  
+
   @Test
   public void testGetClusterConfigMappingByStack() throws Exception{
     initClusterEntities();
-    
+
     ClusterEntity clusterEntity = clusterDAO.findByName("c1");
-    
+
     List<ClusterConfigMappingEntity> clusterConfigMappingEntities = clusterDAO.getClusterConfigMappingsByStack(clusterEntity.getClusterId(), HDP_01);
     Assert.assertEquals(2, clusterConfigMappingEntities .size());
 
@@ -420,14 +419,14 @@ public class ServiceConfigDAOTest {
     Assert.assertEquals("version1", tag1);
     String type1 = e1.getType();
     Assert.assertEquals("oozie-site", type1);
-    
+
     ClusterConfigMappingEntity e2 = clusterConfigMappingEntities.get(1);
     String tag2 = e2.getTag();
     Assert.assertEquals("version2", tag2);
     String type2 = e2.getType();
     Assert.assertEquals("oozie-site", type2);
   }
-  
+
   /**
    * Test the get latest configuration query against clusterconfig table with configuration groups inserted
    * */
@@ -435,9 +434,9 @@ public class ServiceConfigDAOTest {
   public void testGetClusterConfigMappingByStackCG() throws Exception{
     initClusterEntitiesWithConfigGroups();
     ClusterEntity clusterEntity = clusterDAO.findByName("c1");
-    
+
     List<ConfigGroupEntity> configGroupEntities = configGroupDAO.findAllByTag("OOZIE");
-    
+
     Assert.assertNotNull(configGroupEntities);
     ConfigGroupEntity configGroupEntity = configGroupEntities.get(0);
     Assert.assertNotNull(configGroupEntity);
@@ -447,7 +446,7 @@ public class ServiceConfigDAOTest {
     Assert.assertEquals("oozie_server", configGroupEntity.getGroupName());
     Assert.assertEquals("OOZIE", configGroupEntity.getTag());
     Assert.assertEquals("oozie server", configGroupEntity.getDescription());
-    
+
     List<ClusterConfigMappingEntity> clusterConfigMappingEntities = clusterDAO.getClusterConfigMappingsByStack(clusterEntity.getClusterId(), HDP_01);
     Assert.assertEquals(2, clusterConfigMappingEntities .size());
 
@@ -456,97 +455,95 @@ public class ServiceConfigDAOTest {
     Assert.assertEquals("version1", tag1);
     String type1 = e1.getType();
     Assert.assertEquals("oozie-site", type1);
-    
+
     ClusterConfigMappingEntity e2 = clusterConfigMappingEntities.get(1);
     String tag2 = e2.getTag();
     Assert.assertEquals("version2", tag2);
     String type2 = e2.getType();
     Assert.assertEquals("oozie-site", type2);
   }
-  
+
   /**
-   * Test  
+   * Test
    *
-   * When the last configuration of a given configuration type to be stored into the clusterconfig table is 
+   * When the last configuration of a given configuration type to be stored into the clusterconfig table is
    * for a configuration group, there is no corresponding entry generated in the clusterconfigmapping.
    *
    * Therefore, the getlatestconfiguration query should skip configuration groups stored in the clusterconfig table.
    *
-   * Test to determine the latest configuration of a given type whose version_tag 
+   * Test to determine the latest configuration of a given type whose version_tag
    * exists in the clusterconfigmapping table.
    *
    * */
   @Test
-  public void testGetLatestClusterConfigMappingByStack() throws Exception{
-    ClusterImpl cluster =
-        createMockBuilder(ClusterImpl.class).
-          addMockedMethod("getSessionManager").
-          addMockedMethod("getClusterName").
-          addMockedMethod("getSessionAttributes").
-          createMock();
-    
+  public void testGetLatestClusterConfigMappingByStack() throws Exception {
+    Clusters clusters = injector.getInstance(Clusters.class);
+    clusters.addCluster("c1", HDP_01);
+
+    Cluster cluster = clusters.getCluster("c1");
+
     initClusterEntities();
-    ClusterEntity clusterEntity = clusterDAO.findByName("c1");
-    List<ClusterConfigMappingEntity> clusterConfigMappingEntities = clusterDAO.getClusterConfigMappingsByStack(clusterEntity.getClusterId(), HDP_01);
-    Collection<ClusterConfigMappingEntity> latestMapingEntities = cluster.getLatestConfigMapping(clusterConfigMappingEntities);
+
+    Collection<ClusterConfigMappingEntity> latestMapingEntities = ((ClusterImpl) cluster).getLatestConfigMappingsForStack(
+        cluster.getClusterId(), HDP_01);
+
     Assert.assertEquals(1, latestMapingEntities.size());
     for(ClusterConfigMappingEntity e: latestMapingEntities){
       Assert.assertEquals("version2", e.getTag());
       Assert.assertEquals("oozie-site", e.getType());
     }
   }
-  
+
   /**
-   * Test  
+   * Test
    *
-   * When the last configuration of a given configuration type to be stored into the clusterconfig table is 
+   * When the last configuration of a given configuration type to be stored into the clusterconfig table is
    * for a configuration group, there is no corresponding entry generated in the clusterconfigmapping.
    *
    * Therefore, the getlatestconfiguration query should skip configuration groups stored in the clusterconfig table.
    *
-   * Test to determine the latest configuration of a given type whose version_tag 
+   * Test to determine the latest configuration of a given type whose version_tag
    * exists in the clusterconfigmapping table.
    *
    * */
   @Test
   public void testGetLatestClusterConfigMappingByStackCG() throws Exception{
-    ClusterImpl cluster =
-        createMockBuilder(ClusterImpl.class).
-          addMockedMethod("getSessionManager").
-          addMockedMethod("getClusterName").
-          addMockedMethod("getSessionAttributes").
-          createMock();
-    
+    Clusters clusters = injector.getInstance(Clusters.class);
+    clusters.addCluster("c1", HDP_01);
+
+    Cluster cluster = clusters.getCluster("c1");
+
     initClusterEntitiesWithConfigGroups();
-    ClusterEntity clusterEntity = clusterDAO.findByName("c1");
-    List<ClusterConfigMappingEntity> clusterConfigMappingEntities = clusterDAO.getClusterConfigMappingsByStack(clusterEntity.getClusterId(), HDP_01);
-    Collection<ClusterConfigMappingEntity> latestMapingEntities = cluster.getLatestConfigMapping(clusterConfigMappingEntities);
+
+    Collection<ClusterConfigMappingEntity> latestMapingEntities = ((ClusterImpl) cluster).getLatestConfigMappingsForStack(
+        cluster.getClusterId(), HDP_01);
+
     Assert.assertEquals(1, latestMapingEntities.size());
     for(ClusterConfigMappingEntity e: latestMapingEntities){
       Assert.assertEquals("version2", e.getTag());
       Assert.assertEquals("oozie-site", e.getType());
     }
   }
-  
+
   private void initClusterEntities() throws Exception{
     String userName = "admin";
-    
+
     ServiceConfigEntity oozieServiceConfigEntity = createServiceConfig("OOZIE", userName, 1L, 1L, System.currentTimeMillis(), null);
     ClusterEntity  clusterEntity = oozieServiceConfigEntity.getClusterEntity();
-    
+
     Long clusterId = clusterEntity.getClusterId();
-    
+
     if(null == clusterId){
       clusterId = 1L;
       clusterEntity.setClusterId(clusterId);
       clusterEntity = clusterDAO.merge(clusterEntity);
     }
-    
+
     StackEntity stackEntityHDP01 = stackDAO.find(HDP_01.getStackName(),HDP_01.getStackVersion());
     StackEntity stackEntityHDP02 = stackDAO.find(HDP_02.getStackName(),HDP_02.getStackVersion());
-    
+
     String oozieSite = "oozie-site";
-    
+
     for (int i = 1; i < 6; i++){
       ClusterConfigEntity entity = new ClusterConfigEntity();
       entity.setClusterEntity(clusterEntity);
@@ -555,22 +552,23 @@ public class ServiceConfigDAOTest {
       entity.setVersion(Long.valueOf(i));
       entity.setTag("version"+i);
       entity.setTimestamp(new Date().getTime());
-      if(i < 4)
+      if(i < 4) {
         entity.setStack(stackEntityHDP01);
-      else
+      } else {
         entity.setStack(stackEntityHDP02);
+      }
       entity.setData("");
       clusterDAO.createConfig(entity);
       clusterEntity.getClusterConfigEntities().add(entity);
       clusterDAO.merge(clusterEntity);
     }
-    
+
     Collection<ClusterConfigMappingEntity> entities = clusterEntity.getConfigMappingEntities();
     if(null == entities){
       entities = new ArrayList<ClusterConfigMappingEntity>();
       clusterEntity.setConfigMappingEntities(entities);
-    }  
-    
+    }
+
     ClusterConfigMappingEntity e1 = new ClusterConfigMappingEntity();
     e1.setClusterEntity(clusterEntity);
     e1.setClusterId(clusterEntity.getClusterId());
@@ -581,7 +579,7 @@ public class ServiceConfigDAOTest {
     e1.setTag("version1");
     entities.add(e1);
     clusterDAO.merge(clusterEntity);
-    
+
     ClusterConfigMappingEntity e2 = new ClusterConfigMappingEntity();
     e2.setClusterEntity(clusterEntity);
     e2.setClusterId(clusterEntity.getClusterId());
@@ -592,7 +590,7 @@ public class ServiceConfigDAOTest {
     e2.setTag("version2");
     entities.add(e2);
     clusterDAO.merge(clusterEntity);
-    
+
     ClusterConfigMappingEntity e3 = new ClusterConfigMappingEntity();
     e3.setClusterEntity(clusterEntity);
     e3.setClusterId(clusterEntity.getClusterId());
@@ -603,25 +601,25 @@ public class ServiceConfigDAOTest {
     e3.setTag("version4");
     entities.add(e3);
     clusterDAO.merge(clusterEntity);
-  } 
-  
+  }
+
   private void initClusterEntitiesWithConfigGroups() throws Exception{
     String userName = "admin";
-    
+
     ServiceConfigEntity oozieServiceConfigEntity = createServiceConfig("OOZIE", userName, 1L, 1L, System.currentTimeMillis(), null);
     ClusterEntity  clusterEntity = oozieServiceConfigEntity.getClusterEntity();
-    
+
     Long clusterId = clusterEntity.getClusterId();
-    
+
     if(null == clusterId){
       clusterId = 1L;
       clusterEntity.setClusterId(clusterId);
       clusterEntity = clusterDAO.merge(clusterEntity);
     }
-    
+
     StackEntity stackEntityHDP01 = stackDAO.find(HDP_01.getStackName(),HDP_01.getStackVersion());
     String oozieSite = "oozie-site";
-    
+
     int count = 3;
     for (int i = 1; i < count; i++){
       ClusterConfigEntity entity = new ClusterConfigEntity();
@@ -637,13 +635,13 @@ public class ServiceConfigDAOTest {
       clusterEntity.getClusterConfigEntities().add(entity);
       clusterDAO.merge(clusterEntity);
     }
-    
+
     Collection<ClusterConfigMappingEntity> entities = clusterEntity.getConfigMappingEntities();
     if(null == entities){
       entities = new ArrayList<ClusterConfigMappingEntity>();
       clusterEntity.setConfigMappingEntities(entities);
-    }  
-    
+    }
+
     ClusterConfigMappingEntity e1 = new ClusterConfigMappingEntity();
     e1.setClusterEntity(clusterEntity);
     e1.setClusterId(clusterEntity.getClusterId());
@@ -654,7 +652,7 @@ public class ServiceConfigDAOTest {
     e1.setTag("version1");
     entities.add(e1);
     clusterDAO.merge(clusterEntity);
-    
+
     ClusterConfigMappingEntity e2 = new ClusterConfigMappingEntity();
     e2.setClusterEntity(clusterEntity);
     e2.setClusterId(clusterEntity.getClusterId());
@@ -665,7 +663,7 @@ public class ServiceConfigDAOTest {
     e2.setTag("version2");
     entities.add(e2);
     clusterDAO.merge(clusterEntity);
-    
+
     ConfigGroupEntity configGroupEntity = new ConfigGroupEntity();
 
     ResourceTypeEntity resourceTypeEntity = resourceTypeDAO.findById(ResourceType.CLUSTER.getId());
@@ -695,9 +693,9 @@ public class ServiceConfigDAOTest {
     List<ClusterConfigEntity> configEntities = new
       ArrayList<ClusterConfigEntity>();
     configEntities.add(configEntity);
-    
+
     configGroupDAO.create(configGroupEntity);
-    
+
     if (configEntities != null && !configEntities.isEmpty()) {
       List<ConfigGroupConfigMappingEntity> configMappingEntities = new
         ArrayList<ConfigGroupConfigMappingEntity>();
@@ -719,7 +717,7 @@ public class ServiceConfigDAOTest {
         configMappingEntities.add(configMappingEntity);
         configGroupConfigMappingDAO.create(configMappingEntity);
       }
-      
+
       configGroupEntity.setConfigGroupConfigMappingEntities(configMappingEntities);
       configGroupDAO.merge(configGroupEntity);
     }

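The substantive change in this file (beyond whitespace cleanup) is that the latest-config-mapping tests no longer assemble a partial mock of ClusterImpl. They register a real cluster through Clusters and call the new getLatestConfigMappingsForStack(clusterId, stackId), which takes the cluster id and stack instead of a pre-fetched mapping list. Condensed from the hunks above:

    Clusters clusters = injector.getInstance(Clusters.class);
    clusters.addCluster("c1", HDP_01);
    Cluster cluster = clusters.getCluster("c1");

    initClusterEntities(); // seeds clusterconfig and clusterconfigmapping rows

    // the cast mirrors the test above; the method is invoked on ClusterImpl
    Collection<ClusterConfigMappingEntity> latest =
        ((ClusterImpl) cluster).getLatestConfigMappingsForStack(cluster.getClusterId(), HDP_01);
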
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulatorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulatorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulatorTest.java
index 1866b12..2840e3d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulatorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulatorTest.java
@@ -38,6 +38,7 @@ import org.apache.ambari.server.orm.entities.PrivilegeEntity;
 import org.apache.ambari.server.orm.entities.UserEntity;
 import org.apache.ambari.server.security.authorization.AmbariLdapUtils;
 import org.apache.ambari.server.security.authorization.Group;
+import org.apache.ambari.server.security.authorization.GroupType;
 import org.apache.ambari.server.security.authorization.LdapServerProperties;
 import org.apache.ambari.server.security.authorization.User;
 import org.apache.ambari.server.security.authorization.Users;
@@ -1576,7 +1577,7 @@ public class AmbariLdapDataPopulatorTest {
     final GroupEntity ldapGroup = new GroupEntity();
     ldapGroup.setGroupId(1);
     ldapGroup.setGroupName("ldapGroup");
-    ldapGroup.setLdapGroup(true);
+    ldapGroup.setGroupType(GroupType.LDAP);
     ldapGroup.setMemberEntities(new HashSet<MemberEntity>());
 
     final User ldapUserWithoutGroup = createLdapUserWithoutGroup();

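One small API shift surfaces in this hunk: whether a group came from LDAP is no longer a boolean flag on GroupEntity but a GroupType enum value (note the new GroupType import). The updated fixture setup, as in the diff:

    GroupEntity ldapGroup = new GroupEntity();
    ldapGroup.setGroupName("ldapGroup");
    // was: ldapGroup.setLdapGroup(true)
    ldapGroup.setGroupType(GroupType.LDAP);
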
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
index 0163024..80a3bc5 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
@@ -49,8 +49,7 @@ import org.apache.ambari.server.orm.entities.HostVersionEntity;
 import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
+import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.RepositoryInfo;
 import org.apache.ambari.server.state.RepositoryVersionState;
@@ -113,6 +112,9 @@ public class ComponentVersionCheckActionTest {
   @Inject
   private ServiceComponentHostFactory serviceComponentHostFactory;
 
+  @Inject
+  private ConfigFactory configFactory;
+
   @Before
   public void setup() throws Exception {
     m_injector = Guice.createInjector(new InMemoryDefaultTestModule());
@@ -399,18 +401,11 @@ public class ComponentVersionCheckActionTest {
     properties.put("a", "a1");
     properties.put("b", "b1");
 
-    Config c1 = new ConfigImpl(cluster, "hdfs-site", properties, propertiesAttributes, m_injector);
+    configFactory.createNew(cluster, "hdfs-site", "version1", properties, propertiesAttributes);
     properties.put("c", "c1");
     properties.put("d", "d1");
 
-    Config c2 = new ConfigImpl(cluster, "core-site", properties, propertiesAttributes, m_injector);
-    Config c3 = new ConfigImpl(cluster, "foo-site", properties, propertiesAttributes, m_injector);
-
-    cluster.addConfig(c1);
-    cluster.addConfig(c2);
-    cluster.addConfig(c3);
-    c1.persist();
-    c2.persist();
-    c3.persist();
+    configFactory.createNew(cluster, "core-site", "version1", properties, propertiesAttributes);
+    configFactory.createNew(cluster, "foo-site", "version1", properties, propertiesAttributes);
   }
 }


[07/25] ambari git commit: Merge from branch-2.5

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
index 7835262..f9fb1f5 100644
--- a/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
@@ -487,6 +487,8 @@ class TestHDP21StackAdvisor(TestCase):
       "hdfs-site": {
         "properties": {
           'dfs.datanode.data.dir': '/hadoop/hdfs/data',
+          'dfs.namenode.name.dir': '/hadoop/hdfs/namenode',
+          'dfs.namenode.checkpoint.dir': '/hadoop/hdfs/namesecondary',
           'dfs.datanode.du.reserved': '10240000000'
         }
       }

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
index dce32d3..9d8ba08 100644
--- a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
@@ -3068,7 +3068,9 @@ class TestHDP22StackAdvisor(TestCase):
           'dfs.namenode.safemode.threshold-pct': '1.000',
           'dfs.datanode.failed.volumes.tolerated': '1',
           'dfs.namenode.handler.count': '25',
-          'dfs.datanode.data.dir': '/path/1,/path/2,/path/3,/path/4'
+          'dfs.datanode.data.dir': '/path/1,/path/2,/path/3,/path/4',
+          'dfs.namenode.name.dir': '/hadoop/hdfs/namenode',
+          'dfs.namenode.checkpoint.dir': '/hadoop/hdfs/namesecondary'
         },
         'property_attributes': {
           'dfs.datanode.failed.volumes.tolerated': {'maximum': '4'},
@@ -3681,12 +3683,9 @@ class TestHDP22StackAdvisor(TestCase):
             "cpu_count" : 6,
             "total_mem" : 50331648,
             "disk_info" : [
-              {"mountpoint" : "/"},
-              {"mountpoint" : "/dev/shm"},
-              {"mountpoint" : "/vagrant"},
-              {"mountpoint" : "/"},
-              {"mountpoint" : "/dev/shm"},
-              {"mountpoint" : "/vagrant"}
+              {"mountpoint" : "/", "type": "ext3"},
+              {"mountpoint" : "/dev/shm", "type": "tmpfs"},
+              {"mountpoint" : "/vagrant", "type": "vboxsf"}
             ],
             "public_host_name" : "c6401.ambari.apache.org",
             "host_name" : "c6401.ambari.apache.org"
@@ -3736,15 +3735,19 @@ class TestHDP22StackAdvisor(TestCase):
           "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor",
           "yarn.nodemanager.linux-container-executor.cgroups.mount-path": "/cgroup",
           "yarn.nodemanager.container-executor.cgroups.mount": "true",
-          "yarn.nodemanager.resource.memory-mb": "39424",
-          "yarn.scheduler.minimum-allocation-mb": "3584",
+          "yarn.nodemanager.resource.memory-mb": "43008",
+          "yarn.scheduler.minimum-allocation-mb": "14336",
           "yarn.scheduler.maximum-allocation-vcores": "4",
           "yarn.scheduler.minimum-allocation-vcores": "1",
           "yarn.nodemanager.resource.cpu-vcores": "4",
           "yarn.nodemanager.container-executor.cgroups.hierarchy": " /yarn",
-          "yarn.scheduler.maximum-allocation-mb": "39424",
+          "yarn.scheduler.maximum-allocation-mb": "43008",
           "yarn.nodemanager.container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler",
-          "hadoop.registry.rm.enabled": "false"
+          "hadoop.registry.rm.enabled": "false",
+          "yarn.timeline-service.leveldb-state-store.path": "/hadoop/yarn/timeline",
+          "yarn.timeline-service.leveldb-timeline-store.path": "/hadoop/yarn/timeline",
+          "yarn.nodemanager.local-dirs": "/hadoop/yarn/local",
+          "yarn.nodemanager.log-dirs": "/hadoop/yarn/log"
         },
         "property_attributes": {
           "yarn.scheduler.minimum-allocation-vcores": {
@@ -3757,18 +3760,19 @@ class TestHDP22StackAdvisor(TestCase):
             "maximum": "49152"
           },
           "yarn.scheduler.minimum-allocation-mb": {
-            "maximum": "39424"
+            "maximum": "43008"
           },
           "yarn.nodemanager.resource.cpu-vcores": {
             "maximum": "12"
           },
           "yarn.scheduler.maximum-allocation-mb": {
-            "maximum": "39424"
+            "maximum": "43008"
           }
         }
       }
     }
 
+    hosts = self.stackAdvisor.filterHostMounts(hosts, services)
     clusterData = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, None)
     self.assertEquals(clusterData['hbaseRam'], 8)
 
@@ -3792,15 +3796,19 @@ class TestHDP22StackAdvisor(TestCase):
           "yarn.nodemanager.linux-container-executor.cgroups.mount-path": "/cgroup",
           "yarn.nodemanager.linux-container-executor.group": "hadoop",
           "yarn.nodemanager.container-executor.cgroups.mount": "true",
-          "yarn.nodemanager.resource.memory-mb": "39424",
-          "yarn.scheduler.minimum-allocation-mb": "3584",
+          "yarn.nodemanager.resource.memory-mb": "43008",
+          "yarn.scheduler.minimum-allocation-mb": "14336",
           "yarn.scheduler.maximum-allocation-vcores": "4",
           "yarn.scheduler.minimum-allocation-vcores": "1",
           "yarn.nodemanager.resource.cpu-vcores": "4",
           "yarn.nodemanager.container-executor.cgroups.hierarchy": " /yarn",
-          "yarn.scheduler.maximum-allocation-mb": "39424",
+          "yarn.scheduler.maximum-allocation-mb": "43008",
           "yarn.nodemanager.container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler",
-          "hadoop.registry.rm.enabled": "false"
+          "hadoop.registry.rm.enabled": "false",
+          "yarn.timeline-service.leveldb-state-store.path": "/hadoop/yarn/timeline",
+          "yarn.timeline-service.leveldb-timeline-store.path": "/hadoop/yarn/timeline",
+          "yarn.nodemanager.local-dirs": "/hadoop/yarn/local",
+          "yarn.nodemanager.log-dirs": "/hadoop/yarn/log"
         },
         "property_attributes": {
           "yarn.nodemanager.container-executor.cgroups.mount": {
@@ -3822,13 +3830,13 @@ class TestHDP22StackAdvisor(TestCase):
             "maximum": "49152"
           },
           "yarn.scheduler.minimum-allocation-mb": {
-            "maximum": "39424"
+            "maximum": "43008"
           },
           "yarn.nodemanager.resource.cpu-vcores": {
             "maximum": "12"
           },
           "yarn.scheduler.maximum-allocation-mb": {
-            "maximum": "39424"
+            "maximum": "43008"
           },
           "yarn.nodemanager.container-executor.resources-handler.class": {
             "delete": "true"

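The host in this scenario now reports a filesystem type per mount (ext3, tmpfs, vboxsf), the duplicated disk_info entries are gone, and hosts is passed through filterHostMounts(hosts, services) before getConfigurationClusterSummary runs. The expected recommendations gain explicit local-directory properties (yarn.nodemanager.local-dirs, yarn.nodemanager.log-dirs, the timeline-service leveldb paths), which suggests the advisor now screens out non-local mounts before proposing directories. A minimal Java rendering of that idea (the real logic is the Python filterHostMounts in the stack advisor, whose exact rules are not shown here; the ignore list below is illustrative):

    // Hypothetical sketch (needs java.util imports): drop mounts whose
    // filesystem type marks them as non-local.
    static List<Map<String, String>> usableMounts(List<Map<String, String>> diskInfo) {
      Set<String> ignoredFsTypes = new HashSet<String>(Arrays.asList("tmpfs", "vboxsf"));
      List<Map<String, String>> result = new ArrayList<Map<String, String>>();
      for (Map<String, String> disk : diskInfo) {
        if (!ignoredFsTypes.contains(disk.get("type"))) {
          result.add(disk);
        }
      }
      return result;
    }
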
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
index 39cbbce..4a77086 100644
--- a/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
@@ -587,7 +587,8 @@ class TestHDP23StackAdvisor(TestCase):
           'hive_exec_orc_storage_strategy': 'SPEED',
           'hive_security_authorization': 'None',
           'hive_timeline_logging_enabled': 'true',
-          'hive_txn_acid': 'off'
+          'hive_txn_acid': 'off',
+          'hive.atlas.hook': 'false'
         }
       },
       'hive-site': {
@@ -745,6 +746,7 @@ class TestHDP23StackAdvisor(TestCase):
         },
         "hive-env": {
           "properties": {
+            "hive.atlas.hook": "false"
           }
         },
         "hive-site": {
@@ -863,7 +865,8 @@ class TestHDP23StackAdvisor(TestCase):
           'hive_exec_orc_storage_strategy': 'SPEED',
           'hive_security_authorization': 'None',
           'hive_timeline_logging_enabled': 'true',
-          'hive_txn_acid': 'off'
+          'hive_txn_acid': 'off',
+          'hive.atlas.hook': 'true'
         }
       },
       'hive-site': {
@@ -1045,6 +1048,7 @@ class TestHDP23StackAdvisor(TestCase):
         },
         "hive-env": {
           "properties": {
+            "hive.atlas.hook": "false"
           }
         },
         "hive-site": {
@@ -1462,6 +1466,31 @@ class TestHDP23StackAdvisor(TestCase):
             }
           ]
         },
+        {
+          "href": "/api/v1/stacks/HDP/versions/2.3/services/KNOX",
+          "StackServices": {
+            "service_name": "KNOX",
+            "service_version": "0.9.0.2.3",
+            "stack_name": "HDP",
+            "stack_version": "2.3"
+          },
+          "components": [
+            {
+              "href": "/api/v1/stacks/HDP/versions/2.3/services/KNOX/components/KNOX_GATEWAY",
+              "StackServiceComponents": {
+                "advertise_version": "false",
+                "cardinality": "1+",
+                "component_category": "MASTER",
+                "component_name": "KNOX_GATEWAY",
+                "display_name": "Knox Gateway",
+                "is_client": "false",
+                "is_master": "true",
+                "hostnames": ["c6401.ambari.apache.org"]
+              },
+              "dependencies": []
+            }
+          ]
+        }
         ],
       "configurations": {
         "admin-properties": {
@@ -1473,6 +1502,7 @@ class TestHDP23StackAdvisor(TestCase):
           "properties": {
             "ranger.service.http.port": "7777",
             "ranger.service.http.enabled": "true",
+            "ranger.sso.providerurl": "",
             }
         }
       },
@@ -1515,7 +1545,8 @@ class TestHDP23StackAdvisor(TestCase):
       'ranger-admin-site': {
         'properties': {
           "ranger.audit.solr.zookeepers": "NONE",
-          "ranger.audit.source.type": "solr"
+          "ranger.audit.source.type": "solr",
+          "ranger.sso.providerurl": "https://c6401.ambari.apache.org:8443/gateway/knoxsso/api/v1/websso"
         }
       },
       'ranger-env': {
@@ -1733,6 +1764,11 @@ class TestHDP23StackAdvisor(TestCase):
         "properties": {
           "ranger-storm-plugin-enabled": "No"
         }
+      },
+      "storm-env": {
+        "properties": {
+          "storm.atlas.hook": "true"
+        }
       }
     }
     services = {
@@ -1773,6 +1809,11 @@ class TestHDP23StackAdvisor(TestCase):
           "properties": {
             "ranger-storm-plugin-enabled": "No"
           }
+        },
+        "storm-env": {
+          "properties": {
+          "storm.atlas.hook": "false"
+          }
         }
       },
       "changed-configurations": [ ]
@@ -1830,6 +1871,11 @@ class TestHDP23StackAdvisor(TestCase):
         'properties': {
           'sqoop.job.data.publish.class': 'org.apache.atlas.sqoop.hook.SqoopHook',
         }
+      },
+      'sqoop-env': {
+        'properties': {
+          'sqoop.atlas.hook': 'true'
+        }
       }
     }
     services = {
@@ -1864,6 +1910,11 @@ class TestHDP23StackAdvisor(TestCase):
           "properties": {
             "sqoop.job.data.publish.class": "foo"
           }
+        },
+        "sqoop-env": {
+          "properties": {
+            "sqoop.atlas.hook": "false"
+          }
         }
       },
       "changed-configurations": [ ]

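Two recommendation patterns recur in the HDP 2.3 stack advisor test changes above. First, each integration's *-env now carries an Atlas hook switch (hive.atlas.hook, storm.atlas.hook, sqoop.atlas.hook); the expected value is "true" in the scenarios where the hook should be wired in and "false" otherwise, so the advisor appears to toggle the flag per service rather than relying on one global setting. Second, once a KNOX service with a KNOX_GATEWAY on c6401.ambari.apache.org is added to the service list, the expected ranger-admin-site recommendation fills ranger.sso.providerurl with https://c6401.ambari.apache.org:8443/gateway/knoxsso/api/v1/websso, which suggests the advisor derives the KnoxSSO endpoint from the gateway host (the port and path here are read off the expected test value, not from the advisor code itself).
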
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/python/stacks/2.4/AMBARI_INFRA/test_infra_solr.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.4/AMBARI_INFRA/test_infra_solr.py b/ambari-server/src/test/python/stacks/2.4/AMBARI_INFRA/test_infra_solr.py
index 005283f..6af463c 100644
--- a/ambari-server/src/test/python/stacks/2.4/AMBARI_INFRA/test_infra_solr.py
+++ b/ambari-server/src/test/python/stacks/2.4/AMBARI_INFRA/test_infra_solr.py
@@ -95,7 +95,7 @@ class TestInfraSolr(RMFTestCase):
                                 content = InlineTemplate(self.getConfig()['configurations']['infra-solr-log4j']['content'])
       )
 
-      self.assertResourceCalled('Execute', 'ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181 --znode /infra-solr --create-znode --retry 5 --interval 10')
+      self.assertResourceCalled('Execute', 'ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181 --znode /infra-solr --create-znode --retry 30 --interval 5')
       self.assertResourceCalled('Execute', 'ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --cluster-prop --property-name urlScheme --property-value http')
       self.assertResourceCalled('Execute', 'ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181 --znode /infra-solr --setup-kerberos-plugin')
 
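
The only change in this assertion (and in the LOGSEARCH test below) is the retry policy for the znode bootstrap call to solrCloudCli.sh. Assuming --retry is the number of attempts and --interval the pause between attempts in seconds, the worst-case wait for the /infra-solr znode grows roughly from

    5 retries x 10 s  =  50 s

to

    30 retries x 5 s  = 150 s

with shorter gaps between polls; this also matches the --retry 30 --interval 5 settings the --download-config assertion in the LOGSEARCH test already uses.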

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/python/stacks/2.4/LOGSEARCH/test_logsearch.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.4/LOGSEARCH/test_logsearch.py b/ambari-server/src/test/python/stacks/2.4/LOGSEARCH/test_logsearch.py
index 6b4fab8..df54cd8 100644
--- a/ambari-server/src/test/python/stacks/2.4/LOGSEARCH/test_logsearch.py
+++ b/ambari-server/src/test/python/stacks/2.4/LOGSEARCH/test_logsearch.py
@@ -129,7 +129,7 @@ class TestLogSearch(RMFTestCase):
                               content = InlineTemplate(self.getConfig()['configurations']['logsearch-audit_logs-solrconfig']['content'])
                               )
 
-    self.assertResourceCalled('Execute', 'ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181 --znode /infra-solr --check-znode --retry 5 --interval 10')
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181 --znode /infra-solr --check-znode --retry 30 --interval 5')
     self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --download-config --config-dir /tmp/solr_config_hadoop_logs_0.[0-9]* --config-set hadoop_logs --retry 30 --interval 5')
     self.assertResourceCalledRegexp('^File$', '^/tmp/solr_config_hadoop_logs_0.[0-9]*',
                                     content=InlineTemplate(self.getConfig()['configurations']['logsearch-service_logs-solrconfig']['content']),


[03/25] ambari git commit: Merge from branch-2.5

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-secured.json b/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-secured.json
new file mode 100644
index 0000000..030b717
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-secured.json
@@ -0,0 +1,773 @@
+{
+    "localComponents": [
+        "SECONDARY_NAMENODE", 
+        "HDFS_CLIENT", 
+        "DATANODE", 
+        "NAMENODE", 
+        "RANGER_ADMIN", 
+        "RANGER_TAGSYNC", 
+        "RANGER_USERSYNC", 
+        "ZOOKEEPER_SERVER", 
+        "ZOOKEEPER_CLIENT", 
+        "KERBEROS_CLIENT",
+        "LOGSEARCH_SOLR",
+        "LOGSEARCH_SOLR_CLIENT"
+    ], 
+    "configuration_attributes": {
+        "ranger-hdfs-audit": {}, 
+        "ssl-client": {}, 
+        "ranger-admin-site": {}, 
+        "ranger-hdfs-policymgr-ssl": {}, 
+        "tagsync-application-properties": {}, 
+        "ranger-env": {}, 
+        "usersync-log4j": {}, 
+        "ranger-hdfs-plugin-properties": {}, 
+        "kerberos-env": {}, 
+        "admin-properties": {}, 
+        "ranger-ugsync-site": {}, 
+        "hdfs-site": {
+            "final": {
+                "dfs.datanode.data.dir": "true", 
+                "dfs.namenode.http-address": "true", 
+                "dfs.datanode.failed.volumes.tolerated": "true", 
+                "dfs.support.append": "true", 
+                "dfs.namenode.name.dir": "true", 
+                "dfs.webhdfs.enabled": "true"
+            }
+        }, 
+        "ranger-tagsync-site": {}, 
+        "zoo.cfg": {}, 
+        "hadoop-policy": {}, 
+        "hdfs-log4j": {}, 
+        "krb5-conf": {}, 
+        "core-site": {
+            "final": {
+                "fs.defaultFS": "true"
+            }
+        }, 
+        "hadoop-env": {}, 
+        "zookeeper-log4j": {}, 
+        "ssl-server": {}, 
+        "ranger-site": {}, 
+        "admin-log4j": {}, 
+        "tagsync-log4j": {}, 
+        "ranger-hdfs-security": {}, 
+        "usersync-properties": {}, 
+        "zookeeper-env": {},
+        "infra-solr-env": {},
+        "infra-solr-client-log4j": {},
+        "cluster-env": {}
+    }, 
+    "public_hostname": "c6401.ambari.apache.org", 
+    "commandId": "41-2", 
+    "hostname": "c6401.ambari.apache.org", 
+    "kerberosCommandParams": [], 
+    "serviceName": "RANGER", 
+    "role": "RANGER_ADMIN", 
+    "forceRefreshConfigTagsBeforeExecution": [], 
+    "requestId": 41, 
+    "agentConfigParams": {
+        "agent": {
+            "parallel_execution": 0
+        }
+    }, 
+    "clusterName": "test_Cluster01", 
+    "commandType": "EXECUTION_COMMAND", 
+    "taskId": 186, 
+    "roleParams": {}, 
+    "configurationTags": {
+        "ranger-hdfs-audit": {
+            "tag": "version1466705299922"
+        }, 
+        "ssl-client": {
+            "tag": "version1"
+        }, 
+        "ranger-admin-site": {
+            "tag": "version1467016680635"
+        }, 
+        "ranger-hdfs-policymgr-ssl": {
+            "tag": "version1466705299922"
+        }, 
+        "tagsync-application-properties": {
+            "tag": "version1467016680511"
+        }, 
+        "ranger-env": {
+            "tag": "version1466705299949"
+        }, 
+        "ranger-ugsync-site": {
+            "tag": "version1467016680537"
+        }, 
+        "ranger-hdfs-plugin-properties": {
+            "tag": "version1466705299922"
+        }, 
+        "kerberos-env": {
+            "tag": "version1467016537243"
+        }, 
+        "admin-properties": {
+            "tag": "version1466705299949"
+        }, 
+        "hdfs-site": {
+            "tag": "version1467016680401"
+        }, 
+        "ranger-tagsync-site": {
+            "tag": "version1467016680586"
+        }, 
+        "zoo.cfg": {
+            "tag": "version1"
+        }, 
+        "hadoop-policy": {
+            "tag": "version1"
+        }, 
+        "hdfs-log4j": {
+            "tag": "version1"
+        }, 
+        "usersync-log4j": {
+            "tag": "version1466705299949"
+        }, 
+        "krb5-conf": {
+            "tag": "version1467016537243"
+        }, 
+        "core-site": {
+            "tag": "version1467016680612"
+        }, 
+        "hadoop-env": {
+            "tag": "version1467016680446"
+        }, 
+        "zookeeper-log4j": {
+            "tag": "version1"
+        }, 
+        "ssl-server": {
+            "tag": "version1"
+        }, 
+        "ranger-site": {
+            "tag": "version1466705299949"
+        }, 
+        "admin-log4j": {
+            "tag": "version1466705299949"
+        }, 
+        "tagsync-log4j": {
+            "tag": "version1466705299949"
+        }, 
+        "ranger-hdfs-security": {
+            "tag": "version1466705299922"
+        }, 
+        "usersync-properties": {
+            "tag": "version1466705299949"
+        }, 
+        "zookeeper-env": {
+            "tag": "version1467016680492"
+        },
+        "infra-solr-env": {
+            "tag": "version1467098537360"
+        },
+        "infra-solr-client-log4j": {
+            "tag": "version1467096917836"
+        },
+        "cluster-env": {
+            "tag": "version1467016680567"
+        }
+    }, 
+    "roleCommand": "START", 
+    "hostLevelParams": {
+        "agent_stack_retry_on_unavailability": "false", 
+        "stack_name": "HDP", 
+        "package_version": "2_5_0_0_*", 
+        "custom_mysql_jdbc_name": "mysql-connector-java.jar",
+        "previous_custom_mysql_jdbc_name": "mysql-connector-java-old.jar",
+        "host_sys_prepped": "false", 
+        "ambari_db_rca_username": "mapred", 
+        "current_version": "2.6.0.0-801",
+        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar", 
+        "agent_stack_retry_count": "5", 
+        "stack_version": "2.6",
+        "jdk_name": "jdk-8u60-linux-x64.tar.gz", 
+        "ambari_db_rca_driver": "org.postgresql.Driver", 
+        "java_home": "/usr/jdk64/jdk1.7.0_45",
+        "repository_version_id": "1", 
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
+        "not_managed_hdfs_path_list": "[\"/tmp\"]", 
+        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
+        "java_version": "8", 
+        "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]", 
+        "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]", 
+        "db_name": "ambari", 
+        "group_list": "[\"ranger\",\"hadoop\",\"users\"]", 
+        "agentCacheDir": "/var/lib/ambari-agent/cache", 
+        "ambari_db_rca_password": "mapred", 
+        "jce_name": "jce_policy-8.zip", 
+        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
+        "db_driver_filename": "mysql-connector-java.jar", 
+        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]", 
+        "clientsToUpdateConfigs": "[\"*\"]"
+    }, 
+    "commandParams": {
+        "service_package_folder": "common-services/RANGER/0.4.0/package", 
+        "script": "scripts/ranger_admin.py", 
+        "hooks_folder": "HDP/2.0.6/hooks", 
+        "version": "2.6.0.0-801",
+        "max_duration_for_retries": "0", 
+        "command_retry_enabled": "false", 
+        "command_timeout": "600", 
+        "script_type": "PYTHON"
+    }, 
+    "forceRefreshConfigTags": [], 
+    "stageId": 2, 
+    "clusterHostInfo": {
+        "snamenode_host": [
+            "c6401.ambari.apache.org"
+        ], 
+        "ambari_server_use_ssl": [
+            "false"
+        ], 
+        "all_ping_ports": [
+            "8670"
+        ], 
+        "ranger_tagsync_hosts": [
+            "c6401.ambari.apache.org"
+        ], 
+        "ranger_usersync_hosts": [
+            "c6401.ambari.apache.org"
+        ], 
+        "all_hosts": [
+            "c6401.ambari.apache.org"
+        ], 
+        "slave_hosts": [
+            "c6401.ambari.apache.org"
+        ], 
+        "namenode_host": [
+            "c6401.ambari.apache.org"
+        ], 
+        "ambari_server_port": [
+            "8080"
+        ], 
+        "ranger_admin_hosts": [
+            "c6401.ambari.apache.org"
+        ], 
+        "all_racks": [
+            "/default-rack"
+        ], 
+        "all_ipv4_ips": [
+            "172.22.83.73"
+        ], 
+        "ambari_server_host": [
+            "c6401.ambari.apache.org"
+        ], 
+        "zookeeper_hosts": [
+            "c6401.ambari.apache.org"
+        ],
+        "infra_solr_hosts": [
+            "c6401.ambari.apache.org"
+        ]
+    }, 
+    "configurations": {
+        "ranger-hdfs-audit": {
+            "xasecure.audit.destination.solr.zookeepers": "NONE", 
+            "xasecure.audit.destination.solr.urls": "", 
+            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool", 
+            "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool",
+            "xasecure.audit.destination.hdfs": "true", 
+            "xasecure.audit.destination.solr": "false",
+            "xasecure.audit.provider.summary.enabled": "false", 
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
+            "xasecure.audit.is.enabled": "true"
+        }, 
+        "ssl-client": {
+            "ssl.client.truststore.reload.interval": "10000", 
+            "ssl.client.keystore.password": "bigdata", 
+            "ssl.client.truststore.type": "jks", 
+            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks", 
+            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks", 
+            "ssl.client.truststore.password": "bigdata", 
+            "ssl.client.keystore.type": "jks"
+        }, 
+        "ranger-admin-site": {
+            "ranger.is.solr.kerberised": "true",
+            "ranger.admin.kerberos.cookie.domain": "{{ranger_host}}", 
+            "ranger.kms.service.user.hdfs": "hdfs", 
+            "ranger.spnego.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
+            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}", 
+            "ranger.plugins.hive.serviceuser": "hive", 
+            "ranger.lookup.kerberos.keytab": "/etc/security/keytabs/rangerlookup.service.keytab", 
+            "ranger.plugins.kms.serviceuser": "kms", 
+            "ranger.service.https.attrib.ssl.enabled": "false", 
+            "ranger.sso.browser.useragent": "Mozilla,chrome", 
+            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01", 
+            "ranger.plugins.hbase.serviceuser": "hbase", 
+            "ranger.plugins.hdfs.serviceuser": "hdfs", 
+            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}", 
+            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net", 
+            "ranger.plugins.knox.serviceuser": "knox", 
+            "ranger.ldap.base.dn": "dc=example,dc=com", 
+            "ranger.sso.publicKey": "", 
+            "ranger.admin.kerberos.cookie.path": "/", 
+            "ranger.service.https.attrib.clientAuth": "want", 
+            "ranger.jpa.jdbc.user": "{{ranger_db_user}}", 
+            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})", 
+            "ranger.ldap.group.roleattribute": "cn", 
+            "ranger.plugins.kafka.serviceuser": "kafka", 
+            "ranger.admin.kerberos.principal": "rangeradmin/_HOST@EXAMPLE.COM", 
+            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
+            "ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks",
+            "ranger.ldap.referral": "ignore", 
+            "ranger.service.http.port": "6080", 
+            "ranger.ldap.user.searchfilter": "(uid={0})", 
+            "ranger.plugins.atlas.serviceuser": "atlas", 
+            "ranger.truststore.password": "changeit", 
+            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
+            "ranger.audit.solr.password": "NONE", 
+            "ranger.audit.solr.zookeepers": "c6401.ambari.apache.org:2181/ambari-solr", 
+            "ranger.lookup.kerberos.principal": "rangerlookup/_HOST@EXAMPLE.COM",
+            "ranger.service.https.port": "6182", 
+            "ranger.plugins.storm.serviceuser": "storm", 
+            "ranger.externalurl": "{{ranger_external_url}}", 
+            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
+            "ranger.kms.service.user.hive": "", 
+            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
+            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}", 
+            "ranger.service.host": "{{ranger_host}}", 
+            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin", 
+            "ranger.service.https.attrib.keystore.pass": "xasecure", 
+            "ranger.unixauth.remote.login.enabled": "true", 
+            "ranger.jpa.jdbc.credential.alias": "rangeradmin",
+            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
+            "ranger.audit.solr.username": "ranger_solr", 
+            "ranger.sso.enabled": "false", 
+            "ranger.audit.solr.urls": "", 
+            "ranger.ldap.ad.domain": "", 
+            "ranger.plugins.yarn.serviceuser": "yarn", 
+            "ranger.audit.source.type": "solr", 
+            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
+            "ranger.ldap.url": "{{ranger_ug_ldap_url}}", 
+            "ranger.authentication.method": "UNIX", 
+            "ranger.service.http.enabled": "true", 
+            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}", 
+            "ranger.ldap.ad.referral": "ignore", 
+            "ranger.ldap.ad.base.dn": "dc=example,dc=com", 
+            "ranger.jpa.jdbc.password": "_", 
+            "ranger.spnego.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
+            "ranger.sso.providerurl": "", 
+            "ranger.unixauth.service.hostname": "{{ugsync_host}}", 
+            "ranger.admin.kerberos.keytab": "/etc/security/keytabs/rangeradmin.service.keytab", 
+            "ranger.admin.kerberos.token.valid.seconds": "30", 
+            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver", 
+            "ranger.unixauth.service.port": "5151"
+        }, 
+        "ranger-hdfs-policymgr-ssl": {
+            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks", 
+            "xasecure.policymgr.clientssl.truststore.password": "changeit", 
+            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", 
+            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks", 
+            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}", 
+            "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
+        }, 
+        "tagsync-application-properties": {
+            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181", 
+            "atlas.kafka.security.protocol": "SASL_PLAINTEXT", 
+            "atlas.jaas.KafkaClient.option.principal": "{{tagsync_jaas_principal}}", 
+            "atlas.jaas.KafkaClient.option.keyTab": "{{tagsync_keytab_path}}", 
+            "atlas.kafka.entities.group.id": "ranger_entities_consumer", 
+            "atlas.jaas.KafkaClient.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule", 
+            "atlas.jaas.KafkaClient.option.serviceName": "kafka", 
+            "atlas.kafka.bootstrap.servers": "localhost:6667", 
+            "atlas.jaas.KafkaClient.option.useKeyTab": "true", 
+            "atlas.jaas.KafkaClient.option.storeKey": "true", 
+            "atlas.jaas.KafkaClient.loginModuleControlFlag": "required", 
+            "atlas.kafka.sasl.kerberos.service.name": "kafka"
+        }, 
+        "ranger-env": {
+            "ranger_solr_shards": "1", 
+            "ranger_solr_config_set": "ranger_audits", 
+            "ranger_user": "ranger", 
+            "ranger_solr_replication_factor": "1",
+            "xml_configurations_supported": "true", 
+            "ranger-atlas-plugin-enabled": "No", 
+            "ranger-hbase-plugin-enabled": "No", 
+            "ranger-yarn-plugin-enabled": "No", 
+            "bind_anonymous": "false", 
+            "ranger_admin_username": "amb_ranger_admin", 
+            "admin_password": "admin", 
+            "is_solrCloud_enabled": "true", 
+            "ranger-storm-plugin-enabled": "No", 
+            "ranger-hdfs-plugin-enabled": "No", 
+            "ranger_group": "ranger", 
+            "ranger-knox-plugin-enabled": "No", 
+            "ranger_admin_log_dir": "/var/log/ranger/admin", 
+            "ranger-kafka-plugin-enabled": "No", 
+            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306", 
+            "ranger-hive-plugin-enabled": "No", 
+            "xasecure.audit.destination.solr": "true", 
+            "ranger_pid_dir": "/var/run/ranger", 
+            "xasecure.audit.destination.hdfs": "true", 
+            "admin_username": "admin", 
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
+            "create_db_dbuser": "true", 
+            "ranger_solr_collection_name": "ranger_audits", 
+            "ranger_admin_password": "P1!q9xa96SMi5NCl", 
+            "ranger_usersync_log_dir": "/var/log/ranger/usersync"
+        }, 
+        "usersync-log4j": {
+            "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/usersync.
 log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n"
+        }, 
+        "ranger-hdfs-plugin-properties": {
+            "hadoop.rpc.protection": "authentication", 
+            "ranger-hdfs-plugin-enabled": "No", 
+            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
+            "policy_user": "ambari-qa", 
+            "common.name.for.certificate": "", 
+            "REPOSITORY_CONFIG_PASSWORD": "hadoop"
+        }, 
+        "kerberos-env": {
+            "kdc_hosts": "c6401.ambari.apache.org", 
+            "manage_auth_to_local": "true", 
+            "install_packages": "true", 
+            "realm": "EXAMPLE.COM", 
+            "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5", 
+            "ad_create_attributes_template": "\n{\n  \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n  \"cn\": \"$principal_name\",\n  #if( $is_service )\n  \"servicePrincipalName\": \"$principal_name\",\n  #end\n  \"userPrincipalName\": \"$normalized_principal\",\n  \"unicodePwd\": \"$password\",\n  \"accountExpires\": \"0\",\n  \"userAccountControl\": \"66048\"\n}", 
+            "kdc_create_attributes": "", 
+            "admin_server_host": "c6401.ambari.apache.org", 
+            "group": "ambari-managed-principals", 
+            "password_length": "20", 
+            "ldap_url": "", 
+            "manage_identities": "true", 
+            "password_min_lowercase_letters": "1", 
+            "create_ambari_principal": "true", 
+            "service_check_principal_name": "${cluster_name|toLower()}-${short_date}", 
+            "executable_search_paths": "/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin", 
+            "password_chat_timeout": "5", 
+            "kdc_type": "mit-kdc", 
+            "set_password_expiry": "false", 
+            "password_min_punctuation": "1", 
+            "container_dn": "", 
+            "case_insensitive_username_rules": "false", 
+            "password_min_whitespace": "0", 
+            "password_min_uppercase_letters": "1", 
+            "password_min_digits": "1"
+        }, 
+        "admin-properties": {
+            "db_user": "rangeradmin01", 
+            "DB_FLAVOR": "MYSQL", 
+            "db_password": "rangeradmin01", 
+            "db_root_user": "root", 
+            "policymgr_external_url": "http://c6401.ambari.apache.org:6080", 
+            "db_name": "ranger01", 
+            "db_host": "c6401.ambari.apache.org", 
+            "db_root_password": "vagrant", 
+            "SQL_CONNECTOR_JAR": "{{driver_curl_target}}"
+        }, 
+        "ranger-ugsync-site": {
+            "ranger.usersync.ldap.binddn": "", 
+            "ranger.usersync.policymgr.username": "rangerusersync", 
+            "ranger.usersync.policymanager.mockrun": "false", 
+            "ranger.usersync.group.searchbase": "", 
+            "ranger.usersync.ldap.bindalias": "testldapalias", 
+            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks", 
+            "ranger.usersync.port": "5151", 
+            "ranger.usersync.pagedresultssize": "500", 
+            "ranger.usersync.group.memberattributename": "", 
+            "ranger.usersync.kerberos.principal": "rangerusersync/_HOST@EXAMPLE.COM", 
+            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder", 
+            "ranger.usersync.ldap.referral": "ignore", 
+            "ranger.usersync.group.searchfilter": "", 
+            "ranger.usersync.ldap.user.objectclass": "person", 
+            "ranger.usersync.logdir": "{{usersync_log_dir}}", 
+            "ranger.usersync.ldap.user.searchfilter": "", 
+            "ranger.usersync.ldap.groupname.caseconversion": "none", 
+            "ranger.usersync.ldap.ldapbindpassword": "", 
+            "ranger.usersync.unix.minUserId": "500", 
+            "ranger.usersync.policymanager.maxrecordsperapicall": "1000", 
+            "ranger.usersync.group.nameattribute": "", 
+            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password", 
+            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks", 
+            "ranger.usersync.user.searchenabled": "false", 
+            "ranger.usersync.group.usermapsyncenabled": "true", 
+            "ranger.usersync.ldap.bindkeystore": "", 
+            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof", 
+            "ranger.usersync.kerberos.keytab": "/etc/security/keytabs/rangerusersync.service.keytab", 
+            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe", 
+            "ranger.usersync.group.objectclass": "", 
+            "ranger.usersync.ldap.user.searchscope": "sub", 
+            "ranger.usersync.unix.password.file": "/etc/passwd", 
+            "ranger.usersync.ldap.user.nameattribute": "", 
+            "ranger.usersync.pagedresultsenabled": "true", 
+            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}", 
+            "ranger.usersync.group.search.first.enabled": "false", 
+            "ranger.usersync.group.searchenabled": "false", 
+            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder", 
+            "ranger.usersync.ssl": "true", 
+            "ranger.usersync.ldap.url": "", 
+            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org", 
+            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
+            "ranger.usersync.ldap.user.searchbase": "", 
+            "ranger.usersync.ldap.username.caseconversion": "none", 
+            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
+            "ranger.usersync.keystore.password": "UnIx529p", 
+            "ranger.usersync.unix.group.file": "/etc/group", 
+            "ranger.usersync.filesource.file": "/tmp/usergroup.txt", 
+            "ranger.usersync.group.searchscope": "", 
+            "ranger.usersync.truststore.password": "changeit", 
+            "ranger.usersync.enabled": "true", 
+            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000", 
+            "ranger.usersync.filesource.text.delimiter": ","
+        }, 
+        "hdfs-site": {
+            "dfs.namenode.checkpoint.period": "21600", 
+            "dfs.namenode.avoid.write.stale.datanode": "true", 
+            "dfs.permissions.superusergroup": "hdfs", 
+            "dfs.namenode.startup.delay.block.deletion.sec": "3600", 
+            "dfs.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM", 
+            "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM", 
+            "dfs.heartbeat.interval": "3", 
+            "dfs.content-summary.limit": "5000", 
+            "dfs.support.append": "true", 
+            "dfs.datanode.address": "0.0.0.0:1019", 
+            "dfs.cluster.administrators": " hdfs", 
+            "dfs.namenode.audit.log.async": "true", 
+            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
+            "dfs.namenode.safemode.threshold-pct": "1", 
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
+            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020", 
+            "dfs.permissions.enabled": "true", 
+            "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
+            "dfs.client.read.shortcircuit": "true", 
+            "dfs.https.port": "50470", 
+            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470", 
+            "nfs.file.dump.dir": "/tmp/.hdfs-nfs", 
+            "dfs.blocksize": "134217728", 
+            "dfs.blockreport.initialDelay": "120", 
+            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode", 
+            "dfs.namenode.fslock.fair": "false", 
+            "dfs.datanode.max.transfer.threads": "4096", 
+            "dfs.secondary.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM", 
+            "dfs.replication": "3", 
+            "dfs.namenode.handler.count": "50", 
+            "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
+            "fs.permissions.umask-mode": "022", 
+            "dfs.namenode.stale.datanode.interval": "30000", 
+            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
+            "dfs.datanode.failed.volumes.tolerated": "0", 
+            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data", 
+            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070", 
+            "dfs.webhdfs.enabled": "true", 
+            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding", 
+            "dfs.namenode.accesstime.precision": "0", 
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
+            "dfs.datanode.https.address": "0.0.0.0:50475", 
+            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary", 
+            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090", 
+            "nfs.exports.allowed.hosts": "* rw", 
+            "dfs.namenode.checkpoint.txns": "1000000", 
+            "dfs.datanode.http.address": "0.0.0.0:1022", 
+            "dfs.datanode.du.reserved": "33011188224", 
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
+            "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
+            "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
+            "dfs.http.policy": "HTTP_ONLY", 
+            "dfs.block.access.token.enable": "true", 
+            "dfs.client.retry.policy.enabled": "false", 
+            "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
+            "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab", 
+            "dfs.namenode.name.dir.restore": "true", 
+            "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
+            "dfs.journalnode.https-address": "0.0.0.0:8481", 
+            "dfs.journalnode.http-address": "0.0.0.0:8480", 
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
+            "dfs.namenode.avoid.read.stale.datanode": "true", 
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
+            "dfs.datanode.data.dir.perm": "750", 
+            "dfs.encryption.key.provider.uri": "kms://http@c6401.ambari.apache.org:9292/kms", 
+            "dfs.replication.max": "50", 
+            "dfs.namenode.name.dir": "/grid/0/hadoop/hdfs/namenode"
+        }, 
+        "ranger-tagsync-site": {
+            "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks", 
+            "ranger.tagsync.source.atlasrest.username": "", 
+            "ranger.tagsync.logdir": "/var/log/ranger/tagsync", 
+            "ranger.tagsync.source.atlasrest.download.interval.millis": "", 
+            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks", 
+            "ranger.tagsync.source.file.check.interval.millis": "", 
+            "ranger.tagsync.source.atlasrest.endpoint": "", 
+            "ranger.tagsync.dest.ranger.username": "rangertagsync", 
+            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}", 
+            "ranger.tagsync.kerberos.principal": "rangertagsync/_HOST@EXAMPLE.COM", 
+            "ranger.tagsync.kerberos.keytab": "/etc/security/keytabs/rangertagsync.service.keytab", 
+            "ranger.tagsync.source.atlas": "false", 
+            "ranger.tagsync.source.atlasrest": "false", 
+            "ranger.tagsync.source.file": "false", 
+            "ranger.tagsync.source.file.filename": ""
+        }, 
+        "zoo.cfg": {
+            "clientPort": "2181", 
+            "autopurge.purgeInterval": "24", 
+            "syncLimit": "5", 
+            "dataDir": "/grid/0/hadoop/zookeeper", 
+            "initLimit": "10", 
+            "tickTime": "2000", 
+            "autopurge.snapRetainCount": "30"
+        }, 
+        "hadoop-policy": {
+            "security.job.client.protocol.acl": "*", 
+            "security.job.task.protocol.acl": "*", 
+            "security.datanode.protocol.acl": "*", 
+            "security.namenode.protocol.acl": "*", 
+            "security.client.datanode.protocol.acl": "*", 
+            "security.inter.tracker.protocol.acl": "*", 
+            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop", 
+            "security.client.protocol.acl": "*", 
+            "security.refresh.policy.protocol.acl": "hadoop", 
+            "security.admin.operations.protocol.acl": "hadoop", 
+            "security.inter.datanode.protocol.acl": "*"
+        }, 
+        "hdfs-log4j": {
+            "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
 gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
 .console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
 er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
 false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# NameNode metrics logging.\n# The default is to retain two namenode-metrics.log files up to 64MB each.\n#\nnamenode.metrics.logger=INFO,NullAppender\nlog4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}\nlog4j.additivity.NameNodeMetricsLog=false\nlog4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log\nlog4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n\nlog4j.appender.NNMETRICSRFA.MaxBackupIndex=1\nlog4j.appender.NNMETRICSRFA.MaxFileSize=64MB\n\n#\n# mapred audit logging\n#\nmapred
 .audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging leve
 ls\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateCh
 ange=WARN"
+        }, 
+        "krb5-conf": {
+            "domains": "", 
+            "manage_krb5_conf": "true", 
+            "content": "\n[libdefaults]\n  renew_lifetime = 7d\n  forwardable = true\n  default_realm = {{realm}}\n  ticket_lifetime = 24h\n  dns_lookup_realm = false\n  dns_lookup_kdc = false\n  default_ccache_name = /tmp/krb5cc_%{uid}\n  #default_tgs_enctypes = {{encryption_types}}\n  #default_tkt_enctypes = {{encryption_types}}\n{% if domains %}\n[domain_realm]\n{%- for domain in domains.split(',') %}\n  {{domain|trim()}} = {{realm}}\n{%- endfor %}\n{% endif %}\n[logging]\n  default = FILE:/var/log/krb5kdc.log\n  admin_server = FILE:/var/log/kadmind.log\n  kdc = FILE:/var/log/krb5kdc.log\n\n[realms]\n  {{realm}} = {\n{%- if kdc_hosts > 0 -%}\n{%- set kdc_host_list = kdc_hosts.split(',')  -%}\n{%- if kdc_host_list and kdc_host_list|length > 0 %}\n    admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}\n{%- if kdc_host_list -%}\n{% for kdc_host in kdc_host_list %}\n    kdc = {{kdc_host|trim()}}\n{%- endfor -%}\n{% endif %}\n{%- endif %}\n{%- endif %}\n  }\n\
 n{# Append additional realm declarations below #}", 
+            "conf_dir": "/etc"
+        }, 
+        "core-site": {
+            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py", 
+            "hadoop.proxyuser.hdfs.groups": "*", 
+            "fs.trash.interval": "360", 
+            "ipc.server.tcpnodelay": "true", 
+            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec", 
+            "ipc.client.idlethreshold": "8000", 
+            "io.file.buffer.size": "131072", 
+            "hadoop.proxyuser.ambari-server-test_cluster01.groups": "*", 
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
+            "hadoop.security.authentication": "kerberos", 
+            "mapreduce.jobtracker.webinterface.trusted": "false", 
+            "hadoop.proxyuser.hdfs.hosts": "*", 
+            "hadoop.proxyuser.HTTP.groups": "users", 
+            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020", 
+            "ha.failover-controller.active-standby-elector.zk.op.retries": "120", 
+            "hadoop.security.key.provider.path": "kms://http@c6401.ambari.apache.org:9292/kms", 
+            "hadoop.security.authorization": "true", 
+            "hadoop.http.authentication.simple.anonymous.allowed": "true", 
+            "ipc.client.connect.max.retries": "50", 
+            "hadoop.security.auth_to_local": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT", 
+            "hadoop.proxyuser.ambari-server-test_cluster01.hosts": "c6401.ambari.apache.org", 
+            "ipc.client.connection.maxidletime": "30000"
+        }, 
+        "hadoop-env": {
+            "keyserver_port": "", 
+            "proxyuser_group": "users", 
+            "hdfs_user_nproc_limit": "65536", 
+            "hdfs_log_dir_prefix": "/var/log/hadoop", 
+            "hdfs_user_nofile_limit": "128000", 
+            "hdfs_user": "hdfs", 
+            "hdfs_principal_name": "hdfs-test_cluster01@EXAMPLE.COM", 
+            "keyserver_host": " ", 
+            "namenode_opt_maxnewsize": "128m", 
+            "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab", 
+            "namenode_opt_maxpermsize": "256m", 
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
 NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
 N_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
 ing priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
 ECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}", 
+            "namenode_heapsize": "1024m", 
+            "namenode_opt_newsize": "128m", 
+            "nfsgateway_heapsize": "1024", 
+            "dtnode_heapsize": "1024m", 
+            "hadoop_root_logger": "INFO,RFA", 
+            "hadoop_heapsize": "1024", 
+            "hadoop_pid_dir_prefix": "/var/run/hadoop", 
+            "namenode_opt_permsize": "128m", 
+            "hdfs_tmp_dir": "/tmp"
+        }, 
+        "zookeeper-log4j": {
+            "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4
 j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLIN
 GFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n"
+        }, 
+        "ssl-server": {
+            "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks", 
+            "ssl.server.keystore.keypassword": "bigdata", 
+            "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks", 
+            "ssl.server.keystore.password": "bigdata", 
+            "ssl.server.truststore.password": "bigdata", 
+            "ssl.server.truststore.type": "jks", 
+            "ssl.server.keystore.type": "jks", 
+            "ssl.server.truststore.reload.interval": "10000"
+        }, 
+        "ranger-site": {}, 
+        "admin-log4j": {
+            "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = warn,xa_log_appender\n\n\n# xa_logger\nlog4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.xa_log_app
 ender.file=${logdir}/xa_portal.log\nlog4j.appender.xa_log_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.xa_log_appender.append=true\nlog4j.appender.xa_log_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.xa_log_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n# xa_log_appender : category and additivity\nlog4j.category.org.springframework=warn,xa_log_appender\nlog4j.additivity.org.springframework=false\n\nlog4j.category.org.apache.ranger=info,xa_log_appender\nlog4j.additivity.org.apache.ranger=false\n\nlog4j.category.xa=info,xa_log_appender\nlog4j.additivity.xa=false\n\n# perf_logger\nlog4j.appender.perf_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.perf_appender.file=${logdir}/ranger_admin_perf.log\nlog4j.appender.perf_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.perf_appender.append=true\nlog4j.appender.perf_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.perf_appender.layout.ConversionPattern=%d [%t] 
 %m%n\n\n\n# sql_appender\nlog4j.appender.sql_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.sql_appender.file=${logdir}/xa_portal_sql.log\nlog4j.appender.sql_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.sql_appender.append=true\nlog4j.appender.sql_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.sql_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n\n# sql_appender : category and additivity\nlog4j.category.org.hibernate.SQL=warn,sql_appender\nlog4j.additivity.org.hibernate.SQL=false\n\nlog4j.category.jdbc.sqlonly=fatal,sql_appender\nlog4j.additivity.jdbc.sqlonly=false\n\nlog4j.category.jdbc.sqltiming=warn,sql_appender\nlog4j.additivity.jdbc.sqltiming=false\n\nlog4j.category.jdbc.audit=fatal,sql_appender\nlog4j.additivity.jdbc.audit=false\n\nlog4j.category.jdbc.resultset=fatal,sql_appender\nlog4j.additivity.jdbc.resultset=false\n\nlog4j.category.jdbc.connection=fatal,sql_appender\nlog4j.additivity.jdbc.connection=false"
+        }, 
+        "tagsync-log4j": {
+            "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/tagsync
 .log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n"
+        }, 
+        "ranger-hdfs-security": {
+            "ranger.plugin.hdfs.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient", 
+            "ranger.plugin.hdfs.service.name": "{{repo_name}}", 
+            "ranger.plugin.hdfs.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache", 
+            "ranger.plugin.hdfs.policy.pollIntervalMs": "30000", 
+            "ranger.plugin.hdfs.policy.rest.url": "{{policymgr_mgr_url}}", 
+            "ranger.plugin.hdfs.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl.xml", 
+            "xasecure.add-hadoop-authorization": "true"
+        }, 
+        "usersync-properties": {}, 
+        "zookeeper-env": {
+            "zk_server_heapsize": "1024m", 
+            "zookeeper_keytab_path": "/etc/security/keytabs/zk.service.keytab", 
+            "zk_user": "zookeeper", 
+            "zk_log_dir": "/var/log/zookeeper", 
+            "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}", 
+            "zk_pid_dir": "/var/run/zookeeper", 
+            "zookeeper_principal_name": "zookeeper/_HOST@EXAMPLE.COM"
+        },
+        "infra-solr-env": {
+            "infra_solr_datadir": "/opt/logsearch_solr/data",
+            "infra_solr_keystore_location": "/etc/security/serverKeys/logsearch.keyStore.jks",
+            "infra_solr_kerberos_name_rules": "DEFAULT",
+            "infra_solr_user": "infra-solr",
+            "infra_solr_maxmem": "1024",
+            "content": "#!/bin/bash\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# By default the script will use JAVA_HOME to determine which java\n# to use, but you can set a specific path for Solr to use without\n# affecting other Java applica
 tions on your server/workstation.\nSOLR_JAVA_HOME={{java64_home}}\n\n# Increase Java Min/Max Heap as needed to support your indexing / query needs\nSOLR_JAVA_MEM=\"-Xms{{logsearch_solr_min_mem}}m -Xmx{{logsearch_solr_max_mem}}m\"\n\n# Enable verbose GC logging\nGC_LOG_OPTS=\"-verbose:gc -XX:+PrintHeapAtGC -XX:+PrintGCDetails \\\n-XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+PrintTenuringDistribution -XX:+PrintGCApplicationStoppedTime\"\n\n# These GC settings have shown to work well for a number of common Solr workloads\nGC_TUNE=\"-XX:NewRatio=3 \\\n-XX:SurvivorRatio=4 \\\n-XX:TargetSurvivorRatio=90 \\\n-XX:MaxTenuringThreshold=8 \\\n-XX:+UseConcMarkSweepGC \\\n-XX:+UseParNewGC \\\n-XX:ConcGCThreads=4 -XX:ParallelGCThreads=4 \\\n-XX:+CMSScavengeBeforeRemark \\\n-XX:PretenureSizeThreshold=64m \\\n-XX:+UseCMSInitiatingOccupancyOnly \\\n-XX:CMSInitiatingOccupancyFraction=50 \\\n-XX:CMSMaxAbortablePrecleanTime=6000 \\\n-XX:+CMSParallelRemarkEnabled \\\n-XX:+ParallelRefProcEnabled\"\
 n\n# Set the ZooKeeper connection string if using an external ZooKeeper ensemble\n# e.g. host1:2181,host2:2181/chroot\n# Leave empty if not using SolrCloud\nZK_HOST=\"{{zookeeper_quorum}}{{logsearch_solr_znode}}\"\n\n# Set the ZooKeeper client timeout (for SolrCloud mode)\nZK_CLIENT_TIMEOUT=\"60000\"\n\n# By default the start script uses \"localhost\"; override the hostname here\n# for production SolrCloud environments to control the hostname exposed to cluster state\n#SOLR_HOST=\"192.168.1.1\"\n\n# By default the start script uses UTC; override the timezone if needed\n#SOLR_TIMEZONE=\"UTC\"\n\n# Set to true to activate the JMX RMI connector to allow remote JMX client applications\n# to monitor the JVM hosting Solr; set to \"false\" to disable that behavior\n# (false is recommended in production environments)\nENABLE_REMOTE_JMX_OPTS=\"true\"\n\n# The script will use SOLR_PORT+10000 for the RMI_PORT or you can set it here\nRMI_PORT={{logsearch_solr_jmx_port}}\n\n# Anything you add to
  the SOLR_OPTS variable will be included in the java\n# start command line as-is, in ADDITION to other options. If you specify the\n# -a option on start script, those options will be appended as well. Examples:\n#SOLR_OPTS=\"$SOLR_OPTS -Dsolr.autoSoftCommit.maxTime=3000\"\n#SOLR_OPTS=\"$SOLR_OPTS -Dsolr.autoCommit.maxTime=60000\"\n#SOLR_OPTS=\"$SOLR_OPTS -Dsolr.clustering.enabled=true\"\n\n# Location where the bin/solr script will save PID files for running instances\n# If not set, the script will create PID files in $SOLR_TIP/bin\nSOLR_PID_DIR={{logsearch_solr_piddir}}\n\n# Path to a directory where Solr creates index files, the specified directory\n# must contain a solr.xml; by default, Solr will use server/solr\nSOLR_HOME={{logsearch_solr_datadir}}\n\n# Solr provides a default Log4J configuration properties file in server/resources\n# however, you may want to customize the log settings and file appender location\n# so you can point the script to use a different log4j.properties f
 ile\nLOG4J_PROPS={{logsearch_solr_conf}}/log4j.properties\n\n# Location where Solr should write logs to; should agree with the file appender\n# settings in server/resources/log4j.properties\nSOLR_LOGS_DIR={{logsearch_solr_log_dir}}\n\n# Sets the port Solr binds to, default is 8983\nSOLR_PORT={{logsearch_solr_port}}\n\n# Be sure to update the paths to the correct keystore for your environment\n{% if logsearch_solr_ssl_enabled %}\nSOLR_SSL_KEY_STORE={{logsearch_solr_keystore_location}}\nSOLR_SSL_KEY_STORE_PASSWORD={{logsearch_solr_keystore_password}}\nSOLR_SSL_TRUST_STORE={{logsearch_solr_keystore_location}}\nSOLR_SSL_TRUST_STORE_PASSWORD={{logsearch_solr_keystore_password}}\nSOLR_SSL_NEED_CLIENT_AUTH=false\nSOLR_SSL_WANT_CLIENT_AUTH=false\n{% endif %}\n\n# Uncomment to set a specific SSL port (-Djetty.ssl.port=N); if not set\n# and you are using SSL, then the start script will use SOLR_PORT for the SSL port\n#SOLR_SSL_PORT=\n\n{% if security_enabled -%}\nSOLR_HOST=`hostname -f`\nSOLR
 _JAAS_FILE={{logsearch_solr_jaas_file}}\nSOLR_KERB_KEYTAB={{logsearch_solr_web_kerberos_keytab}}\nSOLR_KERB_PRINCIPAL={{logsearch_solr_web_kerberos_principal}}\nSOLR_KERB_NAME_RULES={{logsearch_solr_kerberos_name_rules}}\n\nSOLR_AUTHENTICATION_CLIENT_CONFIGURER=\"org.apache.solr.client.solrj.impl.Krb5HttpClientConfigurer\"\nSOLR_AUTHENTICATION_OPTS=\" -DauthenticationPlugin=org.apache.solr.security.KerberosPlugin -Djava.security.auth.login.config=$SOLR_JAAS_FILE -Dsolr.kerberos.principal=${SOLR_KERB_PRINCIPAL} -Dsolr.kerberos.keytab=${SOLR_KERB_KEYTAB} -Dsolr.kerberos.cookie.domain=${SOLR_HOST} -Dsolr.kerberos.name.rules=${SOLR_KERB_NAME_RULES}\"\n{% endif %}", 
+            "infra_solr_pid_dir": "/var/run/ambari-infra-solr",
+            "infra_solr_truststore_password": "bigdata",
+            "infra_solr_truststore_type": "jks",
+            "infra_solr_keystore_type": "jks",
+            "infra_solr_log_dir": "/var/log/ambari-infra-solr",
+            "infra_solr_web_kerberos_keytab": "/etc/security/keytabs/spnego.service.keytab",
+            "infra_solr_ssl_enabled": "false",
+            "infra_solr_client_log_dir": "/var/log/ambari-infra-solr-client",
+            "infra_solr_web_kerberos_principal": "HTTP/_HOST@EXAMPLE.COM",
+            "infra_solr_znode": "/ambari-solr",
+            "infra_solr_keystore_password": "bigdata",
+            "infra_solr_port": "8886",
+            "infra_solr_kerberos_principal": "infra-solr/_HOST@EXAMPLE.COM",
+            "infra_solr_jmx_port": "18886",
+            "infra_solr_truststore_location": "/etc/security/serverKeys/infra-solr.trustStore.jks",
+            "infra_solr_minmem": "512",
+            "infra_solr_kerberos_keytab": "/etc/security/keytabs/infra-solr.service.keytab"
+        },
+        "infra-solr-client-log4j": {
+            "infra_solr_client_log_dir" : "/var/log/ambari-infra-solr-client",
+            "content": "# Copyright 2011 The Apache Software Foundation\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nlog4j.rootLogger=INFO,file,stdout,stderr\n\nlog4j.appender.file=org.apache.log4j.RollingFileAppender\nlog4j.appender
 .file.File={{logsearch_solr_client_log}}\nlog4j.appender.file.MaxFileSize=80MB\nlog4j.appender.file.MaxBackupIndex=60\nlog4j.appender.file.layout=org.apache.log4j.PatternLayout\nlog4j.appender.file.layout.ConversionPattern=%d{DATE} %5p [%t] %c{1}:%L - %m%n\n\nlog4j.appender.stdout=org.apache.log4j.ConsoleAppender\nlog4j.appender.stdout.Threshold=INFO\nlog4j.appender.stdout.Target=System.out\nlog4j.appender.stdout.layout=org.apache.log4j.PatternLayout\nlog4j.appender.stdout.layout.ConversionPattern=%m%n\n\nlog4j.appender.stderr=org.apache.log4j.ConsoleAppender\nlog4j.appender.stderr.Threshold=ERROR\nlog4j.appender.stderr.Target=System.err\nlog4j.appender.stderr.layout=org.apache.log4j.PatternLayout\nlog4j.appender.stderr.layout.ConversionPattern=%m%n"
+        },
+        "cluster-env": {
+            "security_enabled": "true", 
+            "override_uid": "true", 
+            "fetch_nonlocal_groups": "true", 
+            "one_dir_per_partition": "true", 
+            "repo_ubuntu_template": "{{package_type}} {{base_url}} {{components}}", 
+            "ignore_groupsusers_create": "false", 
+            "alerts_repeat_tolerance": "1", 
+            "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab", 
+            "kerberos_domain": "EXAMPLE.COM", 
+            "manage_dirs_on_root": "true",
+            "recovery_lifetime_max_count": "1024", 
+            "recovery_type": "AUTO_START", 
+            "ignore_bad_mounts": "false", 
+            "recovery_window_in_minutes": "60", 
+            "user_group": "hadoop", 
+            "stack_tools": "{\n  \"stack_selector\": [\"hdp-select\", \"/usr/bin/hdp-select\", \"hdp-select\"],\n  \"conf_selector\": [\"conf-select\", \"/usr/bin/conf-select\", \"conf-select\"]\n}", 
+            "recovery_retry_interval": "5", 
+            "stack_features": "{\n  \"stack_features\": [\n    {\n      \"name\": \"snappy\",\n      \"description\": \"Snappy compressor/decompressor support\",\n      \"min_version\": \"2.0.0.0\",\n      \"max_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"lzo\",\n      \"description\": \"LZO libraries support\",\n      \"min_version\": \"2.2.1.0\"\n    },\n    {\n      \"name\": \"express_upgrade\",\n      \"description\": \"Express upgrade support\",\n      \"min_version\": \"2.1.0.0\"\n    },\n    {\n      \"name\": \"rolling_upgrade\",\n      \"description\": \"Rolling upgrade support\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"config_versioning\",\n      \"description\": \"Configurable versions support\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"datanode_non_root\",\n      \"description\": \"DataNode running as non-root support (AMBARI-7615)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\
 ": \"remove_ranger_hdfs_plugin_env\",\n      \"description\": \"HDFS removes Ranger env files (AMBARI-14299)\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"ranger\",\n      \"description\": \"Ranger Service support\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"ranger_tagsync_component\",\n      \"description\": \"Ranger Tagsync component support (AMBARI-14383)\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"phoenix\",\n      \"description\": \"Phoenix Service support\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"nfs\",\n      \"description\": \"NFS support\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"tez_for_spark\",\n      \"description\": \"Tez dependency for Spark\",\n      \"min_version\": \"2.2.0.0\",\n      \"max_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"timeline_state_store\",\n      \"description\": \"Yarn application timeline-se
 rvice supports state store property (AMBARI-11442)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"copy_tarball_to_hdfs\",\n      \"description\": \"Copy tarball to HDFS support (AMBARI-12113)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"spark_16plus\",\n      \"description\": \"Spark 1.6+\",\n      \"min_version\": \"2.4.0.0\"\n    },\n    {\n      \"name\": \"spark_thriftserver\",\n      \"description\": \"Spark Thrift Server\",\n      \"min_version\": \"2.3.2.0\"\n    },\n    {\n      \"name\": \"storm_kerberos\",\n      \"description\": \"Storm Kerberos support (AMBARI-7570)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"storm_ams\",\n      \"description\": \"Storm AMS integration (AMBARI-10710)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"create_kafka_broker_id\",\n      \"description\": \"Ambari should create Kafka Broker Id (AMBARI-12678)\",\n      \"min_version\": \"2.2
 .0.0\",\n      \"max_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"kafka_listeners\",\n      \"description\": \"Kafka listeners (AMBARI-10984)\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"kafka_kerberos\",\n      \"description\": \"Kafka Kerberos support (AMBARI-10984)\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"pig_on_tez\",\n      \"description\": \"Pig on Tez support (AMBARI-7863)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"ranger_usersync_non_root\",\n      \"description\": \"Ranger Usersync as non-root user (AMBARI-10416)\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"ranger_audit_db_support\",\n      \"description\": \"Ranger Audit to DB support\",\n      \"min_version\": \"2.2.0.0\",\n      \"max_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"accumulo_kerberos_user_auth\",\n      \"description\": \"Accumulo Kerberos User Auth (AMBARI-10163)\",\n
       \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"knox_versioned_data_dir\",\n      \"description\": \"Use versioned data dir for Knox (AMBARI-13164)\",\n      \"min_version\": \"2.3.2.0\"\n    },\n    {\n      \"name\": \"knox_sso_topology\",\n      \"description\": \"Knox SSO Topology support (AMBARI-13975)\",\n      \"min_version\": \"2.3.8.0\"\n    },\n    {\n      \"name\": \"atlas_rolling_upgrade\",\n      \"description\": \"Rolling upgrade support for Atlas\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"oozie_admin_user\",\n      \"description\": \"Oozie install user as an Oozie admin user (AMBARI-7976)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"oozie_create_hive_tez_configs\",\n      \"description\": \"Oozie create configs for Ambari Hive and Tez deployments (AMBARI-8074)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"oozie_setup_shared_lib\",\n      \"description\": \"Oozie 
 setup tools used to shared Oozie lib to HDFS (AMBARI-7240)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"oozie_host_kerberos\",\n      \"description\": \"Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)\",\n      \"min_version\": \"2.0.0.0\",\n      \"max_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"falcon_extensions\",\n      \"description\": \"Falcon Extension\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"hive_metastore_upgrade_schema\",\n      \"description\": \"Hive metastore upgrade schema support (AMBARI-11176)\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"hive_server_interactive\",\n      \"description\": \"Hive server interactive support (AMBARI-15573)\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"hive_webhcat_specific_configs\",\n      \"description\": \"Hive webhcat specific configurations support (AMBARI-12364)\",\n      \"min_ver
 sion\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"hive_purge_table\",\n      \"description\": \"Hive purge table support (AMBARI-12260)\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"hive_server2_kerberized_env\",\n      \"description\": \"Hive server2 working on kerberized environment (AMBARI-13749)\",\n      \"min_version\": \"2.2.3.0\",\n      \"max_version\": \"2.2.5.0\"\n    },\n    {\n      \"name\": \"hive_env_heapsize\",\n      \"description\": \"Hive heapsize property defined in hive-env (AMBARI-12801)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"ranger_kms_hsm_support\",\n      \"description\": \"Ranger KMS HSM support (AMBARI-15752)\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"ranger_log4j_support\",\n      \"description\": \"Ranger supporting log-4j properties (AMBARI-15681)\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"ranger_kerberos_support\",\n      \"desc
 ription\": \"Ranger Kerberos support\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"hive_metastore_site_support\",\n      \"description\": \"Hive Metastore site support\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"ranger_usersync_password_jceks\",\n      \"description\": \"Saving Ranger Usersync credentials in jceks\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"ranger_install_infra_client\",\n      \"description\": \"LogSearch Service support\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"hbase_home_directory\",\n      \"description\": \"Hbase home directory in HDFS needed for HBASE backup\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"spark_livy\",\n      \"description\": \"Livy as slave component of spark\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"atlas_ranger_plugin_support\",\n      \"description\": \"Atlas Ranger plug
 in support\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"ranger_pid_support\",\n      \"description\": \"Ranger Service support pid generation AMBARI-16756\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"ranger_kms_pid_support\",\n      \"description\": \"Ranger KMS Service support pid generation\",\n      \"min_version\": \"2.5.0.0\"\n    }\n  ]\n}",
+            "recovery_enabled": "true", 
+            "smokeuser_principal_name": "ambari-qa-test_cluster01@EXAMPLE.COM", 
+            "recovery_max_count": "6", 
+            "stack_root": "/usr/hdp", 
+            "repo_suse_rhel_template": "[{{repo_id}}]\nname={{repo_id}}\n{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n\npath=/\nenabled=1\ngpgcheck=0", 
+            "ambari_principal_name": "ambari-server-test_cluster01@EXAMPLE.COM", 
+            "managed_hdfs_resource_property_names": "", 
+            "smokeuser": "ambari-qa"
+        }
+    }
+}
\ No newline at end of file
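
A note for readers skimming the fixture above: the hadoop.security.auth_to_local value in core-site is what maps Kerberos principals onto local users for this secured test cluster. Below is a minimal Python sketch of that mapping under simplified assumptions (map_principal and its hard-coded regex are illustrative only, not Hadoop's actual resolver):

import re

# Illustrative only: mimics the RULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/ entry above.
def map_principal(principal):
    name, realm = principal.split("@")
    first = name.split("/")[0]               # $1
    candidate = "%s@%s" % (first, realm)     # the [2:$1@$0] / [1:$1@$0] format string
    if re.match(r"(nn|dn)@EXAMPLE\.COM$", candidate):
        return re.sub(r".*", "hdfs", candidate, count=1)  # s/.*/hdfs/
    return first  # simplified fall-through; the real rules also map ambari-qa and ranger principals

print(map_principal("nn/c6401.ambari.apache.org@EXAMPLE.COM"))  # hdfs
print(map_principal("dn/c6401.ambari.apache.org@EXAMPLE.COM"))  # hdfs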

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/python/stacks/test_stack_adviser.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/test_stack_adviser.py b/ambari-server/src/test/python/stacks/test_stack_adviser.py
new file mode 100644
index 0000000..8146a0c
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/test_stack_adviser.py
@@ -0,0 +1,239 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import os
+from unittest import TestCase
+
+
+class TestBasicAdvisor(TestCase):
+  def setUp(self):
+    import imp
+    self.maxDiff = None
+    self.testDirectory = os.path.dirname(os.path.abspath(__file__))
+    stackAdvisorPath = os.path.abspath(os.path.join(self.testDirectory, '../../../main/resources/stacks/stack_advisor.py'))
+
+    default_sa_classname = 'DefaultStackAdvisor'
+
+    with open(stackAdvisorPath, 'rb') as fp:
+      stack_advisor_impl = imp.load_module('stack_advisor', fp, stackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
+
+    clazz = getattr(stack_advisor_impl, default_sa_classname)
+    self.stackAdvisor = clazz()
+
+  def test_filterHostMounts(self):
+
+    filtered_mount = "/data"
+
+    hosts = {
+      "items": [
+        {
+          "Hosts": {
+            "cpu_count": 4,
+            "total_mem": 50331648,
+            "disk_info": [
+              {"mountpoint": "/", "type": "ext3"},
+              {"mountpoint": "/dev/shm", "type": "ext3"},
+              {"mountpoint": "/vagrant", "type": "vboxsf"},
+              {"mountpoint": "/dev/shm", "type": "ext3"},
+              {"mountpoint": "/vagrant", "type": "ext3"},
+              {"mountpoint": filtered_mount, "type": "ext3"},
+            ],
+            "public_host_name": "c6401.ambari.apache.org",
+            "host_name": "c6401.ambari.apache.org"
+          },
+        },
+        {
+          "Hosts": {
+            "cpu_count": 4,
+            "total_mem": 50331648,
+            "disk_info": [
+              {"mountpoint": "/", "type": "ext3"},
+              {"mountpoint": "/dev/shm1", "type": "ext3"},
+              {"mountpoint": "/vagrant1", "type": "ext3"},
+              {"mountpoint": filtered_mount, "type": "ext3"}
+            ],
+            "public_host_name": "c6402.ambari.apache.org",
+            "host_name": "c6402.ambari.apache.org"
+          },
+        }
+      ]
+    }
+
+    services = {
+      "Versions": {
+        "parent_stack_version": "2.5",
+        "stack_name": "HDP",
+        "stack_version": "2.6",
+        "stack_hierarchy": {
+          "stack_name": "HDP",
+          "stack_versions": ["2.5", "2.4", "2.3", "2.2", "2.1", "2.0.6"]
+        }
+      },
+      "services": [
+      ],
+      "configurations": {
+        "cluster-env": {
+          "properties": {
+            "agent_mounts_ignore_list": filtered_mount
+          }
+        }
+      }
+    }
+
+    filtered_hosts = self.stackAdvisor.filterHostMounts(hosts, services)
+
+    for host in filtered_hosts["items"]:
+      self.assertEquals(False, filtered_mount in [disk["mountpoint"] for disk in host["Hosts"]["disk_info"]])
+
+  def test_getMountPathVariations(self):
+
+    filtered_mount = "/data"
+
+    hosts = {
+      "items": [
+        {
+          "Hosts": {
+            "cpu_count": 4,
+            "total_mem": 50331648,
+            "disk_info": [
+              {"mountpoint": "/", "type": "ext3"},
+              {"mountpoint": "/dev/shm", "type": "ext3"},
+              {"mountpoint": "/vagrant", "type": "vboxsf"},
+              {"mountpoint": "/dev/shm", "type": "ext3"},
+              {"mountpoint": "/vagrant", "type": "ext3"},
+              {"mountpoint": filtered_mount, "type": "ext3"},
+            ],
+            "public_host_name": "c6401.ambari.apache.org",
+            "host_name": "c6401.ambari.apache.org"
+          },
+        },
+        {
+          "Hosts": {
+            "cpu_count": 4,
+            "total_mem": 50331648,
+            "disk_info": [
+              {"mountpoint": "/", "type": "ext3"},
+              {"mountpoint": "/dev/shm1", "type": "ext3"},
+              {"mountpoint": "/vagrant1", "type": "ext3"},
+              {"mountpoint": filtered_mount, "type": "ext3"}
+            ],
+            "public_host_name": "c6402.ambari.apache.org",
+            "host_name": "c6402.ambari.apache.org"
+          },
+        }
+      ]
+    }
+
+    services = {
+      "Versions": {
+        "parent_stack_version": "2.5",
+        "stack_name": "HDP",
+        "stack_version": "2.6",
+        "stack_hierarchy": {
+          "stack_name": "HDP",
+          "stack_versions": ["2.5", "2.4", "2.3", "2.2", "2.1", "2.0.6"]
+        }
+      },
+      "services": [
+      ],
+      "configurations": {
+        "cluster-env": {
+          "properties": {
+            "agent_mounts_ignore_list": filtered_mount
+          }
+        }
+      }
+    }
+
+    hosts = self.stackAdvisor.filterHostMounts(hosts, services)
+    avail_mounts = self.stackAdvisor.getMountPathVariations("/test/folder", "DATANODE", services, hosts)
+
+    self.assertEquals(True, avail_mounts is not None)
+    self.assertEquals(1, len(avail_mounts))
+    self.assertEquals("/test/folder", avail_mounts[0])
+
+  def test_updateMountProperties(self):
+    hosts = {
+      "items": [
+        {
+          "Hosts": {
+            "cpu_count": 4,
+            "total_mem": 50331648,
+            "disk_info": [
+              {"mountpoint": "/", "type": "ext3"},
+              {"mountpoint": "/dev/shm", "type": "ext3"},
+              {"mountpoint": "/vagrant", "type": "vboxsf"},
+              {"mountpoint": "/dev/shm", "type": "ext3"},
+              {"mountpoint": "/vagrant", "type": "ext3"},
+              {"mountpoint": "/data", "type": "ext3"},
+            ],
+            "public_host_name": "c6401.ambari.apache.org",
+            "host_name": "c6401.ambari.apache.org"
+          },
+        },
+        {
+          "Hosts": {
+            "cpu_count": 4,
+            "total_mem": 50331648,
+            "disk_info": [
+              {"mountpoint": "/", "type": "ext3"},
+              {"mountpoint": "/dev/shm1", "type": "ext3"},
+              {"mountpoint": "/vagrant", "type": "vboxsf"},
+              {"mountpoint": "/data", "type": "ext3"}
+            ],
+            "public_host_name": "c6402.ambari.apache.org",
+            "host_name": "c6402.ambari.apache.org"
+          },
+        }
+      ]
+    }
+
+    services = {
+      "Versions": {
+        "parent_stack_version": "2.5",
+        "stack_name": "HDP",
+        "stack_version": "2.6",
+        "stack_hierarchy": {
+          "stack_name": "HDP",
+          "stack_versions": ["2.5", "2.4", "2.3", "2.2", "2.1", "2.0.6"]
+        }
+      },
+      "services": [
+      ],
+      "configurations": {
+        "cluster-env": {
+          "properties": {
+            "agent_mounts_ignore_list": ""
+          }
+        },
+        "some-site": {
+          "path_prop": "/test"
+        }
+      }
+    }
+
+    pathProperties = [
+      ("path_prop", "DATANODE", "/test", "multi"),
+    ]
+
+    configurations = {}
+    hosts = self.stackAdvisor.filterHostMounts(hosts, services)
+
+    self.stackAdvisor.updateMountProperties("some-site", pathProperties, configurations, services, hosts)
+
+    self.assertEquals("/test,/data/test", configurations["some-site"]["properties"]["path_prop"])


[13/25] ambari git commit: Merge from branch-2.5

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/alerts.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/alerts.json
index 8ccfa47..34cea4c 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/alerts.json
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/alerts.json
@@ -1,469 +1,21 @@
 {
   "HDFS":{
-    "service": [
-      {
-        "name": "datanode_process_percent",
-        "label": "Percent DataNodes Available",
-        "description": "This alert is triggered if the number of down DataNodes in the cluster is greater than the configured critical threshold. It aggregates the results of DataNode process checks.",
-        "interval": 1,
-        "scope": "SERVICE",
-        "enabled": true,
-        "source": {
-          "type": "AGGREGATE",
-          "alert_name": "datanode_process",
-          "reporting": {
-            "ok": {
-              "text": "affected: [{1}], total: [{0}]"
-            },
-            "warning": {
-              "text": "affected: [{1}], total: [{0}]",
-              "value": 10
-            },
-            "critical": {
-              "text": "affected: [{1}], total: [{0}]",
-              "value": 30
-            },
-            "units" : "%",
-            "type": "PERCENT"
-          }
-        }
-      },
-      {
-        "name": "datanode_storage_percent",
-        "label": "Percent DataNodes With Available Space",
-        "description": "This service-level alert is triggered if the storage on a certain percentage of DataNodes exceeds either the warning or critical threshold values.",
-        "interval": 1,
-        "scope": "SERVICE",
-        "enabled": true,
-        "source": {
-          "type": "AGGREGATE",
-          "alert_name": "datanode_storage",
-          "reporting": {
-            "ok": {
-              "text": "affected: [{1}], total: [{0}]"
-            },
-            "warning": {
-              "text": "affected: [{1}], total: [{0}]",
-              "value": 10
-            },
-            "critical": {
-              "text": "affected: [{1}], total: [{0}]",
-              "value": 30
-            },
-            "units" : "%",
-            "type": "PERCENT"
-          }
-        }
-      },
-      {
-        "name": "journalnode_process_percent",
-        "label": "Percent JournalNodes Available",
-        "description": "This alert is triggered if the number of down JournalNodes in the cluster is greater than the configured critical threshold. It aggregates the results of JournalNode process checks.",
-        "interval": 1,
-        "scope": "SERVICE",
-        "enabled": true,
-        "source": {
-          "type": "AGGREGATE",
-          "alert_name": "journalnode_process",
-          "reporting": {
-            "ok": {
-              "text": "affected: [{1}], total: [{0}]"
-            },
-            "warning": {
-              "text": "affected: [{1}], total: [{0}]",
-              "value": 33
-            },
-            "critical": {
-              "text": "affected: [{1}], total: [{0}]",
-              "value": 50
-            },
-            "units" : "%",
-            "type": "PERCENT"
-          }
-        }
-      }
-    ],
     "NAMENODE": [
-      {
-        "name": "namenode_webui",
-        "label": "NameNode Web UI",
-        "description": "This host-level alert is triggered if the NameNode Web UI is unreachable.",
-        "interval": 1,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "WEB",
-          "uri": {
-            "http": "{{hdfs-site/dfs.namenode.http-address}}",
-            "https": "{{hdfs-site/dfs.namenode.https-address}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
-            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
-            "connection_timeout": 5.0,
-            "high_availability": {
-              "nameservice": "{{hdfs-site/dfs.internal.nameservices}}",
-              "alias_key" : "{{hdfs-site/dfs.ha.namenodes.{{ha-nameservice}}}}",
-              "http_pattern" : "{{hdfs-site/dfs.namenode.http-address.{{ha-nameservice}}.{{alias}}}}",
-              "https_pattern" : "{{hdfs-site/dfs.namenode.https-address.{{ha-nameservice}}.{{alias}}}}"
-            }
-          },
-          "reporting": {
-            "ok": {
-              "text": "HTTP {0} response in {2:.3f}s"
-            },
-            "warning":{
-              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
-            },
-            "critical": {
-              "text": "Connection failed to {1} ({3})"
-            }
-          }
-        }
-      },
+
       {
         "name": "upgrade_finalized_state",
         "label": "HDFS Upgrade Finalized State",
         "description": "This service-level alert is triggered if HDFS is not in the finalized state",
         "interval": 1,
-        "scope": "SERVICE",
+        "scope": "HOST",
         "enabled": true,
         "source": {
           "type": "SCRIPT",
-          "path": "HDFS/2.1.0.2.0/package/alerts/alert_upgrade_finalized.py",
+          "path": "PERF/1.0/services/HDFS/package/alerts/alert_upgrade_finalized.py",
           "parameters": []
         }
       },
-      {
-        "name": "namenode_cpu",
-        "label": "NameNode Host CPU Utilization",
-        "description": "This host-level alert is triggered if CPU utilization of the NameNode exceeds certain warning and critical thresholds. It checks the NameNode JMX Servlet for the SystemCPULoad property. The threshold values are in percent.",
-        "interval": 5,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{hdfs-site/dfs.namenode.http-address}}",
-            "https": "{{hdfs-site/dfs.namenode.https-address}}",
-            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
-            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "connection_timeout": 5.0,
-            "high_availability": {
-              "nameservice": "{{hdfs-site/dfs.internal.nameservices}}",
-              "alias_key" : "{{hdfs-site/dfs.ha.namenodes.{{ha-nameservice}}}}",
-              "http_pattern" : "{{hdfs-site/dfs.namenode.http-address.{{ha-nameservice}}.{{alias}}}}",
-              "https_pattern" : "{{hdfs-site/dfs.namenode.https-address.{{ha-nameservice}}.{{alias}}}}"
-            }
-          },
-          "reporting": {
-            "ok": {
-              "text": "{1} CPU, load {0:.1%}"
-            },
-            "warning": {
-              "text": "{1} CPU, load {0:.1%}",
-              "value": 200
-            },
-            "critical": {
-              "text": "{1} CPU, load {0:.1%}",
-              "value": 250
-            },
-            "units" : "%",
-            "type": "PERCENT"
-          },
-          "jmx": {
-            "property_list": [
-              "java.lang:type=OperatingSystem/SystemCpuLoad",
-              "java.lang:type=OperatingSystem/AvailableProcessors"
-            ],
-            "value": "{0} * 100"
-          }
-        }
-      },
-      {
-        "name": "namenode_hdfs_blocks_health",
-        "label": "NameNode Blocks Health",
-        "description": "This service-level alert is triggered if the number of corrupt or missing blocks exceeds the configured critical threshold. The threshold values are in blocks.",
-        "interval": 2,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{hdfs-site/dfs.namenode.http-address}}",
-            "https": "{{hdfs-site/dfs.namenode.https-address}}",
-            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
-            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "connection_timeout": 5.0,
-            "high_availability": {
-              "nameservice": "{{hdfs-site/dfs.internal.nameservices}}",
-              "alias_key" : "{{hdfs-site/dfs.ha.namenodes.{{ha-nameservice}}}}",
-              "http_pattern" : "{{hdfs-site/dfs.namenode.http-address.{{ha-nameservice}}.{{alias}}}}",
-              "https_pattern" : "{{hdfs-site/dfs.namenode.https-address.{{ha-nameservice}}.{{alias}}}}"
-            }
-          },
-          "reporting": {
-            "ok": {
-              "text": "Total Blocks:[{1}], Missing Blocks:[{0}]"
-            },
-            "warning": {
-              "text": "Total Blocks:[{1}], Missing Blocks:[{0}]",
-              "value": 1
-            },          
-            "critical": {
-              "text": "Total Blocks:[{1}], Missing Blocks:[{0}]",
-              "value": 1
-            },
-            "units" : "Blocks"
-          },
-          "jmx": {
-            "property_list": [
-              "Hadoop:service=NameNode,name=FSNamesystem/MissingBlocks",
-              "Hadoop:service=NameNode,name=FSNamesystem/BlocksTotal"
-            ],
-            "value": "{0}"
-          }
-        }
-      },
-      {
-        "name": "namenode_hdfs_pending_deletion_blocks",
-        "label": "HDFS Pending Deletion Blocks",
-        "description": "This service-level alert is triggered if the number of blocks pending deletion in HDFS exceeds the configured warning and critical thresholds. It checks the NameNode JMX Servlet for the PendingDeletionBlock property.",
-        "interval": 2,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{hdfs-site/dfs.namenode.http-address}}",
-            "https": "{{hdfs-site/dfs.namenode.https-address}}",
-            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
-            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "connection_timeout": 5.0,
-            "high_availability": {
-              "nameservice": "{{hdfs-site/dfs.internal.nameservices}}",
-              "alias_key" : "{{hdfs-site/dfs.ha.namenodes.{{ha-nameservice}}}}",
-              "http_pattern" : "{{hdfs-site/dfs.namenode.http-address.{{ha-nameservice}}.{{alias}}}}",
-              "https_pattern" : "{{hdfs-site/dfs.namenode.https-address.{{ha-nameservice}}.{{alias}}}}"
-            }
-          },
-          "reporting": {
-            "ok": {
-              "text": "Pending Deletion Blocks:[{0}]"
-            },
-            "warning": {
-              "text": "Pending Deletion Blocks:[{0}]",
-              "value": 100000
-            },
-            "critical": {
-              "text": "Pending Deletion Blocks:[{0}]",
-              "value": 100000
-            },
-            "units" : "Blocks"
-          },
-          "jmx": {
-            "property_list": [
-              "Hadoop:service=NameNode,name=FSNamesystem/PendingDeletionBlocks"
-            ],
-            "value": "{0}"
-          }
-        }
-      },
-      {
-        "name": "namenode_hdfs_capacity_utilization",
-        "label": "HDFS Capacity Utilization",
-        "description": "This service-level alert is triggered if the HDFS capacity utilization exceeds the configured warning and critical thresholds. It checks the NameNode JMX Servlet for the CapacityUsed and CapacityRemaining properties. The threshold values are in percent.",
-        "interval": 2,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{hdfs-site/dfs.namenode.http-address}}",
-            "https": "{{hdfs-site/dfs.namenode.https-address}}",
-            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
-            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "connection_timeout": 5.0,
-            "high_availability": {
-              "nameservice": "{{hdfs-site/dfs.internal.nameservices}}",
-              "alias_key" : "{{hdfs-site/dfs.ha.namenodes.{{ha-nameservice}}}}",
-              "http_pattern" : "{{hdfs-site/dfs.namenode.http-address.{{ha-nameservice}}.{{alias}}}}",
-              "https_pattern" : "{{hdfs-site/dfs.namenode.https-address.{{ha-nameservice}}.{{alias}}}}"
-            }
-          },
-          "reporting": {
-            "ok": {
-              "text": "Capacity Used:[{2:.0f}%, {0}], Capacity Remaining:[{1}]"
-            },
-            "warning": {
-              "text": "Capacity Used:[{2:.0f}%, {0}], Capacity Remaining:[{1}]",
-              "value": 75
-            },          
-            "critical": {
-              "text": "Capacity Used:[{2:.0f}%, {0}], Capacity Remaining:[{1}]",
-              "value": 80
-            },
-            "units" : "%",
-            "type": "PERCENT"
-          },
-          "jmx": {
-            "property_list": [
-              "Hadoop:service=NameNode,name=FSNamesystemState/CapacityUsed",
-              "Hadoop:service=NameNode,name=FSNamesystemState/CapacityRemaining"
-            ],
-            "value": "{0}/({0} + {1}) * 100.0"
-          }
-        }
-      },
-      {
-        "name": "namenode_rpc_latency",
-        "label": "NameNode RPC Latency",
-        "description": "This host-level alert is triggered if the NameNode RPC latency exceeds the configured critical threshold. Typically an increase in the RPC processing time increases the RPC queue length, causing the average queue wait time to increase for NameNode operations. The threshold values are in milliseconds.",
-        "interval": 2,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{hdfs-site/dfs.namenode.http-address}}",
-            "https": "{{hdfs-site/dfs.namenode.https-address}}",
-            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
-            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "connection_timeout": 5.0,
-            "high_availability": {
-              "nameservice": "{{hdfs-site/dfs.internal.nameservices}}",
-              "alias_key" : "{{hdfs-site/dfs.ha.namenodes.{{ha-nameservice}}}}",
-              "http_pattern" : "{{hdfs-site/dfs.namenode.http-address.{{ha-nameservice}}.{{alias}}}}",
-              "https_pattern" : "{{hdfs-site/dfs.namenode.https-address.{{ha-nameservice}}.{{alias}}}}"
-            }
-          },
-          "reporting": {
-            "ok": {
-              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]"
-            },
-            "warning": {
-              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
-              "value": 3000
-            },          
-            "critical": {
-              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
-              "value": 5000
-            },
-            "units" : "ms"
-          },
-          "jmx": {
-            "property_list": [
-              "Hadoop:service=NameNode,name=RpcActivityForPort*/RpcQueueTimeAvgTime",
-              "Hadoop:service=NameNode,name=RpcActivityForPort*/RpcProcessingTimeAvgTime"
-            ],
-            "value": "{0}"
-          }
-        }
-      },
-      {
-        "name": "namenode_directory_status",
-        "label": "NameNode Directory Status",
-        "description": "This host-level alert is triggered if the NameNode NameDirStatuses metric (name=NameNodeInfo/NameDirStatuses) reports a failed directory. The threshold values are in the number of directories that are not healthy.",
-        "interval": 1,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{hdfs-site/dfs.namenode.http-address}}",
-            "https": "{{hdfs-site/dfs.namenode.https-address}}",
-            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
-            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "connection_timeout": 5.0,
-            "high_availability": {
-              "nameservice": "{{hdfs-site/dfs.internal.nameservices}}",
-              "alias_key" : "{{hdfs-site/dfs.ha.namenodes.{{ha-nameservice}}}}",
-              "http_pattern" : "{{hdfs-site/dfs.namenode.http-address.{{ha-nameservice}}.{{alias}}}}",
-              "https_pattern" : "{{hdfs-site/dfs.namenode.https-address.{{ha-nameservice}}.{{alias}}}}"
-            }
-          },
-          "reporting": {
-            "ok": {
-              "text": "Directories are healthy"
-            },
-            "warning": {
-              "text": "Failed directory count: {1}",
-              "value": 1
-            },          
-            "critical": {
-              "text": "Failed directory count: {1}",
-              "value": 1
-            },
-            "units" : "Dirs"
-          },
-          "jmx": {
-            "property_list": [
-              "Hadoop:service=NameNode,name=NameNodeInfo/NameDirStatuses"
-            ],
-            "value": "calculate(args)\ndef calculate(args):\n  import json\n  json_statuses = json.loads({0})\n  return len(json_statuses['failed']) if 'failed' in json_statuses else 0"
-          }
-        }
-      },
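For reference, the "value" field of the NameNode Directory Status alert above embeds a small Python snippet that parses the NameDirStatuses JMX bean (a JSON string grouping name directories by state) and counts the entries under 'failed'. A standalone equivalent, using a hypothetical bean value, might look like this:

import json


def failed_dir_count(name_dir_statuses_json):
    # NameDirStatuses groups name directories by state; the alert only
    # counts the entries under 'failed'.
    statuses = json.loads(name_dir_statuses_json)
    return len(statuses['failed']) if 'failed' in statuses else 0


# Hypothetical bean value with one healthy and one failed directory:
sample = '{"active": {"/hadoop/hdfs/namenode": "IMAGE_AND_EDITS"}, "failed": {"/mnt/broken": "IMAGE_AND_EDITS"}}'
print(failed_dir_count(sample))  # 1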
-      {
-        "name": "datanode_health_summary",
-        "label": "DataNode Health Summary",
-        "description": "This service-level alert is triggered if there are unhealthy DataNodes",
-        "interval": 1,
-        "scope": "SERVICE",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{hdfs-site/dfs.namenode.http-address}}",
-            "https": "{{hdfs-site/dfs.namenode.https-address}}",
-            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
-            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "connection_timeout": 5.0,
-            "high_availability": {
-              "nameservice": "{{hdfs-site/dfs.internal.nameservices}}",
-              "alias_key": "{{hdfs-site/dfs.ha.namenodes.{{ha-nameservice}}}}",
-              "http_pattern": "{{hdfs-site/dfs.namenode.http-address.{{ha-nameservice}}.{{alias}}}}",
-              "https_pattern": "{{hdfs-site/dfs.namenode.https-address.{{ha-nameservice}}.{{alias}}}}"
-            }
-          },
-          "reporting": {
-            "ok": {
-              "text": "All {2} DataNode(s) are healthy"
-            },
-            "warning": {
-              "text": "DataNode Health: [Live={2}, Stale={1}, Dead={0}]",
-              "value": 1
-            },
-            "critical": {
-              "text": "DataNode Health: [Live={2}, Stale={1}, Dead={0}]",
-              "value": 1
-            },
-            "units": "DNs"
-          },
-          "jmx": {
-            "property_list": [
-              "Hadoop:service=NameNode,name=FSNamesystemState/NumDeadDataNodes",
-              "Hadoop:service=NameNode,name=FSNamesystemState/NumStaleDataNodes",
-              "Hadoop:service=NameNode,name=FSNamesystemState/NumLiveDataNodes"
-            ],
-            "value": "{0} + {1}"
-          }
-        }
-      },
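The METRIC alerts above all follow the same pattern: read the JMX properties listed in property_list, substitute them into the "value" expression, and compare the result against the warning/critical thresholds in the reporting block. A minimal sketch of that evaluation (not Ambari's actual metric-alert implementation; the format-then-eval mechanics are an assumption) could be:

def evaluate_metric_alert(jmx_values, value_expression, warning, critical):
    # Substitute the fetched JMX values into the alert's "value" expression,
    # e.g. "{0}/({0} + {1}) * 100.0", then classify the result against the
    # warning/critical thresholds from the reporting block.
    expression = value_expression.format(*jmx_values)
    result = eval(expression)  # expressions come from trusted stack definitions
    if result >= critical:
        return 'CRITICAL', result
    if result >= warning:
        return 'WARNING', result
    return 'OK', result


# HDFS Capacity Utilization (warning 75, critical 80) with hypothetical byte counts:
state, used_pct = evaluate_metric_alert(
    jmx_values=[812_000_000_000, 188_000_000_000],  # CapacityUsed, CapacityRemaining
    value_expression="{0}/({0} + {1}) * 100.0",
    warning=75, critical=80)
print(state, round(used_pct, 1))  # CRITICAL 81.2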
+
       {
         "name": "namenode_last_checkpoint",
         "label": "NameNode Last Checkpoint",
@@ -473,7 +25,7 @@
         "enabled": true,
         "source": {
           "type": "SCRIPT",
-          "path": "HDFS/2.1.0.2.0/package/alerts/alert_checkpoint_time.py",
+          "path": "PERF/1.0/services/HDFS/package/alerts/alert_checkpoint_time.py",
           "parameters": [
             {
               "name": "connection.timeout",
@@ -520,1265 +72,47 @@
             }
           ]
         }
-      },
+      }
+    ],
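The alert kept above is SCRIPT-based: "path" points at a Python module and the "parameters" block is passed through to it. Assuming the usual shape of such scripts (an execute() entry point returning a state plus a list of label strings), a small hypothetical example that honors the connection.timeout parameter would be; the checkpoint math here is illustrative only and is not the logic of alert_checkpoint_time.py:

import time

CONNECTION_TIMEOUT_KEY = 'connection.timeout'  # parameter name from the definition above


def execute(configurations=None, parameters=None, host_name=None):
    # Hypothetical checkpoint-age check; thresholds and the timestamp lookup
    # are placeholders for whatever the real script computes.
    parameters = parameters or {}
    timeout = float(parameters.get(CONNECTION_TIMEOUT_KEY, 5.0))

    last_checkpoint_epoch = time.time() - 3 * 3600  # pretend value
    hours_since = (time.time() - last_checkpoint_epoch) / 3600.0

    if hours_since > 12:
        return 'CRITICAL', ['Last checkpoint was {0:.1f} hours ago'.format(hours_since)]
    if hours_since > 6:
        return 'WARNING', ['Last checkpoint was {0:.1f} hours ago'.format(hours_since)]
    return 'OK', ['Last checkpoint was {0:.1f} hours ago (HTTP timeout {1}s)'.format(hours_since, timeout)]


print(execute(parameters={CONNECTION_TIMEOUT_KEY: 5.0}))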
+    "SECONDARY_NAMENODE": [
       {
-        "name": "namenode_ha_health",
-        "label": "NameNode High Availability Health",
-        "description": "This service-level alert is triggered if either the Active NameNode or Standby NameNode are not running.",
+        "name": "secondary_namenode_process",
+        "label": "Secondary NameNode Process",
+        "description": "This host-level alert is triggered if the Secondary NameNode process cannot be confirmed to be up and listening on the network.",
         "interval": 1,
-        "scope": "ANY",
-        "enabled": true,
-        "ignore_host": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HDFS/2.1.0.2.0/package/alerts/alert_ha_namenode_health.py",
-          "parameters": [
-            {
-              "name": "connection.timeout",
-              "display_name": "Connection Timeout",
-              "value": 5.0,
-              "type": "NUMERIC",
-              "description": "The maximum time before this alert is considered to be CRITICAL",
-              "units": "seconds",
-              "threshold": "CRITICAL"
-            }
-          ]
-        }
-      },
-      {
-        "name": "namenode_service_rpc_queue_latency_hourly",
-        "label": "NameNode Service RPC Queue Latency (Hourly)",
-        "description": "This service-level alert is triggered if the deviation of RPC queue latency on datanode port has grown beyond the specified threshold within an hour period.",
-        "interval": 5,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
-          "parameters": [
-            {
-              "name": "mergeHaMetrics",
-              "display_name": "Whether active and stanby NameNodes metrics should be merged",
-              "value": "false",
-              "type": "STRING",
-              "description": "Whether active and stanby NameNodes metrics should be merged.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "interval",
-              "display_name": "Time interval in minutes",
-              "value": 60,
-              "type": "NUMERIC",
-              "description": "Time interval in minutes.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "appId",
-              "display_name": "AMS application id",
-              "value": "NAMENODE",
-              "type": "STRING",
-              "description": "The application id used to retrieve the metric.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metricName",
-              "display_name": "Metric Name",
-              "value": "rpc.rpc.datanode.RpcQueueTimeAvgTime",
-              "type": "STRING",
-              "description": "The metric to monitor.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metric.deviation.warning.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 100,
-              "description": "The percentage of RPC queue latency growth.",
-              "threshold": "WARNING"
-            },
-            {
-              "name": "metric.deviation.critical.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 200,
-              "description": "The percentage of RPC queue latency growth.",
-              "threshold": "CRITICAL"
-            },
-            {
-              "name": "minimumValue",
-              "display_name": "Minimum Latency",
-              "value": 30,
-              "type": "NUMERIC",
-              "units": "seconds",
-              "description": "The minimum latency to measure growth."
-            },
-            {
-              "name": "metric.units",
-              "display_name": "Metric Units",
-              "type": "STRING",
-              "value": "ms",
-              "description": "The units that the metric data points are reported in.",
-              "visibility": "HIDDEN"
-            }
-          ]
-        }
-      },
-      {
-        "name": "namenode_client_rpc_queue_latency_hourly",
-        "label": "NameNode Client RPC Queue Latency (Hourly)",
-        "description": "This service-level alert is triggered if the deviation of RPC queue latency on client port has grown beyond the specified threshold within an hour period.",
-        "interval": 5,
-        "scope": "ANY",
+        "scope": "HOST",
         "enabled": true,
         "source": {
           "type": "SCRIPT",
-          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
-          "parameters": [
-            {
-              "name": "mergeHaMetrics",
-              "display_name": "Whether active and stanby NameNodes metrics should be merged",
-              "value": "false",
-              "type": "STRING",
-              "description": "Whether active and stanby NameNodes metrics should be merged.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "interval",
-              "display_name": "Time interval in minutes",
-              "value": 60,
-              "type": "NUMERIC",
-              "description": "Time interval in minutes.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "appId",
-              "display_name": "AMS application id",
-              "value": "NAMENODE",
-              "type": "STRING",
-              "description": "The application id used to retrieve the metric.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metricName",
-              "display_name": "Metric Name",
-              "value": "rpc.rpc.client.RpcQueueTimeAvgTime",
-              "type": "STRING",
-              "description": "The metric to monitor.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metric.deviation.warning.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 100,
-              "description": "The percentage of RPC queue latency growth.",
-              "threshold": "WARNING"
-            },
-            {
-              "name": "metric.deviation.critical.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 200,
-              "description": "The percentage of RPC queue latency growth.",
-              "threshold": "CRITICAL"
-            },
-            {
-              "name": "minimumValue",
-              "display_name": "Minimum Latency",
-              "value": 30,
-              "type": "NUMERIC",
-              "units": "seconds",
-              "description": "The minimum latency to measure growth."
-            },
-            {
-              "name": "metric.units",
-              "display_name": "Metric Units",
-              "type": "STRING",
-              "value": "ms",
-              "description": "The units that the metric data points are reported in.",
-              "visibility": "HIDDEN"
-            }
-          ]
+          "path": "PERF/1.0/services/HDFS/package/alerts/alert_snamenode_process.py"
         }
-      },
+      }
+    ],
+    "NFS_GATEWAY": [
       {
-        "name": "namenode_service_rpc_processing_latency_hourly",
-        "label": "NameNode Service RPC Processing Latency (Hourly)",
-        "description": "This service-level alert is triggered if the deviation of RPC latency on datanode port has grown beyond the specified threshold within an hour period.",
-        "interval": 5,
-        "scope": "ANY",
+        "name": "nfsgateway_process",
+        "label": "NFS Gateway Process",
+        "description": "This host-level alert is triggered if the NFS Gateway process cannot be confirmed to be up and listening on the network.",
+        "interval": 1,
+        "scope": "HOST",
         "enabled": true,
         "source": {
           "type": "SCRIPT",
-          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
-          "parameters": [
-            {
-              "name": "mergeHaMetrics",
-              "display_name": "Whether active and stanby NameNodes metrics should be merged",
-              "value": "false",
-              "type": "STRING",
-              "description": "Whether active and stanby NameNodes metrics should be merged.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "interval",
-              "display_name": "Time interval in minutes",
-              "value": 60,
-              "type": "NUMERIC",
-              "description": "Time interval in minutes.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "appId",
-              "display_name": "AMS application id",
-              "value": "NAMENODE",
-              "type": "STRING",
-              "description": "The application id used to retrieve the metric.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metricName",
-              "display_name": "Metric Name",
-              "value": "rpc.rpc.datanode.RpcProcessingTimeAvgTime",
-              "type": "STRING",
-              "description": "The metric to monitor.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metric.deviation.warning.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 100,
-              "description": "The percentage of RPC processing latency growth.",
-              "threshold": "WARNING"
-            },
-            {
-              "name": "metric.deviation.critical.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 200,
-              "description": "The percentage of RPC processing latency growth.",
-              "threshold": "CRITICAL"
-            },
-            {
-              "name": "minimumValue",
-              "display_name": "Minimum Latency",
-              "value": 30,
-              "type": "NUMERIC",
-              "units": "seconds",
-              "description": "The minimum latency to measure growth."
-            },
-            {
-              "name": "metric.units",
-              "display_name": "Metric Units",
-              "type": "STRING",
-              "value": "ms",
-              "description": "The units that the metric data points are reported in.",
-              "visibility": "HIDDEN"
-            }
-          ]
+          "path": "PERF/1.0/services/HDFS/package/alerts/alert_nfs_gateway_process.py"
         }
-      },
+      }
+    ],
+    "DATANODE": [
       {
-        "name": "namenode_client_rpc_processing_latency_hourly",
-        "label": "NameNode Client RPC Processing Latency (Hourly)",
-        "description": "This service-level alert is triggered if the deviation of RPC latency on client port has grown beyond the specified threshold within an hour period.",
-        "interval": 5,
-        "scope": "ANY",
+        "name": "datanode_unmounted_data_dir",
+        "label": "DataNode Unmounted Data Dir",
+        "description": "This host-level alert is triggered if one of the data directories on a host was previously on a mount point and became unmounted. If the mount history file does not exist, then report an error if a host has one or more mounted data directories as well as one or more unmounted data directories on the root partition. This may indicate that a data directory is writing to the root partition, which is undesirable.",
+        "interval": 1,
+        "scope": "HOST",
         "enabled": true,
         "source": {
           "type": "SCRIPT",
-          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
-          "parameters": [
-            {
-              "name": "mergeHaMetrics",
-              "display_name": "Whether active and stanby NameNodes metrics should be merged",
-              "value": "false",
-              "type": "STRING",
-              "description": "Whether active and stanby NameNodes metrics should be merged.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "interval",
-              "display_name": "Time interval in minutes",
-              "value": 60,
-              "type": "NUMERIC",
-              "description": "Time interval in minutes.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "appId",
-              "display_name": "AMS application id",
-              "value": "NAMENODE",
-              "type": "STRING",
-              "description": "The application id used to retrieve the metric.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metricName",
-              "display_name": "Metric Name",
-              "value": "rpc.rpc.client.RpcProcessingTimeAvgTime",
-              "type": "STRING",
-              "description": "The metric to monitor.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metric.deviation.warning.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 100,
-              "description": "The percentage of RPC processing latency growth.",
-              "threshold": "WARNING"
-            },
-            {
-              "name": "metric.deviation.critical.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 200,
-              "description": "The percentage of RPC processing latency growth.",
-              "threshold": "CRITICAL"
-            },
-            {
-              "name": "minimumValue",
-              "display_name": "Minimum Latency",
-              "value": 30,
-              "type": "NUMERIC",
-              "units": "seconds",
-              "description": "The minimum latency to measure growth."
-            },
-            {
-              "name": "metric.units",
-              "display_name": "Metric Units",
-              "type": "STRING",
-              "value": "ms",
-              "description": "The units that the metric data points are reported in.",
-              "visibility": "HIDDEN"
-            }
-          ]
-        }
-      },
-      {
-        "name": "increase_nn_heap_usage_daily",
-        "label": "NameNode Heap Usage (Daily)",
-        "description": "This service-level alert is triggered if the NameNode heap usage deviation has grown beyond the specified threshold within a day period.",
-        "interval": 480,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
-          "parameters": [
-            {
-              "name": "mergeHaMetrics",
-              "display_name": "Whether active and stanby NameNodes metrics should be merged",
-              "value": "false",
-              "type": "STRING",
-              "description": "Whether active and stanby NameNodes metrics should be merged.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "interval",
-              "display_name": "Time interval in minutes",
-              "value": 1440,
-              "type": "NUMERIC",
-              "description": "Time interval in minutes.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "appId",
-              "display_name": "AMS application id",
-              "value": "NAMENODE",
-              "type": "STRING",
-              "description": "The application id used to retrieve the metric.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metricName",
-              "display_name": "Metric Name",
-              "value": "jvm.JvmMetrics.MemHeapUsedM",
-              "type": "STRING",
-              "description": "The metric to monitor.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metric.deviation.warning.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 20,
-              "description": "The percentage of NameNode heap usage growth.",
-              "threshold": "WARNING"
-            },
-            {
-              "name": "metric.deviation.critical.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 50,
-              "description": "The percentage of NameNode heap usage growth.",
-              "threshold": "CRITICAL"
-            },
-            {
-              "name": "metric.units",
-              "display_name": "Metric Units",
-              "type": "STRING",
-              "value": "MB",
-              "description": "The units that the metric data points are reported in.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "minimumValue",
-              "display_name": "Minimum Heap",
-              "value": 100,
-              "type": "NUMERIC",
-              "units": "MB",
-              "description": "The minimum heap increase in a day."
-            }
-          ]
-        }
-      },
-      {
-        "name": "namenode_service_rpc_processing_latency_daily",
-        "label": "NameNode Service RPC Processing Latency (Daily)",
-        "description": "This service-level alert is triggered if the deviation of RPC latency on datanode port has grown beyond the specified threshold within a day period.",
-        "interval": 480,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
-          "parameters": [
-            {
-              "name": "mergeHaMetrics",
-              "display_name": "Whether active and stanby NameNodes metrics should be merged",
-              "value": "false",
-              "type": "STRING",
-              "description": "Whether active and stanby NameNodes metrics should be merged.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "interval",
-              "display_name": "Time interval in minutes",
-              "value": 1440,
-              "type": "NUMERIC",
-              "description": "Time interval in minutes.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "appId",
-              "display_name": "AMS application id",
-              "value": "NAMENODE",
-              "type": "STRING",
-              "description": "The application id used to retrieve the metric.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metricName",
-              "display_name": "Metric Name",
-              "value": "rpc.rpc.datanode.RpcProcessingTimeAvgTime",
-              "type": "STRING",
-              "description": "The metric to monitor.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metric.deviation.warning.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 100,
-              "description": "The percentage of RPC processing latency growth.",
-              "threshold": "WARNING"
-            },
-            {
-              "name": "metric.deviation.critical.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 200,
-              "description": "The percentage of RPC processing latency growth.",
-              "threshold": "CRITICAL"
-            },
-            {
-              "name": "minimumValue",
-              "display_name": "Minimum Latency",
-              "value": 30,
-              "type": "NUMERIC",
-              "units": "seconds",
-              "description": "The minimum latency to measure growth."
-            },
-            {
-              "name": "metric.units",
-              "display_name": "Metric Units",
-              "type": "STRING",
-              "value": "ms",
-              "description": "The units that the metric data points are reported in.",
-              "visibility": "HIDDEN"
-            }
-          ]
-        }
-      },
-      {
-        "name": "namenode_client_rpc_processing_latency_daily",
-        "label": "NameNode Client RPC Processing Latency (Daily)",
-        "description": "This service-level alert is triggered if the deviation of RPC latency on client port has grown beyond the specified threshold within a day period.",
-        "interval": 480,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
-          "parameters": [
-            {
-              "name": "mergeHaMetrics",
-              "display_name": "Whether active and stanby NameNodes metrics should be merged",
-              "value": "false",
-              "type": "STRING",
-              "description": "Whether active and stanby NameNodes metrics should be merged.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "interval",
-              "display_name": "Time interval in minutes",
-              "value": 1440,
-              "type": "NUMERIC",
-              "description": "Time interval in minutes.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "appId",
-              "display_name": "AMS application id",
-              "value": "NAMENODE",
-              "type": "STRING",
-              "description": "The application id used to retrieve the metric.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metricName",
-              "display_name": "Metric Name",
-              "value": "rpc.rpc.client.RpcProcessingTimeAvgTime",
-              "type": "STRING",
-              "description": "The metric to monitor.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metric.deviation.warning.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 100,
-              "description": "The percentage of RPC processing latency growth.",
-              "threshold": "WARNING"
-            },
-            {
-              "name": "metric.deviation.critical.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 200,
-              "description": "The percentage of RPC processing latency growth.",
-              "threshold": "CRITICAL"
-            },
-            {
-              "name": "minimumValue",
-              "display_name": "Minimum Latency",
-              "value": 30,
-              "type": "NUMERIC",
-              "units": "seconds",
-              "description": "The minimum latency to measure growth."
-            },
-            {
-              "name": "metric.units",
-              "display_name": "Metric Units",
-              "type": "STRING",
-              "value": "ms",
-              "description": "The units that the metric data points are reported in.",
-              "visibility": "HIDDEN"
-            }
-          ]
-        }
-      },
-      {
-        "name": "namenode_service_rpc_queue_latency_daily",
-        "label": "NameNode Service RPC Queue Latency (Daily)",
-        "description": "This service-level alert is triggered if the deviation of RPC latency on datanode port has grown beyond the specified threshold within a day period.",
-        "interval": 480,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
-          "parameters": [
-            {
-              "name": "mergeHaMetrics",
-              "display_name": "Whether active and stanby NameNodes metrics should be merged",
-              "value": "false",
-              "type": "STRING",
-              "description": "Whether active and stanby NameNodes metrics should be merged.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "interval",
-              "display_name": "Time interval in minutes",
-              "value": 1440,
-              "type": "NUMERIC",
-              "description": "Time interval in minutes.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "appId",
-              "display_name": "AMS application id",
-              "value": "NAMENODE",
-              "type": "STRING",
-              "description": "The application id used to retrieve the metric.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metricName",
-              "display_name": "Metric Name",
-              "value": "rpc.rpc.datanode.RpcQueueTimeAvgTime",
-              "type": "STRING",
-              "description": "The metric to monitor.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metric.deviation.warning.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 100,
-              "description": "The percentage of RPC queue latency growth.",
-              "threshold": "WARNING"
-            },
-            {
-              "name": "metric.deviation.critical.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 200,
-              "description": "The percentage of RPC queue latency growth.",
-              "threshold": "CRITICAL"
-            },
-            {
-              "name": "minimumValue",
-              "display_name": "Minimum Latency",
-              "value": 30,
-              "type": "NUMERIC",
-              "units": "seconds",
-              "description": "The minimum latency to measure growth."
-            },
-            {
-              "name": "metric.units",
-              "display_name": "Metric Units",
-              "type": "STRING",
-              "value": "MB",
-              "description": "The units that the metric data points are reported in.",
-              "visibility": "HIDDEN"
-            }
-          ]
-        }
-      },
-      {
-        "name": "namenode_client_rpc_queue_latency_daily",
-        "label": "NameNode Client RPC Queue Latency (Daily)",
-        "description": "This service-level alert is triggered if the deviation of RPC latency on client port has grown beyond the specified threshold within a day period.",
-        "interval": 480,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
-          "parameters": [
-            {
-              "name": "mergeHaMetrics",
-              "display_name": "Whether active and stanby NameNodes metrics should be merged",
-              "value": "false",
-              "type": "STRING",
-              "description": "Whether active and stanby NameNodes metrics should be merged.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "interval",
-              "display_name": "Time interval in minutes",
-              "value": 1440,
-              "type": "NUMERIC",
-              "description": "Time interval in minutes.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "appId",
-              "display_name": "AMS application id",
-              "value": "NAMENODE",
-              "type": "STRING",
-              "description": "The application id used to retrieve the metric.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metricName",
-              "display_name": "Metric Name",
-              "value": "rpc.rpc.client.RpcQueueTimeAvgTime",
-              "type": "STRING",
-              "description": "The metric to monitor.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metric.deviation.warning.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 100,
-              "description": "The percentage of RPC queue latency growth.",
-              "threshold": "WARNING"
-            },
-            {
-              "name": "metric.deviation.critical.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 200,
-              "description": "The percentage of RPC queue latency growth.",
-              "threshold": "CRITICAL"
-            },
-            {
-              "name": "minimumValue",
-              "display_name": "Minimum Latency",
-              "value": 30,
-              "type": "NUMERIC",
-              "units": "seconds",
-              "description": "The minimum latency to measure growth."
-            },
-            {
-              "name": "metric.units",
-              "display_name": "Metric Units",
-              "type": "STRING",
-              "value": "ms",
-              "description": "The units that the metric data points are reported in.",
-              "visibility": "HIDDEN"
-            }
-          ]
-        }
-      },
-      {
-        "name": "namenode_increase_in_storage_capacity_usage_daily",
-        "label": "HDFS Storage Capacity Usage (Daily)",
-        "description": "This service-level alert is triggered if the increase in storage capacity usage deviation has grown beyond the specified threshold within a day period.",
-        "interval": 480,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
-          "parameters": [
-            {
-              "name": "mergeHaMetrics",
-              "display_name": "Whether active and stanby NameNodes metrics should be merged",
-              "value": "false",
-              "type": "STRING",
-              "description": "Whether active and stanby NameNodes metrics should be merged.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "interval",
-              "display_name": "Time interval in minutes",
-              "value": 1440,
-              "type": "NUMERIC",
-              "description": "Time interval in minutes.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "appId",
-              "display_name": "AMS application id",
-              "value": "NAMENODE",
-              "type": "STRING",
-              "description": "The application id used to retrieve the metric.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metricName",
-              "display_name": "Metric Name",
-              "value": "dfs.FSNamesystem.CapacityUsed",
-              "type": "STRING",
-              "description": "The metric to monitor.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metric.deviation.warning.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 30,
-              "description": "The percentage of storage capacity usage growth.",
-              "threshold": "WARNING"
-            },
-            {
-              "name": "metric.deviation.critical.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 50,
-              "description": "The percentage of storage capacity usage growth.",
-              "threshold": "CRITICAL"
-            },
-            {
-              "name": "metric.units",
-              "display_name": "Metric Units",
-              "type": "STRING",
-              "value": "B",
-              "description": "The units that the metric data points are reported in.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "minimumValue",
-              "display_name": "Minimum Capacity",
-              "value": 100,
-              "type": "NUMERIC",
-              "units": "MB",
-              "description": "The minimum capacity increase in a day."
-            }
-          ]
-        }
-      },
-      {
-        "name": "increase_nn_heap_usage_weekly",
-        "label": "NameNode Heap Usage (Weekly)",
-        "description": "This service-level alert is triggered if the NameNode heap usage deviation has grown beyond the specified threshold within a week period.",
-        "interval": 1440,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
-          "parameters": [
-            {
-              "name": "mergeHaMetrics",
-              "display_name": "Whether active and stanby NameNodes metrics should be merged",
-              "value": "false",
-              "type": "STRING",
-              "description": "Whether active and stanby NameNodes metrics should be merged.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "interval",
-              "display_name": "Time interval in minutes",
-              "value": 10080,
-              "type": "NUMERIC",
-              "description": "Time interval in minutes.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "appId",
-              "display_name": "AMS application id",
-              "value": "NAMENODE",
-              "type": "STRING",
-              "description": "The application id used to retrieve the metric.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metricName",
-              "display_name": "Metric Name",
-              "value": "jvm.JvmMetrics.MemHeapUsedM",
-              "type": "STRING",
-              "description": "The metric to monitor.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metric.deviation.warning.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 20,
-              "description": "The percentage of NameNode heap usage growth.",
-              "threshold": "WARNING"
-            },
-            {
-              "name": "metric.deviation.critical.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 50,
-              "description": "The percentage of NameNode heap usage growth.",
-              "threshold": "CRITICAL"
-            },
-            {
-              "name": "metric.units",
-              "display_name": "Metric Units",
-              "type": "STRING",
-              "value": "MB",
-              "description": "The units that the metric data points are reported in.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "minimumValue",
-              "display_name": "Minimum Heap",
-              "value": 1000,
-              "type": "NUMERIC",
-              "units": "MB",
-              "description": "The minimum heap increase in a week."
-            }
-          ]
-        }
-      },
-      {
-        "name": "namenode_increase_in_storage_capacity_usage_weekly",
-        "label": "HDFS Storage Capacity Usage (Weekly)",
-        "description": "This service-level alert is triggered if the increase in storage capacity usage deviation has grown beyond the specified threshold within a week period.",
-        "interval": 1440,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
-          "parameters": [
-            {
-              "name": "mergeHaMetrics",
-              "display_name": "Whether active and stanby NameNodes metrics should be merged",
-              "value": "false",
-              "type": "STRING",
-              "description": "Whether active and stanby NameNodes metrics should be merged.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "interval",
-              "display_name": "Time interval in minutes",
-              "value": 10080,
-              "type": "NUMERIC",
-              "description": "Time interval in minutes.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "appId",
-              "display_name": "AMS application id",
-              "value": "NAMENODE",
-              "type": "STRING",
-              "description": "The application id used to retrieve the metric.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metricName",
-              "display_name": "Metric Name",
-              "value": "dfs.FSNamesystem.CapacityUsed",
-              "type": "STRING",
-              "description": "The metric to monitor.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metric.deviation.warning.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 10,
-              "description": "The percentage of storage capacity usage growth.",
-              "threshold": "WARNING"
-            },
-            {
-              "name": "metric.deviation.critical.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 20,
-              "description": "The percentage of storage capacity usage growth.",
-              "threshold": "CRITICAL"
-            },
-            {
-              "name": "metric.units",
-              "display_name": "Metric Units",
-              "type": "STRING",
-              "value": "B",
-              "description": "The units that the metric data points are reported in.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "minimumValue",
-              "display_name": "Minimum Capacity",
-              "value": 1000,
-              "type": "NUMERIC",
-              "units": "MB",
-              "description": "The minimum capacity increase in a week."
-            }
-          ]
-        }
-      }
-    ],
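The hourly/daily/weekly alerts removed above all delegate to alert_metrics_deviation.py with a metric name, a time interval, and warning/critical "Growth Rate" percentages. One plausible reading of those thresholds, shown purely as a sketch and not as the actual script's logic, is to express the spread of the sampled metric as a percentage of its mean:

import statistics


def classify_deviation(samples, warning_pct=100, critical_pct=200, minimum_value=30):
    # Express the spread of the sampled metric as a percentage of its mean and
    # compare it with the "Growth Rate" thresholds; means below minimum_value
    # are ignored, mirroring the minimumValue parameter above.
    mean = statistics.mean(samples)
    if mean < minimum_value:
        return 'OK', 0.0
    deviation_pct = statistics.pstdev(samples) / mean * 100.0
    if deviation_pct >= critical_pct:
        return 'CRITICAL', deviation_pct
    if deviation_pct >= warning_pct:
        return 'WARNING', deviation_pct
    return 'OK', deviation_pct


# Hypothetical rpc.rpc.client.RpcQueueTimeAvgTime samples (ms) over one hour:
print(classify_deviation([40, 45, 50, 180, 600]))  # ('WARNING', ~117)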
-    "SECONDARY_NAMENODE": [
-      {
-        "name": "secondary_namenode_process",
-        "label": "Secondary NameNode Process",
-        "description": "This host-level alert is triggered if the Secondary NameNode process cannot be confirmed to be up and listening on the network.",
-        "interval": 1,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "WEB",
-          "uri": {
-            "http": "{{hdfs-site/dfs.namenode.secondary.http-address}}",
-            "https": "{{hdfs-site/dfs.namenode.secondary.https-address}}",
-            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
-            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY"
-          },
-          "reporting": {
-            "ok": {
-              "text": "HTTP {0} response in {2:.3f}s"
-            },
-            "warning":{
-              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
-            },
-            "critical": {
-              "text": "Connection failed to {1} ({3})"
-            }
-          }
-        }
-      }
-    ],
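The WEB-type checks, like the Secondary NameNode one above, time an HTTP GET against the component's UI address and map the outcome onto the OK/WARNING/CRITICAL texts in the reporting block. A minimal sketch, with a hypothetical address in place of the value resolved from hdfs-site, dfs.http.policy, and the Kerberos settings:

import time
import urllib.error
import urllib.request


def check_web_ui(url, timeout=5.0):
    # Time an HTTP GET against the component UI and map the outcome onto the
    # OK/WARNING/CRITICAL texts used by the WEB alerts above.
    start = time.time()
    try:
        with urllib.request.urlopen(url, timeout=timeout) as response:
            return 'OK', 'HTTP {0} response in {1:.3f}s'.format(response.status, time.time() - start)
    except urllib.error.HTTPError as err:
        return 'WARNING', 'HTTP {0} response from {1} in {2:.3f}s'.format(err.code, url, time.time() - start)
    except (urllib.error.URLError, OSError) as err:
        return 'CRITICAL', 'Connection failed to {0} ({1})'.format(url, err)


# Hypothetical Secondary NameNode UI address (dfs.namenode.secondary.http-address):
print(check_web_ui('http://localhost:50090'))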
-    "NFS_GATEWAY": [
-      {
-        "name": "nfsgateway_process",
-        "label": "NFS Gateway Process",
-        "description": "This host-level alert is triggered if the NFS Gateway process cannot be confirmed to be up and listening on the network.",
-        "interval": 1,
-        "scope": "HOST",
-        "enabled": true,
-        "source": {
-          "type": "PORT",
-          "uri": "{{hdfs-site/nfs.server.port}}",
-          "default_port": 2049,
-          "reporting": {
-            "ok": {
-              "text": "TCP OK - {0:.3f}s response on port {1}"
-            },
-            "warning": {
-              "text": "TCP OK - {0:.3f}s response on port {1}",
-              "value": 1.5
-            },
-            "critical": {
-              "text": "Connection failed: {0} to {1}:{2}",
-              "value": 5.0
-            }
-          }
-        }
-      }
-    ],
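The PORT-type checks, like the NFS Gateway one above, time a TCP connect to the configured port and classify the response: OK under the 1.5s warning value, WARNING up to the 5.0s critical value, CRITICAL on failure. A minimal sketch with a hypothetical host:

import socket
import time


def check_port(host, port, warning=1.5, critical=5.0):
    # Time a TCP connect and classify it the way the PORT reporting block does:
    # OK under the warning threshold, WARNING up to the critical threshold,
    # CRITICAL when the connection fails or times out.
    start = time.time()
    try:
        with socket.create_connection((host, port), timeout=critical):
            elapsed = time.time() - start
    except OSError as err:
        return 'CRITICAL', 'Connection failed: {0} to {1}:{2}'.format(err, host, port)
    state = 'WARNING' if elapsed >= warning else 'OK'
    return state, 'TCP OK - {0:.3f}s response on port {1}'.format(elapsed, port)


print(check_port('localhost', 2049))  # 2049 is the default_port in the NFS Gateway alert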
-    "JOURNALNODE": [
-      {
-        "name": "journalnode_process",
-        "label": "JournalNode Web UI",
-        "description": "This host-level alert is triggered if the JournalNode Web UI is unreachable.",
-        "interval": 1,
-        "scope": "HOST",
-        "enabled": true,
-        "source": {
-          "type": "WEB",
-          "uri": {
-            "http": "{{hdfs-site/dfs.journalnode.http-address}}",
-            "https": "{{hdfs-site/dfs.journalnode.https-address}}",
-            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
-            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "connection_timeout": 5.0
-          },
-          "reporting": {
-            "ok": {
-              "text": "HTTP {0} response in {2:.3f}s"
-            },
-            "warning": {
-              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
-            },
-            "critical": {
-              "text": "Connection failed to {1} ({3})"
-            }
-          }
-        }
-      }
-    ],      
-    "DATANODE": [
-      {
-        "name": "datanode_process",
-        "label": "DataNode Process",
-        "description": "This host-level alert is triggered if the individual DataNode processes cannot be established to be up and listening on the network.",
-        "interval": 1,
-        "scope": "HOST",
-        "enabled": true,
-        "source": {
-          "type": "PORT",        
-          "uri": "{{hdfs-site/dfs.datanode.address}}",
-          "default_port": 50010,
-          "reporting": {
-            "ok": {
-              "text": "TCP OK - {0:.3f}s response on port {1}"
-            },
-            "warning": {
-              "text": "TCP OK - {0:.3f}s response on port {1}",
-              "value": 1.5
-            },
-            "critical": {
-              "text": "Connection failed: {0} to {1}:{2}",
-              "value": 5.0
-            }
-          }
-        }
-      },
-      {
-        "name": "datanode_webui",
-        "label": "DataNode Web UI",
-        "description": "This host-level alert is triggered if the DataNode Web UI is unreachable.",
-        "interval": 1,
-        "scope": "HOST",
-        "enabled": true,
-        "source": {
-          "type": "WEB",
-          "uri": {
-            "http": "{{hdfs-site/dfs.datanode.http.address}}",
-            "https": "{{hdfs-site/dfs.datanode.https.address}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
-            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
-            "connection_timeout": 5.0
-          },
-          "reporting": {
-            "ok": {
-              "text": "HTTP {0} response in {2:.3f}s"
-            },
-            "warning":{
-              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
-            },
-            "critical": {
-              "text": "Connection failed to {1} ({3})"
-            }
-          }
-        }
-      },    
-      {
-        "name": "datanode_storage",
-        "label": "DataNode Storage",
-        "description": "This host-level alert is triggered if storage capacity if full on the DataNode. It checks the DataNode JMX Servlet for the Capacity and Remaining properties. The threshold values are in percent.",
-        "interval": 2,
-        "scope": "HOST",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{hdfs-site/dfs.datanode.http.address}}",
-            "https": "{{hdfs-site/dfs.datanode.https.address}}",
-            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
-            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "connection_timeout": 5.0
-          },
-          "reporting": {
-            "ok": {
-              "text": "Remaining Capacity:[{0}], Total Capacity:[{2:.0f}% Used, {1}]"
-            },
-            "warning": {
-              "text": "Remaining Capacity:[{0}], Total Capacity:[{2:.0f}% Used, {1}]",
-              "value": 75
-            },
-            "critical": {
-              "text": "Remaining Capacity:[{0}], Total Capacity:[{2:.0f}% Used, {1}]",
-              "value": 80
-            },
-            "units" : "%",
-            "type": "PERCENT"
-          },
-          "jmx": {
-            "property_list": [
-              "Hadoop:service=DataNode,name=FSDatasetState-*/Remaining",
-              "Hadoop:service=DataNode,name=FSDatasetState-*/Capacity"
-            ],
-            "value": "({1} - {0})/{1} * 100.0"
-          }
-        }
-      },
-      {
-        "name": "datanode_unmounted_data_dir",
-        "label": "DataNode Unmounted Data Dir",
-        "description": "This host-level alert is triggered if one of the data directories on a host was previously on a mount point and became unmounted. If the mount history file does not exist, then report an error if a host has one or more mounted data directories as well as one or more unmounted data directories on the root partition. This may indicate that a data directory is writing to the root partition, which is undesirable.",
-        "interval": 2,
-        "scope": "HOST",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HDFS/2.1.0.2.0/package/alerts/alert_datanode_unmounted_data_dir.py"
-        }
-      },
-      {
-        "name": "datanode_heap_usage",
-        "label": "DataNode Heap Usage",
-        "description": "This host-level alert is triggered if heap usage goes past thresholds on the DataNode. It checks the DataNode JMXServlet for the MemHeapUsedM and MemHeapMaxM properties. The threshold values are in percent.",
-        "interval": 2,
-        "scope": "HOST",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{hdfs-site/dfs.datanode.http.address}}",
-            "https": "{{hdfs-site/dfs.datanode.https.address}}",
-            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
-            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "connection_timeout": 5.0
-          },
-          "reporting": {
-            "ok": {
-              "text": "Used Heap:[{2:.0f}%, {0} MB], Max Heap: {1} MB"
-            },
-            "warning": {
-              "text": "Used Heap:[{2:.0f}%, {0} MB], Max Heap: {1} MB",
-              "value": 80
-            },
-            "critical": {
-              "text": "Used Heap:[{2:.0f}%, {0} MB], Max Heap: {1} MB",
-              "value": 90
-            },
-            "units" : "%",
-            "type": "PERCENT"
-          },
-          "jmx": {
-            "property_list": [
-              "Hadoop:service=DataNode,name=JvmMetrics/MemHeapUsedM",
-              "Hadoop:service=DataNode,name=JvmMetrics/MemHeapMaxM"
-            ],
-            "value": "100.0 - (({1} - {0})/{1} * 100.0)"
-          }
-        }
-      }
-    ],
-    "ZKFC": [
-      {
-        "name": "hdfs_zookeeper_failover_controller_process",
-        "label": "ZooKeeper Failover Controller Process",
-        "description": "This host-level alert is triggered if the ZooKeeper Failover Controller process cannot be confirmed to be up and listening on the network.",
-        "interval": 1,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "PORT",
-          "uri": "{{hdfs-site/dfs.ha.zkfc.port}}",
-          "default_port": 8019,
-          "reporting": {
-            "ok": {
-              "text": "TCP OK - {0:.3f}s response on port {1}"
-            },
-            "warning": {
-              "text": "TCP OK - {0:.3f}s response on port {1}",
-              "value": 1.5
-            },
-            "critical": {
-              "text": "Connection failed: {0} to {1}:{2}",
-              "value": 5.0
-            }
-          }
+          "path": "PERF/1.0/services/HDFS/package/alerts/alert_datanode_unmounted_data_dir.py"
         }
       }
     ]

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hdfs-alert-config.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hdfs-alert-config.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hdfs-alert-config.xml
new file mode 100644
index 0000000..392eea7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hdfs-alert-config.xml
@@ -0,0 +1,80 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+-->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
+
+    <property>
+        <name>alert.behavior.type</name>
+        <value>percentage</value>
+        <description>
+            This property describes the type of alert behaviour.
+            There are three types: percentage, timeout, flip.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+
+    <property>
+        <name>alert.success.percentage</name>
+        <value>100</value>
+        <description>
+            This property takes effect only when alert.behavior.type is
+            set to "percentage". Set it to the percentage of alert checks
+            that should succeed.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+
+    <property>
+        <name>alert.timeout.return.value</name>
+        <value>false</value>
+        <description>
+            This property takes effect only when alert.behavior.type is
+            set to "timeout". Set it to the result the alert should return
+            after the timeout: false|true|none.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>alert.timeout.secs</name>
+        <value>120</value>
+        <description>
+            This property takes effect only when alert.behavior.type is
+            set to "timeout". Set it to the number of seconds the alert
+            should sleep.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+
+    <property>
+        <name>alert.flip.interval.mins</name>
+        <value>3</value>
+        <description>
+            This property takes effect only when alert.behavior.type is
+            set to "flip". Set it to the number of minutes after which the
+            alert should flip between true and false.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+
+</configuration>
\ No newline at end of file
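
For orientation, the four settings above drive the simulated alerts in the PERF stack: percentage mode succeeds for a configurable fraction of checks, timeout mode sleeps and then returns a fixed result, and flip mode alternates between states on a schedule. The sketch below is one assumed reading of that contract, not the actual simulate_perf_cluster_alert_behaviour implementation used by the alert scripts later in this commit:

    import random
    import time

    def simulate_alert(behaviour_type, success_percentage=100, timeout_secs=120,
                       timeout_return_value='false', flip_interval_mins=3):
        # percentage: succeed for roughly success_percentage percent of checks
        if behaviour_type == 'percentage':
            ok = random.uniform(0, 100) <= float(success_percentage)
            return ('OK', ['Ok']) if ok else ('CRITICAL', ['Simulated failure'])
        # timeout: sleep, then return the configured result (false|true|none)
        if behaviour_type == 'timeout':
            time.sleep(float(timeout_secs))
            if timeout_return_value == 'none':
                return ('UNKNOWN', ['Simulated timeout'])
            ok = timeout_return_value == 'true'
            return ('OK', ['Ok']) if ok else ('CRITICAL', ['Simulated timeout'])
        # flip: alternate OK/CRITICAL every flip_interval_mins minutes
        if behaviour_type == 'flip':
            in_even_window = int(time.time() // (60 * float(flip_interval_mins))) % 2 == 0
            return ('OK', ['Ok']) if in_even_window else ('CRITICAL', ['Simulated flip'])
        return ('UNKNOWN', ['Unknown alert.behavior.type: %s' % behaviour_type])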

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/metainfo.xml
index bf26fbd..42edc8d 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/metainfo.xml
@@ -227,6 +227,7 @@
       </requiredServices>
 
       <configuration-dependencies>
+        <config-type>hdfs-alert-config</config-type>
         <config-type>core-site</config-type>
         <config-type>hdfs-site</config-type>
         <config-type>hadoop-env</config-type>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_checkpoint_time.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_checkpoint_time.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_checkpoint_time.py
index 1e69def..73bc168 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_checkpoint_time.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_checkpoint_time.py
@@ -20,36 +20,31 @@ limitations under the License.
 
 import logging
 
-RESULT_CODE_OK = 'OK'
-RESULT_CODE_CRITICAL = 'CRITICAL'
-RESULT_CODE_UNKNOWN = 'UNKNOWN'
+from resource_management.libraries.functions.simulate_perf_cluster_alert_behaviour import simulate_perf_cluster_alert_behaviour
 
-OK_MESSAGE = 'Ok'
+ALERT_BEHAVIOUR_TYPE = "{{hdfs-alert-config/alert.behavior.type}}"
 
-HDFS_SITE_KEY = '{{hdfs-site}}'
+ALERT_SUCCESS_PERCENTAGE = "{{hdfs-alert-config/alert.success.percentage}}"
 
-NN_HTTP_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.http-address}}'
-NN_HTTPS_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.https-address}}'
-NN_HTTP_POLICY_KEY = '{{hdfs-site/dfs.http.policy}}'
-NN_CHECKPOINT_TX_KEY = '{{hdfs-site/dfs.namenode.checkpoint.txns}}'
-NN_CHECKPOINT_PERIOD_KEY = '{{hdfs-site/dfs.namenode.checkpoint.period}}'
+ALERT_TIMEOUT_RETURN_VALUE = "{{hdfs-alert-config/alert.timeout.return.value}}"
+ALERT_TIMEOUT_SECS = "{{hdfs-alert-config/alert.timeout.secs}}"
 
-KERBEROS_KEYTAB = '{{hdfs-site/dfs.web.authentication.kerberos.keytab}}'
-KERBEROS_PRINCIPAL = '{{hdfs-site/dfs.web.authentication.kerberos.principal}}'
-SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
-SMOKEUSER_KEY = "{{cluster-env/smokeuser}}"
-EXECUTABLE_SEARCH_PATHS = '{{kerberos-env/executable_search_paths}}'
+ALERT_FLIP_INTERVAL_MINS = "{{hdfs-alert-config/alert.flip.interval.mins}}"
 
 logger = logging.getLogger('ambari_alerts')
 
+alert_behaviour_properties = {"alert_behaviour_type" : ALERT_BEHAVIOUR_TYPE, "alert_success_percentage" : ALERT_SUCCESS_PERCENTAGE,
+                              "alert_timeout_return_value" : ALERT_TIMEOUT_RETURN_VALUE, "alert_timeout_secs" : ALERT_TIMEOUT_SECS,
+                              "alert_flip_interval_mins" : ALERT_FLIP_INTERVAL_MINS}
+
 def get_tokens():
   """
   Returns a tuple of tokens in the format {{site/property}} that will be used
   to build the dictionary passed into execute
   """
-  return (HDFS_SITE_KEY, NN_HTTP_ADDRESS_KEY, NN_HTTPS_ADDRESS_KEY, NN_HTTP_POLICY_KEY, EXECUTABLE_SEARCH_PATHS,
-      NN_CHECKPOINT_TX_KEY, NN_CHECKPOINT_PERIOD_KEY, KERBEROS_KEYTAB, KERBEROS_PRINCIPAL, SECURITY_ENABLED_KEY, SMOKEUSER_KEY)
-  
+  return (ALERT_BEHAVIOUR_TYPE, ALERT_SUCCESS_PERCENTAGE, ALERT_TIMEOUT_RETURN_VALUE, ALERT_TIMEOUT_SECS,
+          ALERT_FLIP_INTERVAL_MINS)
+
 
 def execute(configurations={}, parameters={}, host_name=None):
   """
@@ -61,9 +56,4 @@ def execute(configurations={}, parameters={}, host_name=None):
   host_name (string): the name of this host where the alert is running
   """
 
-  if configurations is None:
-    return (('UNKNOWN', ['There were no configurations supplied to the script.']))
-
-  result_code = RESULT_CODE_OK
-  label = OK_MESSAGE
-  return (result_code, [label])
\ No newline at end of file
+  return simulate_perf_cluster_alert_behaviour(alert_behaviour_properties, configurations)
\ No newline at end of file
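
The rewrite above keeps the standard Ambari alert-script contract: get_tokens() advertises the {{site/property}} tokens the agent must resolve, and execute() receives the resolved values in the configurations dict. A hedged sketch of that hand-off, where resolve_config is a hypothetical stand-in for the agent's config lookup and only the percentage-mode tokens are given values:

    import alert_checkpoint_time as alert

    def resolve_config(token):
        # Hypothetical lookup; the real agent resolves each token against the
        # cluster's current configuration values.
        return {"{{hdfs-alert-config/alert.behavior.type}}": "percentage",
                "{{hdfs-alert-config/alert.success.percentage}}": "100"}.get(token)

    configurations = {token: resolve_config(token) for token in alert.get_tokens()}
    state, messages = alert.execute(configurations=configurations)
    print("%s: %s" % (state, messages[0]))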

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_datanode_unmounted_data_dir.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_datanode_unmounted_data_dir.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_datanode_unmounted_data_dir.py
index 8c122ae..73bc168 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_datanode_unmounted_data_dir.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_datanode_unmounted_data_dir.py
@@ -20,24 +20,30 @@ limitations under the License.
 
 import logging
 
-RESULT_STATE_OK = 'OK'
-RESULT_STATE_WARNING = 'WARNING'
-RESULT_STATE_CRITICAL = 'CRITICAL'
-RESULT_STATE_UNKNOWN = 'UNKNOWN'
+from resource_management.libraries.functions.simulate_perf_cluster_alert_behaviour import simulate_perf_cluster_alert_behaviour
 
-OK_MESSAGE = 'Ok'
+ALERT_BEHAVIOUR_TYPE = "{{hdfs-alert-config/alert.behavior.type}}"
 
-DFS_DATA_DIR = '{{hdfs-site/dfs.datanode.data.dir}}'
-DATA_DIR_MOUNT_FILE = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist"
+ALERT_SUCCESS_PERCENTAGE = "{{hdfs-alert-config/alert.success.percentage}}"
 
-logger = logging.getLogger()
+ALERT_TIMEOUT_RETURN_VALUE = "{{hdfs-alert-config/alert.timeout.return.value}}"
+ALERT_TIMEOUT_SECS = "{{hdfs-alert-config/alert.timeout.secs}}"
+
+ALERT_FLIP_INTERVAL_MINS = "{{hdfs-alert-config/alert.flip.interval.mins}}"
+
+logger = logging.getLogger('ambari_alerts')
+
+alert_behaviour_properties = {"alert_behaviour_type" : ALERT_BEHAVIOUR_TYPE, "alert_success_percentage" : ALERT_SUCCESS_PERCENTAGE,
+                              "alert_timeout_return_value" : ALERT_TIMEOUT_RETURN_VALUE, "alert_timeout_secs" : ALERT_TIMEOUT_SECS,
+                              "alert_flip_interval_mins" : ALERT_FLIP_INTERVAL_MINS}
 
 def get_tokens():
   """
   Returns a tuple of tokens in the format {{site/property}} that will be used
   to build the dictionary passed into execute
   """
-  return (DFS_DATA_DIR, DATA_DIR_MOUNT_FILE)
+  return (ALERT_BEHAVIOUR_TYPE, ALERT_SUCCESS_PERCENTAGE, ALERT_TIMEOUT_RETURN_VALUE, ALERT_TIMEOUT_SECS,
+          ALERT_FLIP_INTERVAL_MINS)
 
 
 def execute(configurations={}, parameters={}, host_name=None):
@@ -48,27 +54,6 @@ def execute(configurations={}, parameters={}, host_name=None):
   configurations (dictionary): a mapping of configuration key to value
   parameters (dictionary): a mapping of script parameter key to value
   host_name (string): the name of this host where the alert is running
-
-  DataNode directories can be of the following formats and each needs to be supported:
-    /grid/dn/archive0
-    [SSD]/grid/dn/archive0
-    [ARCHIVE]file:///grid/dn/archive0
   """
-  warnings = []
-  errors = []
-
-  if configurations is None:
-    return (RESULT_STATE_UNKNOWN, ['There were no configurations supplied to the script.'])
-
-  # Check required properties
-  if DFS_DATA_DIR not in configurations:
-    return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(DFS_DATA_DIR)])
-
-  dfs_data_dir = configurations[DFS_DATA_DIR]
-
-  if dfs_data_dir is None:
-    return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script and the value is null'.format(DFS_DATA_DIR)])
 
-  result_code = RESULT_STATE_OK
-  label = OK_MESSAGE
-  return (result_code, [label])
\ No newline at end of file
+  return simulate_perf_cluster_alert_behaviour(alert_behaviour_properties, configurations)
\ No newline at end of file
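
As in alert_checkpoint_time.py, the script passes alert_behaviour_properties, a map of internal labels to {{site/property}} tokens, alongside the resolved configurations dict. Presumably the shared helper dereferences each token through configurations, roughly as below; this is an assumption about simulate_perf_cluster_alert_behaviour, not its source:

    alert_behaviour_properties = {
        "alert_behaviour_type": "{{hdfs-alert-config/alert.behavior.type}}",
        "alert_success_percentage": "{{hdfs-alert-config/alert.success.percentage}}",
    }
    configurations = {
        "{{hdfs-alert-config/alert.behavior.type}}": "percentage",
        "{{hdfs-alert-config/alert.success.percentage}}": "100",
    }
    behaviour_type = configurations[alert_behaviour_properties["alert_behaviour_type"]]
    success_percentage = configurations[alert_behaviour_properties["alert_success_percentage"]]
    print("%s %s" % (behaviour_type, success_percentage))  # -> percentage 100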

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_ha_namenode_health.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_ha_namenode_health.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_ha_namenode_health.py
deleted file mode 100644
index 7f03d88..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_ha_namenode_health.py
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import logging
-
-RESULT_STATE_OK = 'OK'
-RESULT_STATE_CRITICAL = 'CRITICAL'
-RESULT_STATE_UNKNOWN = 'UNKNOWN'
-RESULT_STATE_SKIPPED = 'SKIPPED'
-
-OK_MESSAGE = 'Ok'
-
-HDFS_SITE_KEY = '{{hdfs-site}}'
-NAMESERVICE_KEY = '{{hdfs-site/dfs.internal.nameservices}}'
-NN_HTTP_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.http-address}}'
-NN_HTTPS_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.https-address}}'
-DFS_POLICY_KEY = '{{hdfs-site/dfs.http.policy}}'
-
-KERBEROS_KEYTAB = '{{hdfs-site/dfs.web.authentication.kerberos.keytab}}'
-KERBEROS_PRINCIPAL = '{{hdfs-site/dfs.web.authentication.kerberos.principal}}'
-SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
-SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
-EXECUTABLE_SEARCH_PATHS = '{{kerberos-env/executable_search_paths}}'
-
-logger = logging.getLogger('ambari_alerts')
-
-def get_tokens():
-  """
-  Returns a tuple of tokens in the format {{site/property}} that will be used
-  to build the dictionary passed into execute
-  """
-  return (HDFS_SITE_KEY, NAMESERVICE_KEY, NN_HTTP_ADDRESS_KEY, EXECUTABLE_SEARCH_PATHS,
-  NN_HTTPS_ADDRESS_KEY, DFS_POLICY_KEY, SMOKEUSER_KEY, KERBEROS_KEYTAB, KERBEROS_PRINCIPAL, SECURITY_ENABLED_KEY)
-  
-
-def execute(configurations={}, parameters={}, host_name=None):
-  """
-  Returns a tuple containing the result code and a pre-formatted result label
-
-  Keyword arguments:
-  configurations (dictionary): a mapping of configuration key to value
-  parameters (dictionary): a mapping of script parameter key to value
-  host_name (string): the name of this host where the alert is running
-  """
-  if configurations is None:
-    return (RESULT_STATE_UNKNOWN, ['There were no configurations supplied to the script.'])
-
-  # if not in HA mode, then SKIP
-  if not NAMESERVICE_KEY in configurations:
-    return (RESULT_STATE_SKIPPED, ['NameNode HA is not enabled'])
-
-  # hdfs-site is required
-  if not HDFS_SITE_KEY in configurations:
-    return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(HDFS_SITE_KEY)])
-
-  result_code = RESULT_STATE_OK
-  label = OK_MESSAGE
-  return (result_code, [label])


[21/25] ambari git commit: Merge from branch-2.5

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py
index 7c2f2df..65487b7 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py
@@ -18,19 +18,24 @@ limitations under the License.
 
 """
 import httplib
+
+from ambari_commons.parallel_processing import PrallelProcessResult, execute_in_parallel, SUCCESS
+from service_check import post_metrics_to_collector
 from resource_management.core.logger import Logger
 from resource_management.core.base import Fail
 from resource_management import Template
 from collections import namedtuple
 from urlparse import urlparse
 from base64 import b64encode
+import random
 import time
 import socket
 import ambari_simplejson as json
 import network
+import os
 
-GRAFANA_CONNECT_TRIES = 5
-GRAFANA_CONNECT_TIMEOUT = 10
+GRAFANA_CONNECT_TRIES = 15
+GRAFANA_CONNECT_TIMEOUT = 20
 GRAFANA_SEARCH_BUILTIN_DASHBOARDS = "/api/search?tag=builtin"
 GRAFANA_DATASOURCE_URL = "/api/datasources"
 GRAFANA_DASHBOARDS_URL = "/api/dashboards/db"
@@ -171,20 +176,32 @@ def perform_grafana_delete_call(url, server):
 
   return response
 
-def is_unchanged_datasource_url(datasource_url):
+def is_unchanged_datasource_url(grafana_datasource_url, new_datasource_host):
   import params
-  parsed_url = urlparse(datasource_url)
+  parsed_url = urlparse(grafana_datasource_url)
   Logger.debug("parsed url: scheme = %s, host = %s, port = %s" % (
     parsed_url.scheme, parsed_url.hostname, parsed_url.port))
   Logger.debug("collector: scheme = %s, host = %s, port = %s" %
-              (params.metric_collector_protocol, params.metric_collector_host,
+              (params.metric_collector_protocol, new_datasource_host,
                params.metric_collector_port))
 
   return parsed_url.scheme.strip() == params.metric_collector_protocol.strip() and \
-         parsed_url.hostname.strip() == params.metric_collector_host.strip() and \
+         parsed_url.hostname.strip() == new_datasource_host.strip() and \
          str(parsed_url.port) == params.metric_collector_port
 
+def do_ams_collector_post(metric_collector_host, params):
+    ams_metrics_post_url = "/ws/v1/timeline/metrics/"
+    random_value1 = random.random()
+    headers = {"Content-type": "application/json"}
+    ca_certs = os.path.join(params.ams_collector_conf_dir,
+                            params.metric_truststore_ca_certs)
+
+    current_time = int(time.time()) * 1000
+    metric_json = Template('smoketest_metrics.json.j2', hostname=params.hostname, random1=random_value1,
+                           current_time=current_time).get_content()
 
+    post_metrics_to_collector(ams_metrics_post_url, metric_collector_host, params.metric_collector_port, params.metric_collector_https_enabled,
+                                metric_json, headers, ca_certs)
 def create_ams_datasource():
   import params
   server = Server(protocol = params.ams_grafana_protocol.strip(),
@@ -196,11 +213,28 @@ def create_ams_datasource():
   """
   Create AMS datasource in Grafana, if exsists make sure the collector url is accurate
   """
-  ams_datasource_json = Template('metrics_grafana_datasource.json.j2',
-                                 ams_datasource_name=METRICS_GRAFANA_DATASOURCE_NAME).get_content()
+  Logger.info("Trying to find working metric collector")
+  results = execute_in_parallel(do_ams_collector_post, params.ams_collector_hosts.split(','), params)
+  new_datasource_host = ""
+
+  for host in params.ams_collector_hosts.split(','):
+    if host in results:
+      if results[host].status == SUCCESS:
+        new_datasource_host = host
+        Logger.info("Found working collector on host %s" % new_datasource_host)
+        break
+      else:
+        Logger.warning(results[host].result)
 
-  Logger.info("Checking if AMS Grafana datasource already exists")
+  if new_datasource_host == "":
+    Logger.warning("All metric collectors are unavailable. Will use random collector as datasource host.")
+    new_datasource_host = params.random_metric_collector_host
 
+  Logger.info("New datasource host will be %s" % new_datasource_host)
+
+  ams_datasource_json = Template('metrics_grafana_datasource.json.j2',
+                            ams_datasource_name=METRICS_GRAFANA_DATASOURCE_NAME, ams_datasource_host=new_datasource_host).get_content()
+  Logger.info("Checking if AMS Grafana datasource already exists")
 
   response = perform_grafana_get_call(GRAFANA_DATASOURCE_URL, server)
   create_datasource = True
@@ -215,7 +249,7 @@ def create_ams_datasource():
         Logger.info("Ambari Metrics Grafana datasource already present. Checking Metrics Collector URL")
         datasource_url = datasources_json[i]["url"]
 
-        if is_unchanged_datasource_url(datasource_url):
+        if is_unchanged_datasource_url(datasource_url, new_datasource_host):
           Logger.info("Metrics Collector URL validation succeeded.")
           return
         else: # Metrics datasource present, but collector host is wrong.
@@ -359,4 +393,3 @@ def create_ams_dashboards():
     pass
 
 
-
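
create_ams_datasource() now probes every Metrics Collector in parallel and only falls back to a random host when none of the POSTs succeed. The selection pattern, pulled out into a helper purely for illustration (probe_host stands in for do_ams_collector_post; the ambari_commons import matches the one added above):

    from ambari_commons.parallel_processing import execute_in_parallel, SUCCESS

    def pick_working_collector(hosts, probe_host, params, fallback_host):
        # execute_in_parallel runs probe_host(host, params) for each host and
        # returns a dict of host -> result object exposing .status and .result.
        results = execute_in_parallel(probe_host, hosts, params)
        for host in hosts:
            if host in results and results[host].status == SUCCESS:
                return host          # first collector that accepted the POST
        return fallback_host         # nobody answered; use the fallback host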

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
index e278d0d..ad66ffe 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
@@ -50,10 +50,16 @@ pass
 
 #AMBARI_METRICS data
 ams_pid_dir = status_params.ams_collector_pid_dir
-
+is_ams_distributed = config['configurations']['ams-site']['timeline.metrics.service.operation.mode'] == 'distributed'
 ams_collector_script = "/usr/sbin/ambari-metrics-collector"
 ams_collector_pid_dir = status_params.ams_collector_pid_dir
-ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
+ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
+ams_collector_list = default("/clusterHostInfo/metrics_collector_hosts", [])
+embedded_mode_multiple_instances = False
+
+if not is_ams_distributed and len(ams_collector_list) > 1:
+  embedded_mode_multiple_instances = True
+
 failover_strategy_blacklisted_interval_seconds = default("/configurations/ams-env/failover_strategy_blacklisted_interval", "600")
 failover_strategy = default("/configurations/ams-site/failover.strategy", "round-robin")
 if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
@@ -118,6 +124,9 @@ if 'cluster-env' in config['configurations'] and \
   metric_collector_host = config['configurations']['cluster-env']['metrics_collector_vip_host']
 else:
   metric_collector_host = select_metric_collector_hosts_from_hostnames(ams_collector_hosts)
+
+random_metric_collector_host = select_metric_collector_hosts_from_hostnames(ams_collector_hosts)
+
 if 'cluster-env' in config['configurations'] and \
     'metrics_collector_vip_port' in config['configurations']['cluster-env']:
   metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
@@ -172,7 +181,6 @@ hbase_pid_dir = status_params.hbase_pid_dir
 
 is_hbase_distributed = config['configurations']['ams-hbase-site']['hbase.cluster.distributed']
 is_local_fs_rootdir = hbase_root_dir.startswith('file://')
-is_ams_distributed = config['configurations']['ams-site']['timeline.metrics.service.operation.mode'] == 'distributed'
 
 # security is disabled for embedded mode, when HBase is backed by file
 security_enabled = False if not is_hbase_distributed else config['configurations']['cluster-env']['security_enabled']
@@ -227,15 +235,18 @@ else:
 max_open_files_limit = default("/configurations/ams-hbase-env/max_open_files_limit", "32768")
 hostname = config["hostname"]
 
+cluster_zookeeper_quorum_hosts = ",".join(config['clusterHostInfo']['zookeeper_hosts'])
+if 'zoo.cfg' in config['configurations'] and 'clientPort' in config['configurations']['zoo.cfg']:
+  cluster_zookeeper_clientPort = config['configurations']['zoo.cfg']['clientPort']
+else:
+  cluster_zookeeper_clientPort = '2181'
+
 if not is_hbase_distributed:
   zookeeper_quorum_hosts = hostname
   zookeeper_clientPort = '61181'
 else:
-  zookeeper_quorum_hosts = ",".join(config['clusterHostInfo']['zookeeper_hosts'])
-  if 'zoo.cfg' in config['configurations'] and 'clientPort' in config['configurations']['zoo.cfg']:
-    zookeeper_clientPort = config['configurations']['zoo.cfg']['clientPort']
-  else:
-    zookeeper_clientPort = '2181'
+  zookeeper_quorum_hosts = cluster_zookeeper_quorum_hosts
+  zookeeper_clientPort = cluster_zookeeper_clientPort
 
 ams_checkpoint_dir = config['configurations']['ams-site']['timeline.metrics.aggregator.checkpoint.dir']
 _hbase_tmp_dir = config['configurations']['ams-hbase-site']['hbase.tmp.dir']
@@ -337,4 +348,3 @@ HdfsResource = functools.partial(
  )
 
 
-
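
Two of the params.py changes above are easy to misread: ams_collector_hosts is now a comma-joined string (ams_collector_list keeps the raw list), and embedded_mode_multiple_instances flags the unsupported case of several collectors running in embedded mode. A small worked example with made-up hosts:

    collector_list = ["c6401.ambari.apache.org", "c6402.ambari.apache.org"]  # illustrative
    ams_collector_hosts = ",".join(collector_list)   # a string now, not a list
    is_ams_distributed = False                       # operation mode 'embedded'
    embedded_mode_multiple_instances = not is_ams_distributed and len(collector_list) > 1
    print(embedded_mode_multiple_instances)          # True: two collectors, embedded mode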

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/service_check.py
index ddd3e42..1417f4a 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/service_check.py
@@ -25,6 +25,7 @@ from resource_management import Template
 
 from ambari_commons import OSConst
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons.parallel_processing import PrallelProcessResult, execute_in_parallel, SUCCESS
 
 import httplib
 import network
@@ -39,10 +40,10 @@ import socket
 class AMSServiceCheck(Script):
   AMS_METRICS_POST_URL = "/ws/v1/timeline/metrics/"
   AMS_METRICS_GET_URL = "/ws/v1/timeline/metrics?%s"
-  AMS_CONNECT_TRIES = 30
-  AMS_CONNECT_TIMEOUT = 15
-  AMS_READ_TRIES = 10
-  AMS_READ_TIMEOUT = 5
+  AMS_CONNECT_TRIES = 10
+  AMS_CONNECT_TIMEOUT = 10
+  AMS_READ_TRIES = 5
+  AMS_READ_TIMEOUT = 10
 
   @OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
   def service_check(self, env):
@@ -62,124 +63,139 @@ class AMSServiceCheck(Script):
       if not check_windows_service_exists(params.ams_collector_win_service_name):
         raise Fail("Metrics Collector service was not properly installed. Check the logs and retry the installation.")
 
-  @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-  def service_check(self, env):
-    import params
-
-    Logger.info("Ambari Metrics service check was started.")
-    env.set_params(params)
-
+  def service_check_for_single_host(self, metric_collector_host, params):
     random_value1 = random.random()
     headers = {"Content-type": "application/json"}
     ca_certs = os.path.join(params.ams_collector_conf_dir,
                             params.metric_truststore_ca_certs)
 
-    for i in xrange(0, self.AMS_CONNECT_TRIES):
-      try:
-        current_time = int(time.time()) * 1000
-        metric_json = Template('smoketest_metrics.json.j2', hostname=params.hostname, random1=random_value1,
+    current_time = int(time.time()) * 1000
+    metric_json = Template('smoketest_metrics.json.j2', hostname=params.hostname, random1=random_value1,
                            current_time=current_time).get_content()
-        Logger.info("Generated metrics:\n%s" % metric_json)
-
-        Logger.info("Connecting (POST) to %s:%s%s" % (params.metric_collector_host,
-                                                      params.metric_collector_port,
-                                                      self.AMS_METRICS_POST_URL))
-        conn = network.get_http_connection(params.metric_collector_host,
+    try:
+      post_metrics_to_collector(self.AMS_METRICS_POST_URL, metric_collector_host, params.metric_collector_port, params.metric_collector_https_enabled,
+                                metric_json, headers, ca_certs, self.AMS_CONNECT_TRIES, self.AMS_CONNECT_TIMEOUT)
+
+      get_metrics_parameters = {
+        "metricNames": "AMBARI_METRICS.SmokeTest.FakeMetric",
+        "appId": "amssmoketestfake",
+        "hostname": params.hostname,
+        "startTime": current_time - 60000,
+        "endTime": current_time + 61000,
+        "precision": "seconds",
+        "grouped": "false",
+      }
+      encoded_get_metrics_parameters = urllib.urlencode(get_metrics_parameters)
+
+      Logger.info("Connecting (GET) to %s:%s%s" % (metric_collector_host,
+                                                   params.metric_collector_port,
+                                                   self.AMS_METRICS_GET_URL % encoded_get_metrics_parameters))
+      for i in xrange(0, self.AMS_READ_TRIES):
+        conn = network.get_http_connection(metric_collector_host,
                                            int(params.metric_collector_port),
                                            params.metric_collector_https_enabled,
                                            ca_certs)
-        conn.request("POST", self.AMS_METRICS_POST_URL, metric_json, headers)
-
+        conn.request("GET", self.AMS_METRICS_GET_URL % encoded_get_metrics_parameters)
         response = conn.getresponse()
-        Logger.info("Http response: %s %s" % (response.status, response.reason))
-      except (httplib.HTTPException, socket.error) as ex:
-        if i < self.AMS_CONNECT_TRIES - 1:  #range/xrange returns items from start to end-1
-          time.sleep(self.AMS_CONNECT_TIMEOUT)
-          Logger.info("Connection failed. Next retry in %s seconds."
-                      % (self.AMS_CONNECT_TIMEOUT))
-          continue
-        else:
-          raise Fail("Metrics were not saved. Service check has failed. "
-               "\nConnection failed.")
+        Logger.info("Http response for host %s : %s %s" % (metric_collector_host, response.status, response.reason))
 
-      data = response.read()
-      Logger.info("Http data: %s" % data)
-      conn.close()
+        data = response.read()
+        Logger.info("Http data: %s" % data)
+        conn.close()
 
-      if response.status == 200:
-        Logger.info("Metrics were saved.")
-        break
-      else:
-        Logger.info("Metrics were not saved. Service check has failed.")
-        if i < self.AMS_CONNECT_TRIES - 1:  #range/xrange returns items from start to end-1
-          time.sleep(self.AMS_CONNECT_TIMEOUT)
-          Logger.info("Next retry in %s seconds."
-                      % (self.AMS_CONNECT_TIMEOUT))
+        if response.status == 200:
+          Logger.info("Metrics were retrieved from host %s" % metric_collector_host)
+        else:
+          raise Fail("Metrics were not retrieved from host %s. GET request status: %s %s \n%s" %
+                     (metric_collector_host, response.status, response.reason, data))
+        data_json = json.loads(data)
+
+        def floats_eq(f1, f2, delta):
+          return abs(f1-f2) < delta
+
+        values_are_present = False
+        for metrics_data in data_json["metrics"]:
+          if (str(current_time) in metrics_data["metrics"] and str(current_time + 1000) in metrics_data["metrics"]
+              and floats_eq(metrics_data["metrics"][str(current_time)], random_value1, 0.0000001)
+              and floats_eq(metrics_data["metrics"][str(current_time + 1000)], current_time, 1)):
+            Logger.info("Values %s and %s were found in the response from host %s." % (metric_collector_host, random_value1, current_time))
+            values_are_present = True
+            break
+            pass
+
+        if not values_are_present:
+          if i < self.AMS_READ_TRIES - 1:  #range/xrange returns items from start to end-1
+            Logger.info("Values weren't stored yet. Retrying in %s seconds."
+                        % (self.AMS_READ_TIMEOUT))
+            time.sleep(self.AMS_READ_TIMEOUT)
+          else:
+            raise Fail("Values %s and %s were not found in the response." % (random_value1, current_time))
         else:
-          raise Fail("Metrics were not saved. Service check has failed. POST request status: %s %s \n%s" %
-                     (response.status, response.reason, data))
-
-    get_metrics_parameters = {
-      "metricNames": "AMBARI_METRICS.SmokeTest.FakeMetric",
-      "appId": "amssmoketestfake",
-      "hostname": params.hostname,
-      "startTime": current_time - 60000,
-      "endTime": current_time + 61000,
-      "precision": "seconds",
-      "grouped": "false",
-    }
-    encoded_get_metrics_parameters = urllib.urlencode(get_metrics_parameters)
-
-    Logger.info("Connecting (GET) to %s:%s%s" % (params.metric_collector_host,
-                                                 params.metric_collector_port,
-                                              self.AMS_METRICS_GET_URL % encoded_get_metrics_parameters))
-    for i in xrange(0, self.AMS_READ_TRIES):
-      conn = network.get_http_connection(params.metric_collector_host,
-                                         int(params.metric_collector_port),
-                                         params.metric_collector_https_enabled,
-                                         ca_certs)
-      conn.request("GET", self.AMS_METRICS_GET_URL % encoded_get_metrics_parameters)
-      response = conn.getresponse()
-      Logger.info("Http response: %s %s" % (response.status, response.reason))
-
-      data = response.read()
-      Logger.info("Http data: %s" % data)
-      conn.close()
-
-      if response.status == 200:
-        Logger.info("Metrics were retrieved.")
-      else:
-        Logger.info("Metrics were not retrieved. Service check has failed.")
-        raise Fail("Metrics were not retrieved. Service check has failed. GET request status: %s %s \n%s" %
-                   (response.status, response.reason, data))
-      data_json = json.loads(data)
-
-      def floats_eq(f1, f2, delta):
-        return abs(f1-f2) < delta
-
-      values_are_present = False
-      for metrics_data in data_json["metrics"]:
-        if (str(current_time) in metrics_data["metrics"] and str(current_time + 1000) in metrics_data["metrics"]
-            and floats_eq(metrics_data["metrics"][str(current_time)], random_value1, 0.0000001)
-            and floats_eq(metrics_data["metrics"][str(current_time + 1000)], current_time, 1)):
-          Logger.info("Values %s and %s were found in the response." % (random_value1, current_time))
-          values_are_present = True
           break
           pass
+    except Fail as ex:
+      Logger.warning("Ambari Metrics service check failed on collector host %s. Reason : %s" % (metric_collector_host, str(ex)))
+      raise Fail("Ambari Metrics service check failed on collector host %s. Reason : %s" % (metric_collector_host, str(ex)))
+
+  @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+  def service_check(self, env):
+    import params
+
+    Logger.info("Ambari Metrics service check was started.")
+    env.set_params(params)
 
-      if not values_are_present:
-        if i < self.AMS_READ_TRIES - 1:  #range/xrange returns items from start to end-1
-          Logger.info("Values weren't stored yet. Retrying in %s seconds."
-                    % (self.AMS_READ_TIMEOUT))
-          time.sleep(self.AMS_READ_TIMEOUT)
+    results = execute_in_parallel(self.service_check_for_single_host, params.ams_collector_hosts.split(','), params)
+
+    for host in str(params.ams_collector_hosts).split(","):
+      if host in results:
+        if results[host].status == SUCCESS:
+          Logger.info("Ambari Metrics service check passed on host " + host)
+          return
         else:
-          Logger.info("Values %s and %s were not found in the response." % (random_value1, current_time))
-          raise Fail("Values %s and %s were not found in the response." % (random_value1, current_time))
-      else:
-        break
-        pass
-    Logger.info("Ambari Metrics service check is finished.")
+          Logger.warning(results[host].result)
+    raise Fail("All metrics collectors are unavailable.")
+
+def post_metrics_to_collector(ams_metrics_post_url, metric_collector_host, metric_collector_port, metric_collector_https_enabled,
+                              metric_json, headers, ca_certs, tries = 1, connect_timeout = 10):
+  for i in xrange(0, tries):
+    try:
+      Logger.info("Generated metrics for host %s :\n%s" % (metric_collector_host, metric_json))
+
+      Logger.info("Connecting (POST) to %s:%s%s" % (metric_collector_host,
+                                                    metric_collector_port,
+                                                    ams_metrics_post_url))
+      conn = network.get_http_connection(metric_collector_host,
+                                         int(metric_collector_port),
+                                         metric_collector_https_enabled,
+                                         ca_certs)
+      conn.request("POST", ams_metrics_post_url, metric_json, headers)
 
+      response = conn.getresponse()
+      Logger.info("Http response for host %s: %s %s" % (metric_collector_host, response.status, response.reason))
+    except (httplib.HTTPException, socket.error) as ex:
+      if i < tries - 1:  #range/xrange returns items from start to end-1
+        time.sleep(connect_timeout)
+        Logger.info("Connection failed for host %s. Next retry in %s seconds."
+                    % (metric_collector_host, connect_timeout))
+        continue
+      else:
+        raise Fail("Metrics were not saved. Connection failed.")
+
+    data = response.read()
+    Logger.info("Http data: %s" % data)
+    conn.close()
+
+    if response.status == 200:
+      Logger.info("Metrics were saved.")
+      break
+    else:
+      Logger.info("Metrics were not saved.")
+      if i < tries - 1:  #range/xrange returns items from start to end-1
+        time.sleep(connect_timeout)
+        Logger.info("Next retry in %s seconds."
+                    % (connect_timeout))
+      else:
+        raise Fail("Metrics were not saved. POST request status: %s %s \n%s" %
+                   (response.status, response.reason, data))
 if __name__ == "__main__":
   AMSServiceCheck().execute()
-
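
The POST logic is now shared: both the Grafana datasource setup and the service check call post_metrics_to_collector(). A hedged usage sketch; the host, port, and empty payload are illustrative, and in practice metric_json is rendered from smoketest_metrics.json.j2:

    from service_check import post_metrics_to_collector

    headers = {"Content-type": "application/json"}
    metric_json = '{"metrics": []}'   # placeholder payload for illustration
    post_metrics_to_collector("/ws/v1/timeline/metrics/",
                              "c6401.ambari.apache.org",  # assumed collector host
                              "6188",                     # assumed collector port
                              False,                      # HTTPS disabled
                              metric_json, headers,
                              None,                       # ca_certs; assumed unused without HTTPS
                              tries=3, connect_timeout=10)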

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metrics_grafana_datasource.json.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metrics_grafana_datasource.json.j2 b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metrics_grafana_datasource.json.j2
index 678d769..05d1ae5 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metrics_grafana_datasource.json.j2
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metrics_grafana_datasource.json.j2
@@ -20,7 +20,7 @@
   "name": "{{ams_datasource_name}}",
   "type": "ambarimetrics",
   "access": "proxy",
-  "url": "{{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}",
+  "url": "{{metric_collector_protocol}}://{{ams_datasource_host}}:{{metric_collector_port}}",
   "password": "",
   "user": "",
   "database": "",
@@ -30,4 +30,4 @@
   "withCredentials": false,
   "isDefault": true,
   "jsonData": {}
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/falcon-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/falcon-env.xml b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/falcon-env.xml
index 09cced6..5663f57 100644
--- a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/falcon-env.xml
+++ b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/falcon-env.xml
@@ -21,6 +21,23 @@
 -->
 <configuration supports_adding_forbidden="true">
   <property>
+    <name>falcon.atlas.hook</name>
+    <value>false</value>
+    <display-name>Enable Atlas Hook</display-name>
+    <description>Enable Atlas Hook</description>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+    <depends-on>
+      <property>
+        <type>application-properties</type>
+        <name>atlas.rest.address</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
     <name>falcon_user</name>
     <display-name>Falcon User</display-name>
     <value>falcon</value>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/metainfo.xml b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/metainfo.xml
index 3ddcc41..91d469b 100644
--- a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/metainfo.xml
@@ -131,6 +131,13 @@
       <excluded-config-types>
         <config-type>oozie-site</config-type>
       </excluded-config-types>
+
+      <quickLinksConfigurations>
+        <quickLinksConfiguration>
+          <fileName>quicklinks.json</fileName>
+          <default>true</default>
+        </quickLinksConfiguration>
+      </quickLinksConfigurations>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon.py b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon.py
index 4429253..4a12b9f 100644
--- a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon.py
+++ b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon.py
@@ -118,7 +118,7 @@ def falcon(type, action = None, upgrade_type=None):
         cd_access = "a")
 
     # Generate atlas-application.properties.xml file
-    if params.falcon_atlas_support:
+    if params.falcon_atlas_support and params.enable_atlas_hook:
       # If Atlas is added later than Falcon, this package will be absent.
       install_atlas_hook_packages(params.atlas_plugin_package, params.atlas_ubuntu_plugin_package, params.host_sys_prepped,
                                   params.agent_stack_retry_on_unavailability, params.agent_stack_retry_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py
index 0fb21d0..6d1ae03 100644
--- a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py
@@ -138,14 +138,15 @@ dfs_data_mirroring_dir = "/apps/data-mirroring"
 ########################################################
 #region Atlas Hooks
 falcon_atlas_application_properties = default('/configurations/falcon-atlas-application.properties', {})
+atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
+enable_atlas_hook = default('/configurations/falcon-env/falcon.atlas.hook', False)
 
 # Calculate atlas_hook_cp to add to FALCON_EXTRA_CLASS_PATH
 falcon_atlas_support = False
 
 # Path to add to environment variable
 atlas_hook_cp = ""
-if has_atlas_in_cluster():
-  atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
+if enable_atlas_hook:
 
   # stack_version doesn't contain a minor number of the stack (only first two numbers: 2.3). Get it from current_version_formatted
   falcon_atlas_support = current_version_formatted and check_stack_feature(StackFeature.FALCON_ATLAS_SUPPORT_2_3, current_version_formatted) \
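
With this change the Falcon Atlas hook is controlled by the new falcon-env switch rather than by Atlas merely being present in the cluster, and atlas-application.properties is generated only when both the switch and the stack-feature check hold. A reduced illustration of the gate, not the literal params_linux.py/falcon.py code:

    def should_generate_atlas_hook(enable_atlas_hook, falcon_atlas_support):
        # enable_atlas_hook    <- falcon-env/falcon.atlas.hook (defaults to False)
        # falcon_atlas_support <- stack-version feature check, evaluated only
        #                         when the switch is on
        return bool(enable_atlas_hook) and bool(falcon_atlas_support)

    print(should_generate_atlas_hook(True, True))    # True  -> hook config written
    print(should_generate_atlas_hook(False, True))   # False -> hook skipped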

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/quicklinks/quicklinks.json b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/quicklinks/quicklinks.json
new file mode 100644
index 0000000..cc81fca
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/quicklinks/quicklinks.json
@@ -0,0 +1,35 @@
+{
+  "name": "default",
+  "description": "default quick links configuration",
+  "configuration": {
+    "protocol":
+    {
+      "type":"https",
+      "checks":[
+        {
+          "property":"falcon.enableTLS",
+          "desired":"true",
+          "site":"falcon-startup-properties"
+        }
+      ]
+    },
+
+    "links": [
+      {
+        "name": "falcon_dashboard",
+        "label": "Falcon Dashboard",
+        "requires_user_name": "true",
+        "component_name": "FALCON_SERVER",
+        "url": "%@://%@:%@/",
+        "port":{
+          "http_property": "falcon_port",
+          "http_default_port": "15000",
+          "https_property": "falcon_port",
+          "https_default_port": "15443",
+          "regex": "^(\\d+)$",
+          "site": "falcon-env"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
index 008e9d6..a44b461 100644
--- a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
@@ -85,7 +85,7 @@ hostname = None
 if config.has_key('hostname'):
   hostname = config['hostname']
 
-ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
+ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
 has_metric_collector = not len(ams_collector_hosts) == 0
 metric_collector_port = None
 if has_metric_collector:

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
index 1f32c41..f47dc8f 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
@@ -151,7 +151,7 @@ has_ganglia_server = not len(ganglia_server_hosts) == 0
 if has_ganglia_server:
   ganglia_server_host = ganglia_server_hosts[0]
 
-ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
+ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
 has_metric_collector = not len(ams_collector_hosts) == 0
 if has_metric_collector:
   if 'cluster-env' in config['configurations'] and \

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
index 22ab02a..aad2db0 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
@@ -395,7 +395,7 @@
   <property>
     <name>dfs.cluster.administrators</name>
     <value> hdfs</value>
-    <description>ACL for who all can view the default servlets in the HDFS</description>
+    <description>ACL for the admins, this configuration is used to control who can access the default servlets in the namenode, etc. The value should be a comma separated list of users and groups. The user list comes first and is separated by a space followed by the group list, e.g. "user1,user2 group1,group2". Both users and groups are optional, so "user1", " group1", "", "user1 group1", "user1,user2 group1,group2" are all valid (note the leading space in " group1"). '*' grants access to all users and groups, e.g. '*', '* ' and ' *' are all valid.</description>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py
index 4efdae5..8a06f56 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py
@@ -178,7 +178,7 @@ def execute(configurations={}, parameters={}, host_name=None):
     else:
       collector_webapp_address = configurations[METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY].split(":")
       if valid_collector_webapp_address(collector_webapp_address):
-        collector_host = select_metric_collector_for_sink(app_id.lower()).split(":")[0]
+        collector_host = select_metric_collector_for_sink(app_id.lower())
         collector_port = int(collector_webapp_address[1])
       else:
         return (RESULT_STATE_UNKNOWN, ['{0} value should be set as "fqdn_hostname:port", but set to {1}'.format(

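After this change the collector host comes straight from select_metric_collector_for_sink(), which already returns a bare hostname, while the port is still taken from the configured webapp address. A hedged sketch of how host and port end up combined; select_metric_collector_for_sink is stubbed and the address is an example, only the split/index logic mirrors the alert:

def select_metric_collector_for_sink(app_id):
    # stub: the real helper picks one collector host for the given sink/app id
    return "c6401.ambari.apache.org"

metrics_collector_webapp_address = "c6401.ambari.apache.org:6188"

collector_webapp_address = metrics_collector_webapp_address.split(":")
collector_host = select_metric_collector_for_sink("datanode")   # no .split(":")[0] needed any more
collector_port = int(collector_webapp_address[1])

print(collector_host, collector_port)  # c6401.ambari.apache.org 6188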
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-env.xml b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-env.xml
index 150f629..1213580 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-env.xml
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-env.xml
@@ -21,6 +21,23 @@
 -->
 <configuration supports_adding_forbidden="true">
   <property>
+    <name>hive.atlas.hook</name>
+    <value>false</value>
+    <display-name>Enable Atlas Hook</display-name>
+    <description>Enable Atlas Hook</description>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+    <depends-on>
+      <property>
+        <type>application-properties</type>
+        <name>atlas.rest.address</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
     <name>hive.client.heapsize</name>
     <value>512</value>
     <description>Hive Client Java heap size</description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat.py
index 5e2c709..c047114 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat.py
@@ -76,6 +76,6 @@ def hcat():
   )
 
   # Generate atlas-application.properties.xml file
-  if has_atlas_in_cluster():
+  if params.enable_atlas_hook:
     atlas_hook_filepath = os.path.join(params.hive_config_dir, params.atlas_hook_filename)
     setup_atlas_hook(SERVICE.HIVE, params.hive_atlas_application_properties, atlas_hook_filepath, params.hive_user, params.user_group)

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
index bcc598a..792aac3 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
@@ -229,7 +229,7 @@ def hive(name=None):
             mode=0644)
 
   # Generate atlas-application.properties.xml file
-  if has_atlas_in_cluster():
+  if params.enable_atlas_hook:
     atlas_hook_filepath = os.path.join(params.hive_config_dir, params.atlas_hook_filename)
     setup_atlas_hook(SERVICE.HIVE, params.hive_atlas_application_properties, atlas_hook_filepath, params.hive_user, params.user_group)
   

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
index 6c44d4b..e9436c1 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
@@ -525,9 +525,8 @@ metrics_collection_period = default("/configurations/ams-site/timeline.metrics.s
 ########################################################
 #region Atlas Hooks
 hive_atlas_application_properties = default('/configurations/hive-atlas-application.properties', {})
-
-if has_atlas_in_cluster():
-  atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
+enable_atlas_hook = default('/configurations/hive-env/hive.atlas.hook', False)
+atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
 #endregion
 
 ########################################################

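Taken together, the hive-env.xml, params_linux.py, hcat.py, hive.py and webhcat.py hunks replace the cluster-wide has_atlas_in_cluster() check with an explicit per-service switch, hive.atlas.hook. A minimal sketch of the resulting gate; default() and setup_atlas_hook() are stubs and the config dir is a placeholder, only the flag handling mirrors the diff:

import os

def default(path, fallback):
    # stand-in for resource_management's default(); pretend the operator enabled the hook
    configurations = {"/configurations/hive-env/hive.atlas.hook": True}
    return configurations.get(path, fallback)

def setup_atlas_hook(service, hook_properties, filepath, user, group):
    # stub: the real helper writes atlas-application.properties for the service
    print("would write " + filepath + " for " + service)

# params_linux.py (new shape): flag and filename are read unconditionally
enable_atlas_hook = default('/configurations/hive-env/hive.atlas.hook', False)
atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')

# hive.py / hcat.py / webhcat.py (new shape): gate on the flag, not on Atlas being installed
hive_config_dir = "/etc/hive/conf"  # placeholder path
if enable_atlas_hook:
    atlas_hook_filepath = os.path.join(hive_config_dir, atlas_hook_filename)
    setup_atlas_hook("HIVE", {}, atlas_hook_filepath, "hive", "hadoop")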
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat.py
index fe3f34a..aa116d6 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat.py
@@ -139,7 +139,7 @@ def webhcat():
     )
 
   # Generate atlas-application.properties.xml file
-  if has_atlas_in_cluster():
+  if params.enable_atlas_hook:
     # WebHCat uses a different config dir than the rest of the daemons in Hive.
     atlas_hook_filepath = os.path.join(params.config_dir, params.atlas_hook_filename)
     setup_atlas_hook(SERVICE.HIVE, params.hive_atlas_application_properties, atlas_hook_filepath, params.hive_user, params.user_group)

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
index 0cb88fe..5635fe3 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
@@ -120,7 +120,7 @@ metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.trust
 metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
 metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
 
-ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
+ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
 has_metric_collector = not len(ams_collector_hosts) == 0
 
 if has_metric_collector:

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logfeeder-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logfeeder-env.xml b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logfeeder-env.xml
index ee885e3..aba638a 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logfeeder-env.xml
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logfeeder-env.xml
@@ -119,6 +119,20 @@
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
+    <name>logfeeder_external_solr_kerberos_keytab</name>
+    <value></value>
+    <display-name>Logfeeder External Solr keytab</display-name>
+    <description>The path to the Kerberos Keytab file containing service principal of Logfeeder e.g. /etc/security/keytabs/logfeeder.service.keytab</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>logfeeder_external_solr_kerberos_principal</name>
+    <value></value>
+    <display-name>Logfeeder External Solr principal</display-name>
+    <description>The service principal for Logfeeder which will be used to access SolrClient e.g. logfeeder/_HOST@REALM</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
     <name>logfeeder_kerberos_keytab</name>
     <value>/etc/security/keytabs/logfeeder.service.keytab</value>
     <display-name>Logfeeder Solr keytab</display-name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logsearch-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logsearch-env.xml b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logsearch-env.xml
index c5b9b4e..2f13710 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logsearch-env.xml
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logsearch-env.xml
@@ -99,20 +99,100 @@
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
-    <name>logsearch_solr_audit_logs_zk_node</name>
-    <value>{infra_solr_znode}</value>
-    <display-name>Solr Audit Logs Znode</display-name>
-    <description>Only needed if using custom solr cloud. E.g. /audit_logs</description>
+    <name>logsearch_use_external_solr</name>
+    <value>false</value>
+    <display-name>Use External Solr</display-name>
+    <description>Use External Solr to store logs</description>
+    <value-attributes>
+      <type>value-list</type>
+      <overridable>false</overridable>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>ON</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>OFF</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>logsearch_external_solr_zk_znode</name>
+    <value></value>
+    <display-name>External Solr Logs Znode</display-name>
+    <description>Only needed if using custom solr cloud. E.g. /logsearch_solr</description>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
-    <name>logsearch_solr_audit_logs_zk_quorum</name>
-    <value>{zookeeper_quorum}</value>
-    <display-name>Solr Audit Logs ZK Quorum</display-name>
+    <name>logsearch_external_solr_zk_quorum</name>
+    <value></value>
+    <display-name>External Solr Logs ZK Quorum</display-name>
     <description>Only needed if using custom solr cloud. E.g. zk1:2181,zk2:2182</description>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
+    <name>logsearch_external_solr_ssl_enabled</name>
+    <value>false</value>
+    <display-name>Log Search External Solr SSL Enabled</display-name>
+    <description>Use SSL to connect to the external solr</description>
+    <value-attributes>
+      <type>value-list</type>
+      <overridable>false</overridable>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>ON</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>OFF</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>logsearch_external_solr_kerberos_enabled</name>
+    <value>false</value>
+    <display-name>Log Search External Solr Kerberos Enabled</display-name>
+    <description>Use Kerberos to connect to the external solr</description>
+    <value-attributes>
+      <type>value-list</type>
+      <overridable>false</overridable>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>ON</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>OFF</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>logsearch_external_solr_kerberos_keytab</name>
+    <value></value>
+    <display-name>Log Search External Solr keytab</display-name>
+    <description>The path to the Kerberos Keytab file containing service principal of Log Search e.g. /etc/security/keytabs/logsearch.service.keytab</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>logsearch_external_solr_kerberos_principal</name>
+    <value></value>
+    <display-name>Log Search External Solr principal</display-name>
+    <description>The service principal for Log Search which will be used to access SolrClient  e.g. logsearch/_HOST@REALM</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
     <name>logsearch_truststore_location</name>
     <value>/etc/security/serverKeys/logsearch.trustStore.jks</value>
     <display-name>Log Search trust store location</display-name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logsearch-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logsearch-properties.xml b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logsearch-properties.xml
index 10ca9ae..b474c5d 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logsearch-properties.xml
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logsearch-properties.xml
@@ -238,7 +238,7 @@
   </property>
   <property>
     <name>logsearch.spnego.kerberos.enabled</name>
-    <value/>
+    <value>false</value>
     <display-name>Http Spnego enabled</display-name>
     <description>Enable SPNEGO based authentication on the Log Search UI</description>
     <value-attributes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/metainfo.xml b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/metainfo.xml
index f69bd3c..6921a8e 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/metainfo.xml
@@ -36,6 +36,7 @@
           <commandScript>
             <script>scripts/logsearch.py</script>
             <scriptType>PYTHON</scriptType>
+            <timeout>1800</timeout>
           </commandScript>
           <logs>
             <log>
@@ -54,15 +55,6 @@
                 <enabled>true</enabled>
               </auto-deploy>
             </dependency>
-
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-
           </dependencies>
           <configuration-dependencies>
             <config-type>infra-solr-env</config-type>
@@ -94,20 +86,9 @@
             </log>
           </logs>
 
-          <dependencies>
-
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-
-          </dependencies>
-
           <configuration-dependencies>
             <config-type>infra-solr-env</config-type>
+            <config-type>logsearch-env</config-type>
             <config-type>logfeeder-properties</config-type>
             <config-type>logfeeder-env</config-type>
             <config-type>logfeeder-grok</config-type>
@@ -134,6 +115,10 @@
               <skipUpgrade>true</skipUpgrade>
               <condition>should_install_logsearch_portal</condition>
             </package>
+            <package>
+              <name>ambari-infra-solr-client</name>
+              <condition>should_install_infra_solr_client</condition>
+            </package>
           </packages>
         </osSpecific>
         <osSpecific>
@@ -148,6 +133,10 @@
               <skipUpgrade>true</skipUpgrade>
               <condition>should_install_logsearch_portal</condition>
             </package>
+            <package>
+              <name>ambari-infra-solr-client</name>
+              <condition>should_install_infra_solr_client</condition>
+            </package>
           </packages>
         </osSpecific>
       </osSpecifics>
@@ -158,10 +147,6 @@
         <timeout>300</timeout>
       </commandScript>
 
-      <requiredServices>
-        <service>AMBARI_INFRA</service>
-      </requiredServices>
-
       <quickLinksConfigurations>
         <quickLinksConfiguration>
           <fileName>quicklinks.json</fileName>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py
index d086f9f..c20d92d 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py
@@ -38,7 +38,6 @@ def get_port_from_url(address):
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
-stack_version = default("/commandParams/version", None)
 sudo = AMBARI_SUDO_BINARY
 security_enabled = status_params.security_enabled
 
@@ -56,16 +55,10 @@ logfeeder_pid_dir = status_params.logfeeder_pid_dir
 logfeeder_pid_file = status_params.logfeeder_pid_file
 
 user_group = config['configurations']['cluster-env']['user_group']
-fetch_nonlocal_groups = config['configurations']['cluster-env']["fetch_nonlocal_groups"]
 
 # shared configs
 java64_home = config['hostLevelParams']['java_home']
-zookeeper_hosts_list = config['clusterHostInfo']['zookeeper_hosts']
-zookeeper_hosts_list.sort()
-# get comma separated list of zookeeper hosts from clusterHostInfo
-zookeeper_hosts = ",".join(zookeeper_hosts_list)
 cluster_name = str(config['clusterName'])
-availableServices = config['availableServices']
 
 configurations = config['configurations'] # need reference inside logfeeder jinja templates
 logserch_meta_configs = get_logsearch_meta_configs(configurations)
@@ -84,30 +77,31 @@ else:
 #####################################
 # Infra Solr configs
 #####################################
-infra_solr_znode = default('/configurations/infra-solr-env/infra_solr_znode', '/infra-solr')
-infra_solr_instance_count = len(config['clusterHostInfo']['infra_solr_hosts'])
-infra_solr_ssl_enabled = default('configurations/infra-solr-env/infra_solr_ssl_enabled', False)
-infra_solr_jmx_port = config['configurations']['infra-solr-env']['infra_solr_jmx_port']
-
-zookeeper_port = default('/configurations/zoo.cfg/clientPort', None)
-index = 0
-zookeeper_quorum = ""
-for host in config['clusterHostInfo']['zookeeper_hosts']:
-  zookeeper_quorum += host + ":" + str(zookeeper_port)
-  index += 1
-  if index < len(config['clusterHostInfo']['zookeeper_hosts']):
-    zookeeper_quorum += ","
+infra_solr_znode = '/infra-solr'
+infra_solr_ssl_enabled = False
+infra_solr_jmx_port = ''
 
+if 'infra-solr-env' in config['configurations']:
+  infra_solr_znode = default('/configurations/infra-solr-env/infra_solr_znode', '/infra-solr')
+  infra_solr_ssl_enabled = default('configurations/infra-solr-env/infra_solr_ssl_enabled', False)
+  infra_solr_jmx_port = config['configurations']['infra-solr-env']['infra_solr_jmx_port']
 
 if security_enabled:
   kinit_path_local = status_params.kinit_path_local
   _hostname_lowercase = config['hostname'].lower()
   logsearch_jaas_file = logsearch_server_conf + '/logsearch_jaas.conf'
   logfeeder_jaas_file = logsearch_logfeeder_conf + '/logfeeder_jaas.conf'
-  logsearch_kerberos_keytab = config['configurations']['logsearch-env']['logsearch_kerberos_keytab']
-  logsearch_kerberos_principal = config['configurations']['logsearch-env']['logsearch_kerberos_principal'].replace('_HOST',_hostname_lowercase)
-  logfeeder_kerberos_keytab = config['configurations']['logfeeder-env']['logfeeder_kerberos_keytab']
-  logfeeder_kerberos_principal = config['configurations']['logfeeder-env']['logfeeder_kerberos_principal'].replace('_HOST',_hostname_lowercase)
+  use_external_solr_with_kerberos = default('configurations/logsearch-env/logsearch_external_solr_kerberos_enabled', False)
+  if use_external_solr_with_kerberos:
+    logsearch_kerberos_keytab = config['configurations']['logsearch-env']['logsearch_external_solr_kerberos_keytab']
+    logsearch_kerberos_principal = config['configurations']['logsearch-env']['logsearch_external_solr_kerberos_principal'].replace('_HOST',_hostname_lowercase)
+    logfeeder_kerberos_keytab = config['configurations']['logfeeder-env']['logfeeder_external_solr_kerberos_keytab']
+    logfeeder_kerberos_principal = config['configurations']['logfeeder-env']['logfeeder_external_solr_kerberos_principal'].replace('_HOST',_hostname_lowercase)
+  else:
+    logsearch_kerberos_keytab = config['configurations']['logsearch-env']['logsearch_kerberos_keytab']
+    logsearch_kerberos_principal = config['configurations']['logsearch-env']['logsearch_kerberos_principal'].replace('_HOST',_hostname_lowercase)
+    logfeeder_kerberos_keytab = config['configurations']['logfeeder-env']['logfeeder_kerberos_keytab']
+    logfeeder_kerberos_principal = config['configurations']['logfeeder-env']['logfeeder_kerberos_principal'].replace('_HOST',_hostname_lowercase)
 
 #####################################
 # Logsearch configs
@@ -120,10 +114,29 @@ logsearch_service_logs_merge_factor = config['configurations']['logsearch-servic
 logsearch_audit_logs_max_retention = config['configurations']['logsearch-audit_logs-solrconfig']['logsearch_audit_logs_max_retention']
 logsearch_audit_logs_merge_factor = config['configurations']['logsearch-audit_logs-solrconfig']['logsearch_audit_logs_merge_factor']
 
-logsearch_solr_audit_logs_zk_node = default('/configurations/logsearch-env/logsearch_solr_audit_logs_zk_node', infra_solr_znode)
-logsearch_solr_audit_logs_zk_quorum = default('/configurations/logsearch-env/logsearch_solr_audit_logs_zk_quorum', zookeeper_quorum)
-logsearch_solr_audit_logs_zk_node = format(logsearch_solr_audit_logs_zk_node)
-logsearch_solr_audit_logs_zk_quorum = format(logsearch_solr_audit_logs_zk_quorum)
+logsearch_use_external_solr = default('/configurations/logsearch-env/logsearch_use_external_solr', False)
+
+if logsearch_use_external_solr:
+  logsearch_solr_zk_znode = config['configurations']['logsearch-env']['logsearch_external_solr_zk_znode']
+  logsearch_solr_zk_quorum = config['configurations']['logsearch-env']['logsearch_external_solr_zk_quorum']
+  logsearch_solr_ssl_enabled = default('configurations/logsearch-env/logsearch_external_solr_ssl_enabled', False)
+  logsearch_solr_kerberos_enabled = security_enabled and default('configurations/logsearch-env/logsearch_external_solr_kerberos_enabled', False)
+else:
+  logsearch_solr_zk_znode = infra_solr_znode
+
+  logsearch_solr_zk_quorum = ""
+  zookeeper_port = default('/configurations/zoo.cfg/clientPort', None)
+  if 'zookeeper_hosts' in config['clusterHostInfo']:
+    for host in config['clusterHostInfo']['zookeeper_hosts']:
+      if logsearch_solr_zk_quorum:
+        logsearch_solr_zk_quorum += ','
+      logsearch_solr_zk_quorum += host + ":" + str(zookeeper_port)
+  
+  logsearch_solr_ssl_enabled = infra_solr_ssl_enabled
+  logsearch_solr_kerberos_enabled = security_enabled
+
+zookeeper_quorum = logsearch_solr_zk_quorum
+
 
 
 # logsearch-env configs
@@ -179,8 +192,8 @@ logsearch_properties = {}
 
 # default values
 
-logsearch_properties['logsearch.solr.zk_connect_string'] = zookeeper_quorum + infra_solr_znode
-logsearch_properties['logsearch.solr.audit.logs.zk_connect_string'] = logsearch_solr_audit_logs_zk_quorum + logsearch_solr_audit_logs_zk_node
+logsearch_properties['logsearch.solr.zk_connect_string'] = logsearch_solr_zk_quorum + logsearch_solr_zk_znode
+logsearch_properties['logsearch.solr.audit.logs.zk_connect_string'] = logsearch_solr_zk_quorum + logsearch_solr_zk_znode
 
 logsearch_properties['logsearch.solr.collection.history'] = 'history'
 logsearch_properties['logsearch.solr.history.config.name'] = 'history'
@@ -210,7 +223,7 @@ del logsearch_properties['logsearch.solr.audit.logs.use.ranger']
 logsearch_properties['logsearch.solr.metrics.collector.hosts'] = format(logsearch_properties['logsearch.solr.metrics.collector.hosts'])
 logsearch_properties['logsearch.auth.external_auth.host_url'] = format(logsearch_properties['logsearch.auth.external_auth.host_url'])
 
-if security_enabled:
+if logsearch_solr_kerberos_enabled:
   logsearch_properties['logsearch.solr.kerberos.enable'] = 'true'
   logsearch_properties['logsearch.solr.jaas.file'] = logsearch_jaas_file
 
@@ -293,9 +306,9 @@ logfeeder_properties = dict(logfeeder_properties.items() + dict(config['configur
 
 logfeeder_properties['logfeeder.metrics.collector.hosts'] = format(logfeeder_properties['logfeeder.metrics.collector.hosts'])
 logfeeder_properties['logfeeder.config.files'] = format(logfeeder_properties['logfeeder.config.files'])
-logfeeder_properties['logfeeder.solr.zk_connect_string'] = zookeeper_quorum + infra_solr_znode
+logfeeder_properties['logfeeder.solr.zk_connect_string'] = logsearch_solr_zk_quorum + logsearch_solr_zk_znode
 
-if security_enabled:
+if logsearch_solr_kerberos_enabled:
   if 'logfeeder.solr.kerberos.enable' not in logfeeder_properties:
     logfeeder_properties['logfeeder.solr.kerberos.enable'] = 'true'
   if 'logfeeder.solr.jaas.file' not in logfeeder_properties:

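The params.py rewrite above comes down to one decision: point Log Search and Logfeeder either at an operator-supplied external Solr (znode, ZK quorum, SSL and Kerberos settings from logsearch-env) or at Ambari Infra Solr (znode from infra-solr-env, quorum built from zookeeper_hosts and the ZooKeeper client port). A condensed, hedged restatement of that branch as a standalone function; the config layout mirrors the diff, the host names are examples:

def resolve_solr_connection(config, security_enabled):
    logsearch_env = config['configurations'].get('logsearch-env', {})
    if logsearch_env.get('logsearch_use_external_solr', False):
        znode = logsearch_env['logsearch_external_solr_zk_znode']
        quorum = logsearch_env['logsearch_external_solr_zk_quorum']
        ssl = logsearch_env.get('logsearch_external_solr_ssl_enabled', False)
        kerberos = security_enabled and logsearch_env.get('logsearch_external_solr_kerberos_enabled', False)
    else:
        znode = config['configurations'].get('infra-solr-env', {}).get('infra_solr_znode', '/infra-solr')
        port = config['configurations'].get('zoo.cfg', {}).get('clientPort', '2181')
        quorum = ",".join(h + ":" + str(port) for h in config['clusterHostInfo'].get('zookeeper_hosts', []))
        ssl = config['configurations'].get('infra-solr-env', {}).get('infra_solr_ssl_enabled', False)
        kerberos = security_enabled
    return quorum, znode, ssl, kerberos

example = {
    'configurations': {'infra-solr-env': {'infra_solr_znode': '/infra-solr'}, 'zoo.cfg': {'clientPort': '2181'}},
    'clusterHostInfo': {'zookeeper_hosts': ['zk1.example.com', 'zk2.example.com']},
}
print(resolve_solr_connection(example, security_enabled=False))
# ('zk1.example.com:2181,zk2.example.com:2181', '/infra-solr', False, False)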
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/setup_logfeeder.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/setup_logfeeder.py b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/setup_logfeeder.py
index 9582334..14f8d20 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/setup_logfeeder.py
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/setup_logfeeder.py
@@ -92,7 +92,7 @@ def setup_logfeeder():
          )
 
 
-  if params.security_enabled:
+  if params.logsearch_solr_kerberos_enabled:
     File(format("{logfeeder_jaas_file}"),
          content=Template("logfeeder_jaas.conf.j2")
          )

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/setup_logsearch.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/setup_logsearch.py b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/setup_logsearch.py
index 6c1a936..874b90b 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/setup_logsearch.py
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/setup_logsearch.py
@@ -118,10 +118,10 @@ def setup_logsearch():
 
 def upload_conf_set(config_set, solrconfig_content = None):
   import params
-  jaas_file = params.logsearch_jaas_file if params.security_enabled else None
+  jaas_file = params.logsearch_jaas_file if params.logsearch_solr_kerberos_enabled else None
   solr_cloud_util.upload_configuration_to_zk(
-    zookeeper_quorum=params.zookeeper_quorum,
-    solr_znode=params.infra_solr_znode,
+    zookeeper_quorum=params.logsearch_solr_zk_quorum,
+    solr_znode=params.logsearch_solr_zk_znode,
     config_set_dir=format("{logsearch_server_conf}/solr_configsets/{config_set}/conf"),
     config_set=config_set,
     tmp_dir=params.tmp_dir,
@@ -134,6 +134,7 @@ def upload_conf_set(config_set, solrconfig_content = None):
 def check_znode():
   import params
   solr_cloud_util.check_znode(
-    zookeeper_quorum=params.zookeeper_quorum,
-    solr_znode=params.infra_solr_znode,
-    java64_home=params.java64_home)
+    zookeeper_quorum=params.logsearch_solr_zk_quorum,
+    solr_znode=params.logsearch_solr_zk_znode,
+    java64_home=params.java64_home,
+    retry=30, interval=5)

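check_znode() now receives retry=30, interval=5, so a freshly started Log Search waits for the Solr znode to appear instead of failing on the first probe. A generic sketch of the behavior those arguments request; this is not the solr_cloud_util implementation, just the shape of a bounded retry loop:

import time

def wait_for(probe, retry=30, interval=5):
    # call probe() up to `retry` times, sleeping `interval` seconds between attempts
    for attempt in range(1, retry + 1):
        if probe():
            return True
        if attempt < retry:
            time.sleep(interval)
    return False

# hypothetical probe: "does the Solr znode exist yet?"
if not wait_for(lambda: True, retry=30, interval=5):
    raise Exception("Solr znode did not appear in time")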
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/logfeeder-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/logfeeder-env.sh.j2 b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/logfeeder-env.sh.j2
index 2818708..6795dab 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/logfeeder-env.sh.j2
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/logfeeder-env.sh.j2
@@ -32,7 +32,7 @@ if [ "$LOGFEEDER_JAVA_MEM" = "" ]; then
   export LOGFEEDER_JAVA_MEM=-Xmx{{logfeeder_max_mem}}
 fi
 
-{% if infra_solr_ssl_enabled %}
+{% if logsearch_solr_ssl_enabled %}
 export LOGFEEDER_SSL="true"
 export LOGFEEDER_KEYSTORE_LOCATION={{logfeeder_keystore_location}}
 export LOGFEEDER_KEYSTORE_PASSWORD={{logfeeder_keystore_password}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/logsearch-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/logsearch-env.sh.j2 b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/logsearch-env.sh.j2
index eb7306c..a179983 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/logsearch-env.sh.j2
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/logsearch-env.sh.j2
@@ -38,7 +38,7 @@ export LOGSEARCH_DEBUG={{logsearch_debug_enabled}}
 
 export LOGSEARCH_DEBUG_PORT={{logsearch_debug_port}}
 
-{% if infra_solr_ssl_enabled or logsearch_ui_protocol == 'https' or ambari_server_use_ssl %}
+{% if logsearch_solr_ssl_enabled or logsearch_ui_protocol == 'https' or ambari_server_use_ssl %}
 export LOGSEARCH_SSL="true"
 export LOGSEARCH_KEYSTORE_LOCATION={{logsearch_keystore_location}}
 export LOGSEARCH_KEYSTORE_PASSWORD={{logsearch_keystore_password}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/logsearch-log4j.xml.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/logsearch-log4j.xml.j2 b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/logsearch-log4j.xml.j2
index ce39030..06fdad2 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/logsearch-log4j.xml.j2
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/logsearch-log4j.xml.j2
@@ -25,7 +25,7 @@ limitations under the License.
   </appender>
 
   <appender name="rolling_file" class="org.apache.log4j.RollingFileAppender">
-    <param name="file" value="{{logsearch_log_dir}}/logsearch.err" />
+    <param name="file" value="{{logsearch_log_dir}}/logsearch.log" />
     <param name="Threshold" value="info" />
     <param name="append" value="true" />
     <param name="maxFileSize" value="10MB" />
@@ -74,7 +74,7 @@ limitations under the License.
   </category>
 
   <root>
-    <priority value="warn"/>
+    <priority value="info"/>
     <!-- <appender-ref ref="console" /> -->
     <appender-ref ref="rolling_file" />
     <appender-ref ref="rolling_file_json"/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/output.config.json.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/output.config.json.j2 b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/output.config.json.j2
index 062d636..214e5ba 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/output.config.json.j2
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/output.config.json.j2
@@ -21,7 +21,7 @@
       "comment":"Output to solr for service logs",
       "is_enabled":"{{solr_service_logs_enable}}",
       "destination":"solr",
-      "zk_connect_string":"{{zookeeper_quorum}}{{infra_solr_znode}}",
+      "zk_connect_string":"{{logsearch_solr_zk_quorum}}{{logsearch_solr_zk_znode}}",
       "collection":"{{logsearch_solr_collection_service_logs}}",
       "number_of_shards": "{{logsearch_collection_service_logs_numshards}}",
       "splits_interval_mins": "{{logsearch_service_logs_split_interval_mins}}",
@@ -40,7 +40,7 @@
       "comment":"Output to solr for audit records",
       "is_enabled":"{{solr_audit_logs_enable}}",
       "destination":"solr",
-      "zk_connect_string":"{{zookeeper_quorum}}{{infra_solr_znode}}",
+      "zk_connect_string":"{{logsearch_solr_zk_quorum}}{{logsearch_solr_zk_znode}}",
       "collection":"{{logsearch_solr_collection_audit_logs}}",
       "number_of_shards": "{{logsearch_collection_audit_logs_numshards}}",
       "splits_interval_mins": "{{logsearch_audit_logs_split_interval_mins}}",
@@ -57,5 +57,5 @@
     }
 
   ]
-
+  
 }
\ No newline at end of file

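With this template change, both Solr outputs render zk_connect_string from the resolved Log Search quorum and znode instead of the Infra Solr variables. A quick render of the affected expression with Jinja2, using example values consistent with the params.py hunk above:

from jinja2 import Template

snippet = '"zk_connect_string":"{{logsearch_solr_zk_quorum}}{{logsearch_solr_zk_znode}}"'
print(Template(snippet).render(
    logsearch_solr_zk_quorum="zk1.example.com:2181,zk2.example.com:2181",
    logsearch_solr_zk_znode="/infra-solr"))
# "zk_connect_string":"zk1.example.com:2181,zk2.example.com:2181/infra-solr"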
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/themes/theme.json b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/themes/theme.json
index 0193689..2858825 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/themes/theme.json
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/themes/theme.json
@@ -57,6 +57,25 @@
                       "column-span": "1"
                     }
                   ]
+                },
+                {
+                  "name": "section-logsearch-solr-connection",
+                  "display-name": "Solr Connection",
+                  "row-index": "4",
+                  "column-index": "1",
+                  "row-span": "3",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "4",
+                  "subsections": [
+                    {
+                      "name": "subsection-logsearch-solr-connection-col1",
+                      "row-index": "4",
+                      "column-index": "0",
+                      "row-span": "3",
+                      "column-span": "1"
+                    }
+                  ]
                 }
               ]
             }
@@ -138,6 +157,186 @@
               }
             }
           ]
+        },
+        {
+          "config": "logsearch-env/logsearch_use_external_solr",
+          "subsection-name": "subsection-logsearch-solr-connection-col1"
+        },
+        {
+          "config": "logsearch-env/logsearch_external_solr_zk_znode",
+          "subsection-name": "subsection-logsearch-solr-connection-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "logsearch-env/logsearch_use_external_solr"
+              ],
+              "if": "${logsearch-env/logsearch_use_external_solr}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "logsearch-env/logsearch_external_solr_zk_quorum",
+          "subsection-name": "subsection-logsearch-solr-connection-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "logsearch-env/logsearch_use_external_solr"
+              ],
+              "if": "${logsearch-env/logsearch_use_external_solr}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "logsearch-env/logsearch_external_solr_ssl_enabled",
+          "subsection-name": "subsection-logsearch-solr-connection-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "logsearch-env/logsearch_use_external_solr"
+              ],
+              "if": "${logsearch-env/logsearch_use_external_solr}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "logsearch-env/logsearch_external_solr_kerberos_enabled",
+          "subsection-name": "subsection-logsearch-solr-connection-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "logsearch-env/logsearch_use_external_solr"
+              ],
+              "if": "${logsearch-env/logsearch_use_external_solr}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "logsearch-env/logsearch_external_solr_kerberos_keytab",
+          "subsection-name": "subsection-logsearch-solr-connection-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "logsearch-env/logsearch_external_solr_kerberos_enabled"
+              ],
+              "if": "${logsearch-env/logsearch_external_solr_kerberos_enabled}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "logsearch-env/logsearch_external_solr_kerberos_principal",
+          "subsection-name": "subsection-logsearch-solr-connection-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "logsearch-env/logsearch_external_solr_kerberos_enabled"
+              ],
+              "if": "${logsearch-env/logsearch_external_solr_kerberos_enabled}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "logfeeder-env/logfeeder_external_solr_kerberos_keytab",
+          "subsection-name": "subsection-logsearch-solr-connection-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "logsearch-env/logsearch_external_solr_kerberos_enabled"
+              ],
+              "if": "${logsearch-env/logsearch_external_solr_kerberos_enabled}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "logfeeder-env/logfeeder_external_solr_kerberos_principal",
+          "subsection-name": "subsection-logsearch-solr-connection-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "logsearch-env/logsearch_external_solr_kerberos_enabled"
+              ],
+              "if": "${logsearch-env/logsearch_external_solr_kerberos_enabled}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
         }
       ]
     },
@@ -220,6 +419,60 @@
         "widget": {
           "type": "directories"
         }
+      },
+      {
+        "config": "logsearch-env/logsearch_use_external_solr",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "logsearch-env/logsearch_external_solr_zk_znode",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "logsearch-env/logsearch_external_solr_zk_quorum",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "logsearch-env/logsearch_external_solr_ssl_enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "logsearch-env/logsearch_external_solr_kerberos_enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "logsearch-env/logsearch_external_solr_kerberos_keytab",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "logsearch-env/logsearch_external_solr_kerberos_principal",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "logfeeder-env/logfeeder_external_solr_kerberos_keytab",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "logfeeder-env/logfeeder_external_solr_kerberos_principal",
+        "widget": {
+          "type": "text-field"
+        }
       }
     ]
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py
index e27b363..f2cc940 100644
--- a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py
@@ -70,6 +70,7 @@ stack_supports_usersync_passwd = check_stack_feature(StackFeature.RANGER_USERSYN
 stack_supports_infra_client = check_stack_feature(StackFeature.RANGER_INSTALL_INFRA_CLIENT, version_for_stack_feature_checks)
 stack_supports_pid = check_stack_feature(StackFeature.RANGER_PID_SUPPORT, version_for_stack_feature_checks)
 stack_supports_ranger_admin_password_change = check_stack_feature(StackFeature.RANGER_ADMIN_PASSWD_CHANGE, version_for_stack_feature_checks)
+stack_supports_ranger_setup_db_on_start = check_stack_feature(StackFeature.RANGER_SETUP_DB_ON_START, version_for_stack_feature_checks)
 
 downgrade_from_version = default("/commandParams/downgrade_from_version", None)
 upgrade_direction = default("/commandParams/upgrade_direction", None)

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py
index a9656c7..b849d58 100644
--- a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py
@@ -45,18 +45,9 @@ class RangerAdmin(Script):
     self.install_packages(env)
     import params
     env.set_params(params)
-    if params.xml_configurations_supported:
-      from setup_ranger_xml import setup_ranger_db
-      setup_ranger_db()
-
-    self.configure(env)
-
-    if params.xml_configurations_supported:
-      from setup_ranger_xml import setup_java_patch
-      setup_java_patch()
-
-    if params.stack_supports_ranger_admin_password_change:
-      setup_ranger_admin_passwd_change()
+    # call config and setup db only in case of HDP version < 2.6
+    if not params.stack_supports_ranger_setup_db_on_start:
+      self.configure(env, setup_db=True)
 
   def stop(self, env, upgrade_type=None):
     import params
@@ -93,7 +84,9 @@ class RangerAdmin(Script):
   def start(self, env, upgrade_type=None):
     import params
     env.set_params(params)
-    self.configure(env, upgrade_type=upgrade_type)
+
+    # setup db only if in case HDP version is > 2.6
+    self.configure(env, upgrade_type=upgrade_type, setup_db=params.stack_supports_ranger_setup_db_on_start)
 
     if params.stack_supports_infra_client and params.audit_solr_enabled and params.is_solrCloud_enabled:
       solr_cloud_util.setup_solr_client(params.config, custom_log4j = params.custom_log4j)
@@ -122,7 +115,7 @@ class RangerAdmin(Script):
         raise ComponentIsNotRunning()
     pass
 
-  def configure(self, env, upgrade_type=None):
+  def configure(self, env, upgrade_type=None, setup_db=False):
     import params
     env.set_params(params)
     if params.xml_configurations_supported:
@@ -130,8 +123,23 @@ class RangerAdmin(Script):
     else:
       from setup_ranger import ranger
 
+    # set up db if we are not upgrading and setup_db is true
+    if setup_db and upgrade_type is None:
+      if params.xml_configurations_supported:
+        from setup_ranger_xml import setup_ranger_db
+        setup_ranger_db()
+
     ranger('ranger_admin', upgrade_type=upgrade_type)
 
+    # set up java patches if we are not upgrading and setup_db is true
+    if setup_db and upgrade_type is None:
+      if params.xml_configurations_supported:
+        from setup_ranger_xml import setup_java_patch
+        setup_java_patch()
+
+      if params.stack_supports_ranger_admin_password_change:
+        setup_ranger_admin_passwd_change()
+
   def set_ru_rangeradmin_in_progress(self, upgrade_marker_file):
     config_dir = os.path.dirname(upgrade_marker_file)
     try:

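The RangerAdmin changes move DB setup, the Java patch and the admin password change into configure(), guarded by a setup_db flag: stacks without RANGER_SETUP_DB_ON_START still prepare the DB at install time, newer stacks do it on every non-upgrade start. A reduced sketch of that decision with the setup helpers stubbed; the password-change step is folded into the same guard in the real script:

def setup_ranger_db():
    print("setting up the ranger db")

def setup_java_patch():
    print("applying the java patch")

def ranger(component, upgrade_type=None):
    print("writing configs for " + component)

def configure(upgrade_type=None, setup_db=False):
    # db work happens only outside of upgrades, and only when explicitly requested
    if setup_db and upgrade_type is None:
        setup_ranger_db()
    ranger('ranger_admin', upgrade_type=upgrade_type)
    if setup_db and upgrade_type is None:
        setup_java_patch()

def install(stack_supports_ranger_setup_db_on_start):
    # older stacks: prepare the db once, at install time
    if not stack_supports_ranger_setup_db_on_start:
        configure(setup_db=True)

def start(stack_supports_ranger_setup_db_on_start, upgrade_type=None):
    # newer stacks: (re)run db setup on every regular start
    configure(upgrade_type=upgrade_type, setup_db=stack_supports_ranger_setup_db_on_start)

install(stack_supports_ranger_setup_db_on_start=False)
start(stack_supports_ranger_setup_db_on_start=True)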
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/configuration/sqoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/configuration/sqoop-env.xml b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/configuration/sqoop-env.xml
index f682e97..508cfab 100644
--- a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/configuration/sqoop-env.xml
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/configuration/sqoop-env.xml
@@ -22,6 +22,23 @@
 <configuration supports_adding_forbidden="true">
   <!-- sqoop-env.sh -->
   <property>
+    <name>sqoop.atlas.hook</name>
+    <value>false</value>
+    <display-name>Enable Atlas Hook</display-name>
+    <description>Enable Atlas Hook</description>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+    <depends-on>
+      <property>
+        <type>application-properties</type>
+        <name>atlas.rest.address</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
     <name>content</name>
     <display-name>sqoop-env template</display-name>
     <description>This is the jinja template for sqoop-env.sh file</description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/configuration/sqoop-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/configuration/sqoop-site.xml b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/configuration/sqoop-site.xml
index f7823d2..5d87c4d 100644
--- a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/configuration/sqoop-site.xml
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/configuration/sqoop-site.xml
@@ -35,5 +35,11 @@
   <property>
     <name>sqoop.job.data.publish.class</name>
     <on-ambari-upgrade add="true"/>
+    <depends-on>
+      <property>
+        <type>sqoop-env</type>
+        <name>sqoop.atlas.hook</name>
+      </property>
+    </depends-on>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/params_linux.py
index 283f54d..c1138b3 100644
--- a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/params_linux.py
@@ -130,7 +130,6 @@ jdk_location = config['hostLevelParams']['jdk_location']
 ########################################################
 #region Atlas Hooks
 sqoop_atlas_application_properties = default('/configurations/sqoop-atlas-application.properties', {})
-
-if has_atlas_in_cluster():
-  atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
+enable_atlas_hook = default('/configurations/sqoop-env/sqoop.atlas.hook', False)
+atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
 #endregion

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/sqoop.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/sqoop.py b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/sqoop.py
index 68f06db..d005cbe 100644
--- a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/sqoop.py
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/sqoop.py
@@ -64,7 +64,7 @@ def sqoop(type=None):
             )
 
   # Generate atlas-application.properties.xml file and symlink the hook jars
-  if has_atlas_in_cluster():
+  if params.enable_atlas_hook:
     atlas_hook_filepath = os.path.join(params.sqoop_conf_dir, params.atlas_hook_filename)
     setup_atlas_hook(SERVICE.SQOOP, params.sqoop_atlas_application_properties, atlas_hook_filepath, params.sqoop_user, params.user_group)
     setup_atlas_jar_symlinks("sqoop", params.sqoop_lib)


[12/25] ambari git commit: Merge from branch-2.5

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_metrics_deviation.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_metrics_deviation.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_metrics_deviation.py
deleted file mode 100644
index 0946c85..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_metrics_deviation.py
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import logging
-
-RESULT_STATE_OK = 'OK'
-RESULT_STATE_CRITICAL = 'CRITICAL'
-RESULT_STATE_WARNING = 'WARNING'
-RESULT_STATE_UNKNOWN = 'UNKNOWN'
-RESULT_STATE_SKIPPED = 'SKIPPED'
-
-OK_MESSAGE = 'Ok'
-
-HDFS_SITE_KEY = '{{hdfs-site}}'
-NAMESERVICE_KEY = '{{hdfs-site/dfs.internal.nameservices}}'
-NN_HTTP_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.http-address}}'
-NN_HTTPS_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.https-address}}'
-DFS_POLICY_KEY = '{{hdfs-site/dfs.http.policy}}'
-
-KERBEROS_KEYTAB = '{{hdfs-site/dfs.web.authentication.kerberos.keytab}}'
-KERBEROS_PRINCIPAL = '{{hdfs-site/dfs.web.authentication.kerberos.principal}}'
-SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
-SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
-EXECUTABLE_SEARCH_PATHS = '{{kerberos-env/executable_search_paths}}'
-
-METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY = '{{ams-site/timeline.metrics.service.webapp.address}}'
-METRICS_COLLECTOR_VIP_HOST_KEY = '{{cluster-env/metrics_collector_vip_host}}'
-METRICS_COLLECTOR_VIP_PORT_KEY = '{{cluster-env/metrics_collector_vip_port}}'
-
-logger = logging.getLogger()
-
-
-def get_tokens():
-  """
-  Returns a tuple of tokens in the format {{site/property}} that will be used
-  to build the dictionary passed into execute
-  """
-  return (HDFS_SITE_KEY, NAMESERVICE_KEY, NN_HTTP_ADDRESS_KEY, DFS_POLICY_KEY,
-          EXECUTABLE_SEARCH_PATHS, NN_HTTPS_ADDRESS_KEY, SMOKEUSER_KEY,
-          KERBEROS_KEYTAB, KERBEROS_PRINCIPAL, SECURITY_ENABLED_KEY,
-          METRICS_COLLECTOR_VIP_HOST_KEY, METRICS_COLLECTOR_VIP_PORT_KEY,
-          METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY)
-
-def execute(configurations={}, parameters={}, host_name=None):
-  """
-  Returns a tuple containing the result code and a pre-formatted result label
-
-  Keyword arguments:
-  configurations : a mapping of configuration key to value
-  parameters : a mapping of script parameter key to value
-  host_name : the name of this host where the alert is running
-
-  :type configurations dict
-  :type parameters dict
-  :type host_name str
-  """
-
-  #parse configuration
-  if configurations is None:
-    return (RESULT_STATE_UNKNOWN, ['There were no configurations supplied to the script.'])
-
-  # hdfs-site is required
-  if not HDFS_SITE_KEY in configurations:
-    return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(HDFS_SITE_KEY)])
-
-  result_code = RESULT_STATE_OK
-  label = OK_MESSAGE
-  return (result_code, [label])
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_nfs_gateway_process.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_nfs_gateway_process.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_nfs_gateway_process.py
new file mode 100644
index 0000000..73bc168
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_nfs_gateway_process.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import logging
+
+from resource_management.libraries.functions.simulate_perf_cluster_alert_behaviour import simulate_perf_cluster_alert_behaviour
+
+ALERT_BEHAVIOUR_TYPE = "{{hdfs-alert-config/alert.behavior.type}}"
+
+ALERT_SUCCESS_PERCENTAGE = "{{hdfs-alert-config/alert.success.percentage}}"
+
+ALERT_TIMEOUT_RETURN_VALUE = "{{hdfs-alert-config/alert.timeout.return.value}}"
+ALERT_TIMEOUT_SECS = "{{hdfs-alert-config/alert.timeout.secs}}"
+
+ALERT_FLIP_INTERVAL_MINS = "{{hdfs-alert-config/alert.flip.interval.mins}}"
+
+logger = logging.getLogger('ambari_alerts')
+
+alert_behaviour_properties = {"alert_behaviour_type" : ALERT_BEHAVIOUR_TYPE, "alert_success_percentage" : ALERT_SUCCESS_PERCENTAGE,
+                              "alert_timeout_return_value" : ALERT_TIMEOUT_RETURN_VALUE, "alert_timeout_secs" : ALERT_TIMEOUT_SECS,
+                              "alert_flip_interval_mins" : ALERT_FLIP_INTERVAL_MINS}
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (ALERT_BEHAVIOUR_TYPE, ALERT_SUCCESS_PERCENTAGE, ALERT_TIMEOUT_RETURN_VALUE, ALERT_TIMEOUT_SECS,
+          ALERT_FLIP_INTERVAL_MINS)
+
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  return simulate_perf_cluster_alert_behaviour(alert_behaviour_properties, configurations)
\ No newline at end of file

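The new PERF alert scripts all follow the standard Ambari alert contract: get_tokens() declares the {{site/property}} values the agent must resolve, and execute() receives them in a configurations dict and returns a (result_code, [label]) tuple. A minimal sketch of how a test harness could drive such a script is below; the harness and the stubbed hdfs-alert-config values are illustrative only (not part of this patch) and require the agent's resource_management library on the Python path:

    # illustrative harness; the values below are placeholders, not necessarily valid settings
    import imp

    alert = imp.load_source("alert_nfs_gateway_process", "alert_nfs_gateway_process.py")

    configurations = {
        "{{hdfs-alert-config/alert.behavior.type}}": "percentage",
        "{{hdfs-alert-config/alert.success.percentage}}": "100",
        "{{hdfs-alert-config/alert.timeout.return.value}}": "OK",
        "{{hdfs-alert-config/alert.timeout.secs}}": "10",
        "{{hdfs-alert-config/alert.flip.interval.mins}}": "5",
    }

    result_code, labels = alert.execute(configurations=configurations)
    print result_code, labels
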
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_snamenode_process.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_snamenode_process.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_snamenode_process.py
new file mode 100644
index 0000000..73bc168
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_snamenode_process.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import logging
+
+from resource_management.libraries.functions.simulate_perf_cluster_alert_behaviour import simulate_perf_cluster_alert_behaviour
+
+ALERT_BEHAVIOUR_TYPE = "{{hdfs-alert-config/alert.behavior.type}}"
+
+ALERT_SUCCESS_PERCENTAGE = "{{hdfs-alert-config/alert.success.percentage}}"
+
+ALERT_TIMEOUT_RETURN_VALUE = "{{hdfs-alert-config/alert.timeout.return.value}}"
+ALERT_TIMEOUT_SECS = "{{hdfs-alert-config/alert.timeout.secs}}"
+
+ALERT_FLIP_INTERVAL_MINS = "{{hdfs-alert-config/alert.flip.interval.mins}}"
+
+logger = logging.getLogger('ambari_alerts')
+
+alert_behaviour_properties = {"alert_behaviour_type" : ALERT_BEHAVIOUR_TYPE, "alert_success_percentage" : ALERT_SUCCESS_PERCENTAGE,
+                              "alert_timeout_return_value" : ALERT_TIMEOUT_RETURN_VALUE, "alert_timeout_secs" : ALERT_TIMEOUT_SECS,
+                              "alert_flip_interval_mins" : ALERT_FLIP_INTERVAL_MINS}
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (ALERT_BEHAVIOUR_TYPE, ALERT_SUCCESS_PERCENTAGE, ALERT_TIMEOUT_RETURN_VALUE, ALERT_TIMEOUT_SECS,
+          ALERT_FLIP_INTERVAL_MINS)
+
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  return simulate_perf_cluster_alert_behaviour(alert_behaviour_properties, configurations)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_upgrade_finalized.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_upgrade_finalized.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_upgrade_finalized.py
index 0dff200..73bc168 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_upgrade_finalized.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_upgrade_finalized.py
@@ -20,32 +20,30 @@ limitations under the License.
 
 import logging
 
-RESULT_STATE_OK = 'OK'
+from resource_management.libraries.functions.simulate_perf_cluster_alert_behaviour import simulate_perf_cluster_alert_behaviour
 
-OK_MESSAGE = 'Ok'
+ALERT_BEHAVIOUR_TYPE = "{{hdfs-alert-config/alert.behavior.type}}"
 
-NN_HTTP_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.http-address}}'
-NN_HTTPS_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.https-address}}'
-NN_HTTP_POLICY_KEY = '{{hdfs-site/dfs.http.policy}}'
+ALERT_SUCCESS_PERCENTAGE = "{{hdfs-alert-config/alert.success.percentage}}"
 
-HDFS_SITE_KEY = '{{hdfs-site}}'
-KERBEROS_KEYTAB = '{{hdfs-site/dfs.web.authentication.kerberos.keytab}}'
-KERBEROS_PRINCIPAL = '{{hdfs-site/dfs.web.authentication.kerberos.principal}}'
-SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
-SMOKEUSER_KEY = "{{cluster-env/smokeuser}}"
-EXECUTABLE_SEARCH_PATHS = '{{kerberos-env/executable_search_paths}}'
+ALERT_TIMEOUT_RETURN_VALUE = "{{hdfs-alert-config/alert.timeout.return.value}}"
+ALERT_TIMEOUT_SECS = "{{hdfs-alert-config/alert.timeout.secs}}"
+
+ALERT_FLIP_INTERVAL_MINS = "{{hdfs-alert-config/alert.flip.interval.mins}}"
 
 logger = logging.getLogger('ambari_alerts')
 
+alert_behaviour_properties = {"alert_behaviour_type" : ALERT_BEHAVIOUR_TYPE, "alert_success_percentage" : ALERT_SUCCESS_PERCENTAGE,
+                              "alert_timeout_return_value" : ALERT_TIMEOUT_RETURN_VALUE, "alert_timeout_secs" : ALERT_TIMEOUT_SECS,
+                              "alert_flip_interval_mins" : ALERT_FLIP_INTERVAL_MINS}
+
 def get_tokens():
   """
   Returns a tuple of tokens in the format {{site/property}} that will be used
   to build the dictionary passed into execute
-
-  :rtype tuple
   """
-  return (HDFS_SITE_KEY, NN_HTTP_ADDRESS_KEY, NN_HTTPS_ADDRESS_KEY, NN_HTTP_POLICY_KEY, EXECUTABLE_SEARCH_PATHS,
-          KERBEROS_KEYTAB, KERBEROS_PRINCIPAL, SECURITY_ENABLED_KEY, SMOKEUSER_KEY)
+  return (ALERT_BEHAVIOUR_TYPE, ALERT_SUCCESS_PERCENTAGE, ALERT_TIMEOUT_RETURN_VALUE, ALERT_TIMEOUT_SECS,
+          ALERT_FLIP_INTERVAL_MINS)
 
 
 def execute(configurations={}, parameters={}, host_name=None):
@@ -53,22 +51,9 @@ def execute(configurations={}, parameters={}, host_name=None):
   Returns a tuple containing the result code and a pre-formatted result label
 
   Keyword arguments:
-  configurations : a mapping of configuration key to value
-  parameters : a mapping of script parameter key to value
-  host_name : the name of this host where the alert is running
-
-  :type configurations dict
-  :type parameters dict
-  :type host_name str
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
   """
 
-  if configurations is None:
-    return (('UNKNOWN', ['There were no configurations supplied to the script.']))
-
-  # hdfs-site is required
-  if not HDFS_SITE_KEY in configurations:
-    return 'SKIPPED', ['{0} is a required parameter for the script'.format(HDFS_SITE_KEY)]
-
-  result_code = RESULT_STATE_OK
-  label = OK_MESSAGE
-  return (result_code, [label])
\ No newline at end of file
+  return simulate_perf_cluster_alert_behaviour(alert_behaviour_properties, configurations)
\ No newline at end of file

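With this change the PERF HDFS alerts in this patch all delegate to simulate_perf_cluster_alert_behaviour, driven entirely by the hdfs-alert-config properties. That helper lives in resource_management and is not part of this diff; the stand-in below only illustrates the kind of behaviour the property names suggest (a success percentage, a timeout with a configured return value, and a periodic state flip) and should not be read as the real implementation:

    # stand-in sketch only; the real simulate_perf_cluster_alert_behaviour may differ
    import random
    import time

    def simulate_alert(props, configurations):
        behaviour = configurations[props["alert_behaviour_type"]]

        if behaviour == "percentage":
            pct = int(configurations[props["alert_success_percentage"]])
            if random.randint(1, 100) <= pct:
                return ("OK", ["Ok"])
            return ("CRITICAL", ["Simulated failure"])

        if behaviour == "timeout":
            time.sleep(int(configurations[props["alert_timeout_secs"]]))
            return (configurations[props["alert_timeout_return_value"]], ["Returned after simulated timeout"])

        if behaviour == "flip":
            interval_secs = int(configurations[props["alert_flip_interval_mins"]]) * 60
            if (int(time.time()) // interval_secs) % 2:
                return ("CRITICAL", ["Simulated flip"])
            return ("OK", ["Ok"])

        return ("UNKNOWN", ["Unrecognized behaviour type: %s" % behaviour])
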
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/datanode.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/datanode.py
index 36edc31..6fc338b 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/datanode.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/datanode.py
@@ -33,6 +33,10 @@ class DataNode(Dummy):
   def __init__(self):
     super(DataNode, self).__init__()
     self.component_name = "DATANODE"
+    self.principal_conf_name = "hdfs-site"
+    self.principal_name = "dfs.datanode.kerberos.principal"
+    self.keytab_conf_name = "hdfs-site"
+    self.keytab_name = "dfs.datanode.keytab.file"
 
 if __name__ == "__main__":
   DataNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/journalnode.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/journalnode.py
index 1ad13b7..96be630 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/journalnode.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/journalnode.py
@@ -33,6 +33,10 @@ class JournalNode(Dummy):
   def __init__(self):
     super(JournalNode, self).__init__()
     self.component_name = "JOURNALNODE"
+    self.principal_conf_name = "hdfs-site"
+    self.principal_name = "dfs.journalnode.kerberos.principal"
+    self.keytab_conf_name = "hdfs-site"
+    self.keytab_name = "dfs.journalnode.keytab.file"
 
 if __name__ == "__main__":
   JournalNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/namenode.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/namenode.py
index ded09cb..c3488e8 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/namenode.py
@@ -34,6 +34,10 @@ class NameNode(Dummy):
   def __init__(self):
     super(NameNode, self).__init__()
     self.component_name = "NAMENODE"
+    self.principal_conf_name = "hdfs-site"
+    self.principal_name = "dfs.namenode.kerberos.principal"
+    self.keytab_conf_name = "hdfs-site"
+    self.keytab_name = "dfs.namenode.keytab.file"
 
   def rebalancehdfs(self, env):
     print "Rebalance HDFS"

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/nfsgateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/nfsgateway.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/nfsgateway.py
index ab9855d..b750522 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/nfsgateway.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/nfsgateway.py
@@ -33,6 +33,10 @@ class NFSGateway(Dummy):
   def __init__(self):
     super(NFSGateway, self).__init__()
     self.component_name = "NFS_GATEWAY"
+    self.principal_conf_name = "hdfs-site"
+    self.principal_name = "nfs.kerberos.principal"
+    self.keytab_conf_name = "hdfs-site"
+    self.keytab_name = "nfs.keytab.file"
 
 if __name__ == "__main__":
   NFSGateway().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/snamenode.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/snamenode.py
index 8815aa3..91ce7da 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/snamenode.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/snamenode.py
@@ -33,6 +33,10 @@ class SNameNode(Dummy):
   def __init__(self):
     super(SNameNode, self).__init__()
     self.component_name = "SECONDARY_NAMENODE"
+    self.principal_conf_name = "hdfs-site"
+    self.principal_name = "dfs.secondary.namenode.kerberos.principal"
+    self.keytab_conf_name = "hdfs-site"
+    self.keytab_name = "dfs.secondary.namenode.keytab.file"
 
 if __name__ == "__main__":
   SNameNode().execute()

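The datanode, journalnode, namenode, nfsgateway and snamenode changes above all add the same four attributes. The PERF stack's Dummy base class is not shown in this excerpt; presumably it uses them to look up the component's Kerberos principal and keytab from the named config type, roughly along these lines (illustrative sketch; Script.get_config() is standard resource_management usage, the rest is assumed):

    # sketch of how a Dummy-style base class might resolve the new attributes
    from resource_management.libraries.script.script import Script

    class DummySketch(Script):
        principal_conf_name = "hdfs-site"
        principal_name = "dfs.datanode.kerberos.principal"
        keytab_conf_name = "hdfs-site"
        keytab_name = "dfs.datanode.keytab.file"

        def kerberos_identity(self):
            config = Script.get_config()
            principal = config["configurations"][self.principal_conf_name][self.principal_name]
            keytab = config["configurations"][self.keytab_conf_name][self.keytab_name]
            # _HOST placeholders are conventionally expanded to the local hostname
            return principal.replace("_HOST", config["hostname"]), keytab
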
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/configuration/kerberos-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/configuration/kerberos-env.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/configuration/kerberos-env.xml
new file mode 100644
index 0000000..7016437
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/configuration/kerberos-env.xml
@@ -0,0 +1,380 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <property require-input="true">
+    <name>kdc_type</name>
+    <description>
+      The type of KDC being used. Either mit-kdc, ipa, or active-directory
+    </description>
+    <value>mit-kdc</value>
+    <display-name>KDC type</display-name>
+    <value-attributes>
+      <type>componentHost</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>manage_identities</name>
+    <description>
+      Indicates whether the Ambari user and service Kerberos identities (principals and keytab files)
+      should be managed (created, deleted, updated, etc...) by Ambari or managed manually.
+    </description>
+    <value>true</value>
+    <display-name>Manage Kerberos Identities</display-name>
+    <value-attributes>
+      <visible>false</visible>
+      <overridable>false</overridable>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>manage_auth_to_local</name>
+    <description>
+      Indicates whether the hadoop auth_to_local rules should be managed by Ambari or managed manually.
+    </description>
+    <value>true</value>
+    <display-name>Manage Hadoop auth_to_local rules</display-name>
+    <value-attributes>
+      <visible>true</visible>
+      <overridable>false</overridable>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>install_packages</name>
+    <display-name>Install OS-specific Kerberos client package(s)</display-name>
+    <description>
+      Indicates whether Ambari should install the Kerberos client package(s) or not. If not, it is
+      expected that Kerberos utility programs (such as kadmin, kinit, klist, and kdestroy) are
+      compatible with MIT Kerberos 5 version 1.10.3 in command line options and behaviors.
+    </description>
+    <value>true</value>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ldap_url</name>
+    <display-name>LDAP url</display-name>
+    <description>
+      The URL to the Active Directory LDAP Interface
+      Example: ldaps://ad.example.com:636
+    </description>
+    <value/>
+    <value-attributes>
+      <visible>false</visible>
+      <overridable>false</overridable>
+      <type>ldap_url</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>container_dn</name>
+    <display-name>Container DN</display-name>
+    <description>
+      The distinguished name (DN) of the container used to store service principals
+    </description>
+    <value-attributes>
+      <visible>false</visible>
+      <overridable>false</overridable>
+    </value-attributes>
+    <value/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>encryption_types</name>
+    <display-name>Encryption Types</display-name>
+    <description>
+      The supported list of session key encryption types that should be returned by the KDC.
+    </description>
+    <value>aes des3-cbc-sha1 rc4 des-cbc-md5</value>
+    <value-attributes>
+      <type>multiLine</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property require-input="true">
+    <name>realm</name>
+    <description>
+      The default realm to use when creating service principals
+    </description>
+    <display-name>Realm name</display-name>
+    <value/>
+    <value-attributes>
+      <type>host</type>
+      <editable-only-at-install>true</editable-only-at-install>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>kdc_hosts</name>
+    <description>
+      A comma-delimited list of IP addresses or FQDNs declaring the KDC hosts.
+      Optionally a port number may be included in each entry by separating each host and port by a
+      colon (:). Example:  kdc1.example.com:88, kdc2.example.com:88
+    </description>
+    <display-name>KDC hosts</display-name>
+    <value/>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>admin_server_host</name>
+    <display-name>Kadmin host</display-name>
+    <description>
+      The IP address or FQDN for the KDC Kerberos administrative host. Optionally a port number may be included.
+    </description>
+    <value/>
+    <value-attributes>
+      <type>host</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>executable_search_paths</name>
+    <display-name>Executable Search Paths</display-name>
+    <description>
+      A comma-delimited list of search paths to use to find Kerberos utilities like kadmin, kinit and ipa.
+    </description>
+    <value>/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin</value>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>multiLine</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>password_length</name>
+    <display-name>Password Length</display-name>
+    <description>
+      The required length for generated passwords.
+    </description>
+    <value>20</value>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>password_min_lowercase_letters</name>
+    <display-name>Password Minimum # Lowercase Letters</display-name>
+    <description>
+      The minimum number of lowercase letters (a-z) required in generated passwords
+    </description>
+    <value>1</value>
+    <value-attributes>
+      <type>int</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>password_min_uppercase_letters</name>
+    <display-name>Password Minimum # Uppercase Letters</display-name>
+    <description>
+      The minimum number of uppercase letters (A-Z) required in generated passwords
+    </description>
+    <value>1</value>
+    <value-attributes>
+      <type>int</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>password_min_digits</name>
+    <display-name>Password Minimum # Digits</display-name>
+    <description>
+      The minimum number of digits (0-9) required in generated passwords
+    </description>
+    <value>1</value>
+    <value-attributes>
+      <type>int</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>password_min_punctuation</name>
+    <display-name>Password Minimum # Punctuation Characters</display-name>
+    <description>
+      The minimum number of punctuation characters (?.!$%^*()-_+=~) required in generated passwords
+    </description>
+    <value>1</value>
+    <value-attributes>
+      <type>int</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>password_min_whitespace</name>
+    <display-name>Password Minimum # Whitespace Characters</display-name>
+    <description>
+      The minimum number of whitespace characters required in generated passwords
+    </description>
+    <value>0</value>
+    <value-attributes>
+      <type>int</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>service_check_principal_name</name>
+    <display-name>Test Kerberos Principal</display-name>
+    <description>
+      The principal name to use when executing the Kerberos service check
+    </description>
+    <value>${cluster_name|toLower()}-${short_date}</value>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>case_insensitive_username_rules</name>
+    <display-name>Enable case insensitive username rules</display-name>
+    <description>
+      Force principal names to resolve to lowercase local usernames in auth-to-local rules
+    </description>
+    <value>false</value>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ad_create_attributes_template</name>
+    <display-name>Account Attribute Template</display-name>
+    <description>
+      A Velocity template to use to generate a JSON-formatted document containing the set of
+      attribute names and values needed to create a new Kerberos identity in the relevant
+      Active Directory.
+      Variables include:
+      principal_name, principal_primary, principal_instance, realm, realm_lowercase,
+      normalized_principal, principal digest, password, is_service, container_dn
+    </description>
+    <value>
+{
+  "objectClass": ["top", "person", "organizationalPerson", "user"],
+  "cn": "$principal_name",
+  #if( $is_service )
+  "servicePrincipalName": "$principal_name",
+  #end
+  "userPrincipalName": "$normalized_principal",
+  "unicodePwd": "$password",
+  "accountExpires": "0",
+  "userAccountControl": "66048"
+}
+    </value>
+    <value-attributes>
+      <type>content</type>
+      <empty-value-valid>true</empty-value-valid>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>create_ambari_principal</name>
+    <description>
+      Indicates whether Ambari should create the principal and keytab for itself, for use by different views.
+    </description>
+    <value>true</value>
+    <display-name>Create Ambari Principal &amp; Keytab</display-name>
+    <value-attributes>
+      <visible>true</visible>
+      <overridable>false</overridable>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>kdc_create_attributes</name>
+    <display-name>Principal Attributes</display-name>
+    <description>
+      The set of attributes to use when creating a new Kerberos identity in the relevant (MIT) KDC.
+    </description>
+    <value/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>group</name>
+    <display-name>IPA Group</display-name>
+    <description>
+      The IPA group that user principals should be members of
+    </description>
+    <value>ambari-managed-principals</value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+      <visible>false</visible>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>set_password_expiry</name>
+    <display-name>Set IPA principal password expiry</display-name>
+    <description>
+      Indicates whether Ambari should set the password expiry for the principals it creates. By default
+      IPA does not allow this; it requires the admin principal to have write permission on the krbPasswordExpiry
+      attribute. If this option is not enabled, it is assumed that a suitable password policy is in place
+      for the IPA group that principals are added to.
+    </description>
+    <value>false</value>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>password_chat_timeout</name>
+    <display-name>Set IPA kinit password chat timeout</display-name>
+    <description>
+      The timeout in seconds that Ambari should wait for a response during a kinit password chat.
+      Lookups can take some time before a response arrives.
+    </description>
+    <value>5</value>
+    <value-attributes>
+      <visible>false</visible>
+      <type>int</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>

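Scripts in the PERF stack can read these kerberos-env properties through the usual resource_management helpers once the agent has populated the command configuration. A short illustrative example of pulling a few of the values defined above (a params-style module running inside an agent command is assumed):

    # illustrative lookups against the kerberos-env properties defined above
    from resource_management.libraries.functions.default import default

    kdc_type = default('/configurations/kerberos-env/kdc_type', 'mit-kdc')
    realm = default('/configurations/kerberos-env/realm', None)
    manage_identities = default('/configurations/kerberos-env/manage_identities', 'true')
    executable_search_paths = default('/configurations/kerberos-env/executable_search_paths', None)
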
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/configuration/krb5-conf.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/configuration/krb5-conf.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/configuration/krb5-conf.xml
new file mode 100644
index 0000000..c692b92
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/configuration/krb5-conf.xml
@@ -0,0 +1,109 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property require-input="false">
+    <name>domains</name>
+    <display-name>Domains</display-name>
+    <description>
+      A comma-separated list of domain names used to map server host names to the realm name (e.g. .example.com,example.com). This is optional.
+    </description>
+    <value/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>manage_krb5_conf</name>
+    <display-name>Manage Kerberos client krb5.conf</display-name>
+    <description>
+      Indicates whether your krb5.conf file should be managed by the wizard or managed manually by you
+    </description>
+    <value>true</value>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>conf_dir</name>
+    <display-name>krb5-conf directory path</display-name>
+    <description>The krb5.conf configuration directory</description>
+    <value>/etc</value>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>krb5-conf template</display-name>
+    <description>Customizable krb5.conf template (Jinja template engine)</description>
+    <value>
+[libdefaults]
+  renew_lifetime = 7d
+  forwardable = true
+  default_realm = {{realm}}
+  ticket_lifetime = 24h
+  dns_lookup_realm = false
+  dns_lookup_kdc = false
+  default_ccache_name = /tmp/krb5cc_%{uid}
+  #default_tgs_enctypes = {{encryption_types}}
+  #default_tkt_enctypes = {{encryption_types}}
+{% if domains %}
+[domain_realm]
+{%- for domain in domains.split(',') %}
+  {{domain|trim()}} = {{realm}}
+{%- endfor %}
+{% endif %}
+[logging]
+  default = FILE:/var/log/krb5kdc.log
+  admin_server = FILE:/var/log/kadmind.log
+  kdc = FILE:/var/log/krb5kdc.log
+
+[realms]
+  {{realm}} = {
+{%- if kdc_hosts &gt; 0 -%}
+{%- set kdc_host_list = kdc_hosts.split(',')  -%}
+{%- if kdc_host_list and kdc_host_list|length &gt; 0 %}
+    admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}
+{%- if kdc_host_list -%}
+{% for kdc_host in kdc_host_list %}
+    kdc = {{kdc_host|trim()}}
+{%- endfor -%}
+{% endif %}
+{%- endif %}
+{%- endif %}
+  }
+
+{# Append additional realm declarations below #}
+    </value>
+    <value-attributes>
+      <type>content</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>

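For orientation, with realm set to EXAMPLE.COM, kdc_hosts set to "kdc1.example.com:88, kdc2.example.com:88", domains left empty and admin_server_host left empty, the template above renders roughly as follows (hand-expanded for illustration; whitespace approximate):

    [libdefaults]
      renew_lifetime = 7d
      forwardable = true
      default_realm = EXAMPLE.COM
      ticket_lifetime = 24h
      dns_lookup_realm = false
      dns_lookup_kdc = false
      default_ccache_name = /tmp/krb5cc_%{uid}
      #default_tgs_enctypes = aes des3-cbc-sha1 rc4 des-cbc-md5
      #default_tkt_enctypes = aes des3-cbc-sha1 rc4 des-cbc-md5

    [logging]
      default = FILE:/var/log/krb5kdc.log
      admin_server = FILE:/var/log/kadmind.log
      kdc = FILE:/var/log/krb5kdc.log

    [realms]
      EXAMPLE.COM = {
        admin_server = kdc1.example.com:88
        kdc = kdc1.example.com:88
        kdc = kdc2.example.com:88
      }
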
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/kerberos.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/kerberos.json
new file mode 100644
index 0000000..6ab7610
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/kerberos.json
@@ -0,0 +1,17 @@
+{
+  "services": [
+    {
+      "name": "KERBEROS",
+      "identities": [
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "components": [
+        {
+          "name": "KERBEROS_CLIENT"
+        }
+      ]
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/metainfo.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/metainfo.xml
new file mode 100644
index 0000000..3ec6340
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/metainfo.xml
@@ -0,0 +1,123 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+        <service>
+            <name>KERBEROS</name>
+            <displayName>Kerberos</displayName>
+            <comment>A computer network authentication protocol which works on
+                the basis of 'tickets' to allow nodes communicating over a
+                non-secure network to prove their identity to one another in a
+                secure manner.
+            </comment>
+            <version>1.10.3-10</version>
+
+            <components>
+                <component>
+                    <name>KERBEROS_CLIENT</name>
+                    <displayName>Kerberos Client</displayName>
+                    <category>CLIENT</category>
+                    <cardinality>ALL</cardinality>
+                    <versionAdvertised>false</versionAdvertised>
+                    <auto-deploy>
+                        <enabled>true</enabled>
+                    </auto-deploy>
+                    <commandScript>
+                        <script>scripts/kerberos_client.py</script>
+                        <scriptType>PYTHON</scriptType>
+                        <timeout>1200</timeout>
+                    </commandScript>
+                    <customCommands>
+                        <customCommand>
+                            <name>SET_KEYTAB</name>
+                            <commandScript>
+                                <script>scripts/kerberos_client.py</script>
+                                <scriptType>PYTHON</scriptType>
+                                <timeout>1000</timeout>
+                            </commandScript>
+                        </customCommand>
+                        <customCommand>
+                            <name>REMOVE_KEYTAB</name>
+                            <commandScript>
+                                <script>scripts/kerberos_client.py</script>
+                                <scriptType>PYTHON</scriptType>
+                                <timeout>1000</timeout>
+                            </commandScript>
+                        </customCommand>
+                    </customCommands>
+                    <configFiles>
+                        <configFile>
+                            <type>env</type>
+                            <fileName>krb5.conf</fileName>
+                            <dictionaryName>krb5-conf</dictionaryName>
+                        </configFile>
+                    </configFiles>
+                </component>
+            </components>
+
+            <osSpecifics>
+                <osSpecific>
+                    <osFamily>redhat7,amazon2015,redhat6</osFamily>
+                    <packages>
+                        <package>
+                            <name>krb5-workstation</name>
+                            <skipUpgrade>true</skipUpgrade>
+                        </package>
+                    </packages>
+                </osSpecific>
+
+                <osSpecific>
+                    <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
+                    <packages>
+                        <package>
+                            <name>krb5-user</name>
+                            <skipUpgrade>true</skipUpgrade>
+                        </package>
+                        <package>
+                            <name>krb5-config</name>
+                            <skipUpgrade>true</skipUpgrade>
+                        </package>
+                    </packages>
+                </osSpecific>
+
+                <osSpecific>
+                    <osFamily>suse11,suse12</osFamily>
+                    <packages>
+                        <package>
+                            <name>krb5-client</name>
+                            <skipUpgrade>true</skipUpgrade>
+                        </package>
+                    </packages>
+                </osSpecific>
+            </osSpecifics>
+
+            <commandScript>
+                <script>scripts/service_check.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>300</timeout>
+            </commandScript>
+
+            <configuration-dependencies>
+                <config-type>krb5-conf</config-type>
+                <config-type>kerberos-env</config-type>
+            </configuration-dependencies>
+            <restartRequiredAfterChange>true</restartRequiredAfterChange>
+        </service>
+    </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/kerberos_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/kerberos_client.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/kerberos_client.py
new file mode 100644
index 0000000..ddc8063
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/kerberos_client.py
@@ -0,0 +1,80 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from kerberos_common import *
+from resource_management.libraries.functions.security_commons import cached_kinit_executor
+from resource_management.core.exceptions import ClientComponentHasNoStatus
+
+class KerberosClient(KerberosScript):
+  def install(self, env):
+    install_packages = default('/configurations/kerberos-env/install_packages', "true")
+    if install_packages:
+      self.install_packages(env)
+    else:
+      print "Kerberos client packages are not being installed, manual installation is required."
+
+    self.configure(env)
+
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    if params.manage_krb5_conf:
+      self.write_krb5_conf()
+    #delete krb cache to prevent using old krb tickets on fresh kerberos setup
+    self.clear_tmp_cache()
+
+    self.setup_jce()
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+  def security_status(self, env):
+    import status_params
+    if status_params.security_enabled:
+      if status_params.smoke_user and status_params.smoke_user_keytab:
+        try:
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.smoke_user,
+                                status_params.smoke_user_keytab,
+                                status_params.smoke_user_principal,
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        self.put_structured_out({"securityState": "UNKNOWN"})
+        self.put_structured_out({"securityStateErrorInfo": "Missing smoke user credentials"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+  def set_keytab(self, env):
+    self.write_keytab_file()
+
+  def remove_keytab(self, env):
+    self.delete_keytab_file()
+
+  def download_install_jce(self, env):
+    self.setup_jce()
+
+
+if __name__ == "__main__":
+  KerberosClient().execute()

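KerberosClient().execute() at the bottom hands control to resource_management's Script.execute(), which, broadly, resolves the method to run by lowercasing the command name it was launched with, so the SET_KEYTAB and REMOVE_KEYTAB custom commands declared in metainfo.xml above end up in set_keytab() and remove_keytab(). A self-contained illustration of that dispatch convention, with the client stubbed out (the real env object is built by the agent from the command JSON; this is not agent code):

    # illustration of the command-name -> method dispatch convention
    class FakeKerberosClient(object):
        def set_keytab(self, env):
            print "would write keytab files named in the command data"

        def remove_keytab(self, env):
            print "would delete keytab files named in the command data"

    for command_name in ("SET_KEYTAB", "REMOVE_KEYTAB"):
        getattr(FakeKerberosClient(), command_name.lower())(env=None)
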
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/kerberos_common.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/kerberos_common.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/kerberos_common.py
new file mode 100644
index 0000000..3c6c83e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/kerberos_common.py
@@ -0,0 +1,468 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import base64
+import getpass
+import os
+import string
+import subprocess
+import sys
+import tempfile
+from tempfile import gettempdir
+
+from resource_management import *
+from utils import get_property_value
+from ambari_commons.os_utils import remove_file
+from ambari_agent import Constants
+
+class KerberosScript(Script):
+  KRB5_REALM_PROPERTIES = [
+    'kdc',
+    'admin_server',
+    'default_domain',
+    'master_kdc'
+  ]
+
+  KRB5_SECTION_NAMES = [
+    'libdefaults',
+    'logging',
+    'realms',
+    'domain_realm',
+    'capaths',
+    'ca_paths',
+    'appdefaults',
+    'plugins'
+  ]
+
+  @staticmethod
+  def create_random_password():
+    import random
+
+    chars = string.digits + string.ascii_letters
+    return ''.join(random.choice(chars) for x in range(13))
+
+  @staticmethod
+  def write_conf_section(output_file, section_name, section_data):
+    if section_name is not None:
+      output_file.write('[%s]\n' % section_name)
+
+      if section_data is not None:
+        for key, value in section_data.iteritems():
+          output_file.write(" %s = %s\n" % (key, value))
+
+
+  @staticmethod
+  def _write_conf_realm(output_file, realm_name, realm_data):
+    """ Writes out realm details
+
+    Example:
+
+     EXAMPLE.COM = {
+      kdc = kerberos.example.com
+      admin_server = kerberos.example.com
+     }
+
+    """
+    if realm_name is not None:
+      output_file.write(" %s = {\n" % realm_name)
+
+      if realm_data is not None:
+        for key, value in realm_data.iteritems():
+          if key in KerberosScript.KRB5_REALM_PROPERTIES:
+            output_file.write("  %s = %s\n" % (key, value))
+
+      output_file.write(" }\n")
+
+  @staticmethod
+  def write_conf_realms_section(output_file, section_name, realms_data):
+    if section_name is not None:
+      output_file.write('[%s]\n' % section_name)
+
+      if realms_data is not None:
+        for realm, realm_data in realms_data.iteritems():
+          KerberosScript._write_conf_realm(output_file, realm, realm_data)
+          output_file.write('\n')
+
+  @staticmethod
+  def write_krb5_conf():
+    import params
+
+    Directory(params.krb5_conf_dir,
+              owner='root',
+              create_parents = True,
+              group='root',
+              mode=0755
+    )
+
+    if (params.krb5_conf_template is None) or not params.krb5_conf_template.strip():
+      content = Template('krb5_conf.j2')
+    else:
+      content = InlineTemplate(params.krb5_conf_template)
+
+    File(params.krb5_conf_path,
+         content=content,
+         owner='root',
+         group='root',
+         mode=0644
+    )
+
+  @staticmethod
+  def invoke_kadmin(query, admin_identity=None, default_realm=None):
+    """
+    Executes the kadmin or kadmin.local command (depending on whether admin_identity is set or not)
+    and returns the command result code and standard out data.
+
+    :param query: the kadmin query to execute
+    :param admin_identity: the identity for the administrative user (optional)
+    :param default_realm: the default realm to assume
+    :return: return_code, out
+    """
+    if (query is not None) and (len(query) > 0):
+      auth_principal = None
+      auth_keytab_file = None
+
+      if admin_identity is not None:
+        auth_principal = get_property_value(admin_identity, 'principal')
+
+      if auth_principal is None:
+        kadmin = 'kadmin.local'
+        credential = ''
+      else:
+        kadmin = 'kadmin -p "%s"' % auth_principal
+
+        auth_password = get_property_value(admin_identity, 'password')
+
+        if auth_password is None:
+          auth_keytab = get_property_value(admin_identity, 'keytab')
+
+          if auth_keytab is not None:
+            (fd, auth_keytab_file) = tempfile.mkstemp()
+            os.write(fd, base64.b64decode(auth_keytab))
+            os.close(fd)
+
+          credential = '-k -t %s' % auth_keytab_file
+        else:
+          credential = '-w "%s"' % auth_password
+
+      if (default_realm is not None) and (len(default_realm) > 0):
+        realm = '-r %s' % default_realm
+      else:
+        realm = ''
+
+      try:
+        command = '%s %s %s -q "%s"' % (kadmin, credential, realm, query.replace('"', '\\"'))
+        return shell.checked_call(command)
+      except:
+        raise
+      finally:
+        if auth_keytab_file is not None:
+          os.remove(auth_keytab_file)
+
+  @staticmethod
+  def create_keytab_file(principal, path, auth_identity=None):
+    success = False
+
+    if (principal is not None) and (len(principal) > 0):
+      if (auth_identity is None) or (len(auth_identity) == 0):
+        norandkey = '-norandkey'
+      else:
+        norandkey = ''
+
+      if (path is not None) and (len(path) > 0):
+        keytab_file = '-k %s' % path
+      else:
+        keytab_file = ''
+
+      try:
+        result_code, output = KerberosScript.invoke_kadmin(
+          'ktadd %s %s %s' % (keytab_file, norandkey, principal),
+          auth_identity)
+
+        success = (result_code == 0)
+      except:
+        raise Fail("Failed to create keytab for principal: %s (in %s)" % (principal, path))
+
+    return success
+
+  @staticmethod
+  def create_keytab(principal, auth_identity=None):
+    keytab = None
+
+    (fd, temp_path) = tempfile.mkstemp()
+    os.remove(temp_path)
+
+    try:
+      if KerberosScript.create_keytab_file(principal, temp_path, auth_identity):
+        with open(temp_path, 'r') as f:
+          keytab = base64.b64encode(f.read())
+    finally:
+      if os.path.isfile(temp_path):
+        os.remove(temp_path)
+
+    return keytab
+
+  @staticmethod
+  def principal_exists(identity, auth_identity=None):
+    exists = False
+
+    if identity is not None:
+      principal = get_property_value(identity, 'principal')
+
+      if (principal is not None) and (len(principal) > 0):
+        try:
+          result_code, output = KerberosScript.invoke_kadmin('getprinc %s' % principal,
+                                                             auth_identity)
+          exists = (output is not None) and (("Principal: %s" % principal) in output)
+        except:
+          raise Fail("Failed to determine if principal exists: %s" % principal)
+
+    return exists
+
+  @staticmethod
+  def change_principal_password(identity, auth_identity=None):
+    success = False
+
+    if identity is not None:
+      principal = get_property_value(identity, 'principal')
+
+      if (principal is not None) and (len(principal) > 0):
+        password = get_property_value(identity, 'password')
+
+        if password is None:
+          credentials = '-randkey'
+        else:
+          credentials = '-pw "%s"' % password
+
+        try:
+          result_code, output = KerberosScript.invoke_kadmin(
+            'change_password %s %s' % (credentials, principal),
+            auth_identity)
+
+          success = (result_code == 0)
+        except:
+          raise Fail("Failed to create principal: %s" % principal)
+
+    return success
+
+  @staticmethod
+  def create_principal(identity, auth_identity=None):
+    success = False
+
+    if identity is not None:
+      principal = get_property_value(identity, 'principal')
+
+      if (principal is not None) and (len(principal) > 0):
+        password = get_property_value(identity, 'password')
+
+        if password is None:
+          credentials = '-randkey'
+        else:
+          credentials = '-pw "%s"' % password
+
+        try:
+          result_code, out = KerberosScript.invoke_kadmin(
+            'addprinc %s %s' % (credentials, principal),
+            auth_identity)
+
+          success = (result_code == 0)
+        except:
+          raise Fail("Failed to create principal: %s" % principal)
+
+    return success
+
+  @staticmethod
+  def clear_tmp_cache():
+    tmp_dir = Constants.AGENT_TMP_DIR
+    if tmp_dir is None:
+      tmp_dir = gettempdir()
+    curl_krb_cache_path = os.path.join(tmp_dir, "curl_krb_cache")
+    Directory(curl_krb_cache_path, action="delete")
+
+  @staticmethod
+  def create_principals(identities, auth_identity=None):
+    if identities is not None:
+      for identity in identities:
+        KerberosScript.create_principal(identity, auth_identity)
+
+  @staticmethod
+  def create_or_update_administrator_identity():
+    import params
+
+    if params.realm is not None:
+      admin_identity = params.get_property_value(params.realm, 'admin_identity')
+
+      if KerberosScript.principal_exists(admin_identity):
+        KerberosScript.change_principal_password(admin_identity)
+      else:
+        KerberosScript.create_principal(admin_identity)
+
+  @staticmethod
+  def test_kinit(identity, user="root"):
+    import params
+    principal = get_property_value(identity, 'principal')
+    kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+    kdestroy_path_local = functions.get_kdestroy_path(default('/configurations/kerberos-env/executable_search_paths', None))
+
+    if principal is not None:
+      keytab_file = get_property_value(identity, 'keytab_file')
+      keytab = get_property_value(identity, 'keytab')
+      password = get_property_value(identity, 'password')
+
+      # If a test keytab file is available, simply use it
+      if (keytab_file is not None) and (os.path.isfile(keytab_file)):
+        keytab_file = keytab_file.replace("_HOST", params.hostname)
+        command = '%s -k -t %s %s' % (kinit_path_local, keytab_file, principal)
+        Execute(command,
+          user = user,
+        )
+        return shell.checked_call(kdestroy_path_local)
+
+      # If base64-encoded test keytab data is available; then decode it, write it to a temporary file
+      # use it, and then remove the temporary file
+      elif keytab is not None:
+        (fd, test_keytab_file) = tempfile.mkstemp()
+        os.write(fd, base64.b64decode(keytab))
+        os.close(fd)
+
+        try:
+          command = '%s -k -t %s %s' % (kinit_path_local, test_keytab_file, principal)
+          Execute(command,
+            user = user,
+          )
+          return shell.checked_call(kdestroy_path_local)
+        except:
+          raise
+        finally:
+          if test_keytab_file is not None:
+            os.remove(test_keytab_file)
+
+      # If no keytab data is available and a password was supplied, simply use it.
+      elif password is not None:
+        process = subprocess.Popen([kinit_path_local, principal], stdin=subprocess.PIPE)
+        stdout, stderr = process.communicate(password)
+        if process.returncode:
+          err_msg = Logger.filter_text("Execution of kinit returned %d. %s" % (process.returncode, stderr))
+          raise Fail(err_msg)
+        else:
+          return shell.checked_call(kdestroy_path_local)
+      else:
+        return 0, ''
+    else:
+      return 0, ''
+
+
+  def write_keytab_file(self):
+    import params
+    import stat
+
+    if params.kerberos_command_params is not None:
+      for item  in params.kerberos_command_params:
+        keytab_content_base64 = get_property_value(item, 'keytab_content_base64')
+        if (keytab_content_base64 is not None) and (len(keytab_content_base64) > 0):
+          keytab_file_path = get_property_value(item, 'keytab_file_path')
+          if (keytab_file_path is not None) and (len(keytab_file_path) > 0):
+            keytab_file_path = keytab_file_path.replace("_HOST", params.hostname)
+            head, tail = os.path.split(keytab_file_path)
+            if head:
+              Directory(head, create_parents = True, mode=0755, owner="root", group="root")
+
+            owner = "root"
+            group = "root"
+            mode = 0
+
+            mode |= stat.S_IREAD | stat.S_IWRITE
+            mode |= stat.S_IRGRP | stat.S_IWGRP
+
+            keytab_content = base64.b64decode(keytab_content_base64)
+
+            # to hide content in command output
+            def make_lambda(data):
+              return lambda: data
+
+            File(keytab_file_path,
+                 content=make_lambda(keytab_content),
+                 mode=mode,
+                 owner=owner,
+                 group=group)
+
+            principal = get_property_value(item, 'principal')
+            if principal is not None:
+              curr_content = Script.structuredOut
+
+              if "keytabs" not in curr_content:
+                curr_content['keytabs'] = {}
+
+              curr_content['keytabs'][principal.replace("_HOST", params.hostname)] = keytab_file_path
+
+              self.put_structured_out(curr_content)
+
+  def delete_keytab_file(self):
+    import params
+
+    if params.kerberos_command_params is not None:
+      for item in params.kerberos_command_params:
+        keytab_file_path = get_property_value(item, 'keytab_file_path')
+        if (keytab_file_path is not None) and (len(keytab_file_path) > 0):
+          keytab_file_path = keytab_file_path.replace("_HOST", params.hostname)
+          # Delete the keytab file
+          File(keytab_file_path, action="delete")
+
+          principal = get_property_value(item, 'principal')
+          if principal is not None:
+            curr_content = Script.structuredOut
+
+            if "keytabs" not in curr_content:
+              curr_content['keytabs'] = {}
+
+            curr_content['keytabs'][principal.replace("_HOST", params.hostname)] = '_REMOVED_'
+
+            self.put_structured_out(curr_content)
+
+  def setup_jce(self):
+    import params
+
+    if not params.jdk_name:
+      return
+    jce_curl_target = None
+    if params.jce_policy_zip is not None:
+      jce_curl_target = format("{artifact_dir}/{jce_policy_zip}")
+      Directory(params.artifact_dir,
+                create_parents = True,
+                )
+      File(jce_curl_target,
+           content = DownloadSource(format("{jce_location}/{jce_policy_zip}")),
+           )
+    elif params.security_enabled:
+      # Something weird is happening
+      raise Fail("Security is enabled, but JCE policy zip is not specified.")
+
+    # The extraction will occur only after the security flag is set
+    if params.security_enabled:
+      security_dir = format("{java_home}/jre/lib/security")
+
+      File([format("{security_dir}/US_export_policy.jar"), format("{security_dir}/local_policy.jar")],
+           action = "delete",
+           )
+
+      extract_cmd = ("unzip", "-o", "-j", "-q", jce_curl_target, "-d", security_dir)
+      Execute(extract_cmd,
+              only_if = format("test -e {security_dir} && test -f {jce_curl_target}"),
+              path = ['/bin/','/usr/bin'],
+              sudo = True
+      )
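
For illustration, the keytab branch of test_kinit above reduces to the following standalone sketch. It shells out to kinit/kdestroy directly instead of going through the Execute/shell wrappers, and the principal, keytab data, and tool paths are assumptions for the example:

    import base64
    import os
    import subprocess
    import tempfile

    def smoke_test_kinit(principal, keytab_base64, kinit='kinit', kdestroy='kdestroy'):
        """Decode a base64 keytab to a temporary file, kinit with it, then clean up."""
        fd, keytab_file = tempfile.mkstemp()
        try:
            with os.fdopen(fd, 'wb') as f:
                f.write(base64.b64decode(keytab_base64))
            # Obtain and immediately destroy a ticket to prove the keytab/principal pair works.
            subprocess.check_call([kinit, '-k', '-t', keytab_file, principal])
            subprocess.check_call([kdestroy])
        finally:
            os.remove(keytab_file)

    # Hypothetical usage (requires the MIT Kerberos client tools on PATH):
    # smoke_test_kinit('ambari-qa@EXAMPLE.COM', keytab_base64)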

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/params.py
new file mode 100644
index 0000000..3533b35
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/params.py
@@ -0,0 +1,200 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import get_property_value, get_unstructured_data
+from ambari_commons.os_check import OSCheck
+from resource_management.libraries.functions.expect import expect
+
+krb5_conf_dir = '/etc'
+krb5_conf_file = 'krb5.conf'
+krb5_conf_path = krb5_conf_dir + '/' + krb5_conf_file
+
+if OSCheck.is_suse_family():
+  kdc_conf_dir = '/var/lib/kerberos/krb5kdc'
+elif OSCheck.is_ubuntu_family():
+  kdc_conf_dir = '/etc/krb5kdc'
+else:
+  kdc_conf_dir = '/var/kerberos/krb5kdc'
+kdc_conf_file = 'kdc.conf'
+kdc_conf_path = kdc_conf_dir + '/' + kdc_conf_file
+
+kadm5_acl_dir = kdc_conf_dir  # Typically kadm5.acl and kdc.conf exist in the same directory
+kadm5_acl_file = 'kadm5.acl'
+kadm5_acl_path = kadm5_acl_dir + '/' + kadm5_acl_file
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+configurations = None
+keytab_details = None
+default_group = None
+kdc_server_host = None
+cluster_host_info = None
+
+hostname = config['hostname']
+
+kdb5_util_path = 'kdb5_util'
+
+kdamin_pid_path = '/var/run/kadmind.pid'
+krb5kdc_pid_path = '/var/run/krb5kdc.pid'
+
+smoke_test_principal = None
+smoke_test_keytab_file = None
+
+smoke_user = 'ambari-qa'
+
+manage_identities = 'true'
+
+artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
+jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
+jce_location = config['hostLevelParams']['jdk_location']
+jdk_name = default("/hostLevelParams/jdk_name", None)
+java_home = config['hostLevelParams']['java_home']
+java_version = expect("/hostLevelParams/java_version", int)
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+if config is not None:
+  kerberos_command_params = get_property_value(config, 'kerberosCommandParams')
+
+  cluster_host_info = get_property_value(config, 'clusterHostInfo')
+  if cluster_host_info is not None:
+    kdc_server_hosts = get_property_value(cluster_host_info, 'kdc_server_hosts')
+
+    if (kdc_server_hosts is not None) and (len(kdc_server_hosts) > 0):
+      kdc_server_host = kdc_server_hosts[0]
+
+  configurations = get_property_value(config, 'configurations')
+  if configurations is not None:
+    cluster_env = get_property_value(configurations, 'cluster-env')
+
+    if cluster_env is not None:
+      smoke_test_principal = get_property_value(cluster_env, 'smokeuser_principal_name', None, True, None)
+      smoke_test_keytab_file = get_property_value(cluster_env, 'smokeuser_keytab', None, True, None)
+      smoke_user = get_property_value(cluster_env, 'smokeuser', smoke_user, True, smoke_user)
+
+      default_group = get_property_value(cluster_env, 'user_group')
+
+      if default_group is None:
+        default_group = get_property_value(cluster_env, 'user-group')
+
+    # ##############################################################################################
+    # Get krb5.conf template data
+    # ##############################################################################################
+    realm = 'EXAMPLE.COM'
+    domains = ''
+    kdc_hosts = 'localhost'
+    admin_server_host = None
+    admin_principal = None
+    admin_password = None
+    admin_keytab = None
+    test_principal = None
+    test_password = None
+    test_keytab = None
+    test_keytab_file = None
+    encryption_types = None
+    manage_krb5_conf = "true"
+    krb5_conf_template = None
+
+    krb5_conf_data = get_property_value(configurations, 'krb5-conf')
+
+    kerberos_env = get_property_value(configurations, "kerberos-env")
+
+    if kerberos_env is not None:
+      manage_identities = get_property_value(kerberos_env, "manage_identities", "true", True, "true")
+      encryption_types = get_property_value(kerberos_env, "encryption_types", None, True, None)
+      realm = get_property_value(kerberos_env, "realm", None, True, None)
+      kdc_hosts = get_property_value(kerberos_env, 'kdc_hosts', kdc_hosts)
+      admin_server_host = get_property_value(kerberos_env, 'admin_server_host', admin_server_host)
+
+    if krb5_conf_data is not None:
+      realm = get_property_value(krb5_conf_data, 'realm', realm)
+      domains = get_property_value(krb5_conf_data, 'domains', domains)
+
+      admin_principal = get_property_value(krb5_conf_data, 'admin_principal', admin_principal, True, None)
+      admin_password = get_property_value(krb5_conf_data, 'admin_password', admin_password, True, None)
+      admin_keytab = get_property_value(krb5_conf_data, 'admin_keytab', admin_keytab, True, None)
+
+      test_principal = get_property_value(krb5_conf_data, 'test_principal', test_principal, True, None)
+      test_password = get_property_value(krb5_conf_data, 'test_password', test_password, True, None)
+      test_keytab = get_property_value(krb5_conf_data, 'test_keytab', test_keytab, True, None)
+      test_keytab_file = get_property_value(krb5_conf_data, 'test_keytab_file', test_keytab_file, True, None)
+
+      krb5_conf_template = get_property_value(krb5_conf_data, 'content', krb5_conf_template)
+      krb5_conf_dir = get_property_value(krb5_conf_data, 'conf_dir', krb5_conf_dir)
+      krb5_conf_file = get_property_value(krb5_conf_data, 'conf_file', krb5_conf_file)
+      krb5_conf_path = krb5_conf_dir + '/' + krb5_conf_file
+
+      manage_krb5_conf = get_property_value(krb5_conf_data, 'manage_krb5_conf', "true")
+
+    # For backward compatibility, ensure that kdc_host exists. This may be needed if the krb5.conf
+    # template in krb5-conf/content had not been updated during the Ambari upgrade to 2.4.0 - which
+    # will happen if the template was altered from its stack-default value.
+    kdc_host_parts = kdc_hosts.split(',')
+    if kdc_host_parts:
+      kdc_host = kdc_host_parts[0]
+    else:
+      kdc_host = kdc_hosts
+
+    # ##############################################################################################
+    # Get kdc.conf template data
+    # ##############################################################################################
+    kdcdefaults_kdc_ports = "88"
+    kdcdefaults_kdc_tcp_ports = "88"
+
+    kdc_conf_template = None
+
+    kdc_conf_data = get_property_value(configurations, 'kdc-conf')
+
+    if kdc_conf_data is not None:
+      kdcdefaults_kdc_ports = get_property_value(kdc_conf_data, 'kdcdefaults_kdc_ports', kdcdefaults_kdc_ports)
+      kdcdefaults_kdc_tcp_ports = get_property_value(kdc_conf_data, 'kdcdefaults_kdc_tcp_ports', kdcdefaults_kdc_tcp_ports)
+
+      kdc_conf_template = get_property_value(kdc_conf_data, 'content', kdc_conf_template)
+      kdc_conf_dir = get_property_value(kdc_conf_data, 'conf_dir', kdc_conf_dir)
+      kdc_conf_file = get_property_value(kdc_conf_data, 'conf_file', kdc_conf_file)
+      kdc_conf_path = kdc_conf_dir + '/' + kdc_conf_file
+
+    # ##############################################################################################
+    # Get kadm5.acl template data
+    # ##############################################################################################
+    kdcdefaults_kdc_ports = '88'
+    kdcdefaults_kdc_tcp_ports = '88'
+
+    kadm5_acl_template = None
+
+    kadm5_acl_data = get_property_value(configurations, 'kadm5-acl')
+
+    if kadm5_acl_data is not None:
+      kadm5_acl_template = get_property_value(kadm5_acl_data, 'content', kadm5_acl_template)
+      kadm5_acl_dir = get_property_value(kadm5_acl_data, 'conf_dir', kadm5_acl_dir)
+      kadm5_acl_file = get_property_value(kadm5_acl_data, 'conf_file', kadm5_acl_file)
+      kadm5_acl_path = kadm5_acl_dir + '/' + kadm5_acl_file
+
+  # ################################################################################################
+  # Get commandParams
+  # ################################################################################################
+  command_params = get_property_value(config, 'commandParams')
+  if command_params is not None:
+    keytab_details = get_unstructured_data(command_params, 'keytab')
+
+    if manage_identities:
+      smoke_test_principal = get_property_value(command_params, 'principal_name', smoke_test_principal)
+      smoke_test_keytab_file = get_property_value(command_params, 'keytab_file', smoke_test_keytab_file)
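
To make the layered lookups above concrete, here is a minimal sketch of how realm, kdc_hosts, kdc_host, and kdc_server_host fall out of a pared-down command JSON. The get() helper is a simplified stand-in for utils.get_property_value (no trimming), and the host names are made up:

    def get(d, key, default=None):
        # Simplified stand-in for utils.get_property_value: dict lookup with a default.
        value = d.get(key) if d is not None else None
        return default if value is None else value

    config = {
        'clusterHostInfo': {'kdc_server_hosts': ['c6401.ambari.apache.org']},
        'configurations': {
            'kerberos-env': {'realm': 'EXAMPLE.COM',
                             'kdc_hosts': 'kdc1.example.com, kdc2.example.com'},
            'krb5-conf': {},  # nothing overridden at the krb5-conf level
        },
    }

    configurations = get(config, 'configurations')
    kerberos_env = get(configurations, 'kerberos-env')

    realm = get(kerberos_env, 'realm', 'EXAMPLE.COM')        # 'EXAMPLE.COM'
    kdc_hosts = get(kerberos_env, 'kdc_hosts', 'localhost')  # 'kdc1.example.com, kdc2.example.com'
    kdc_host = kdc_hosts.split(',')[0]                        # 'kdc1.example.com' (backward compatibility)
    kdc_server_host = get(config, 'clusterHostInfo', {}).get('kdc_server_hosts', [None])[0]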

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/service_check.py
new file mode 100644
index 0000000..555a93c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/service_check.py
@@ -0,0 +1,30 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management.libraries.script.script import Script
+
+class ServiceCheck(Script):
+
+    def service_check(self, env):
+        print "Service Check"
+
+if __name__ == "__main__":
+    ServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/status_params.py
new file mode 100644
index 0000000..bbae4a3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/status_params.py
@@ -0,0 +1,32 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+hostname = config['hostname']
+kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+smoke_user = config['configurations']['cluster-env']['smokeuser']
+smoke_user_principal = config['configurations']['cluster-env']['smokeuser_principal_name']

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/utils.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/utils.py
new file mode 100644
index 0000000..199e6d7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/utils.py
@@ -0,0 +1,105 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+def get_property_value(dictionary, property_name, default_value=None, trim_string=False,
+                       empty_value=""):
+  """
+  Get a property value from a dictionary, applying rules as necessary.
+
+  If dictionary does not contain a value for property_name, or the value for property_name is None,
+  default_value is used as the value to return.  Then, if trim_string is True and the value is None
+  or the value is an empty string, empty_value is returned; otherwise the (current) value is returned.
+
+  Note: the property value will most likely be a string or a unicode string, however in the event
+  it is not (for example a number), this method will behave properly and return the value as is.
+
+  :param dictionary: a dictionary of values
+  :param property_name: the name of a dictionary item to retrieve
+  :param default_value: the value to use if the item is not in the dictionary or the value of the item is None
+  :param trim_string: a Boolean value indicating whether to strip whitespace from the value (True) or not (False)
+  :param empty_value: the value to use if the (current) value is None or an empty string, if trim_string is True
+  :return: the requested property value with rules applied
+  """
+  # If property_name is not in the dictionary, set value to default_value
+  if property_name in dictionary:
+    value = dictionary[property_name]
+    if value is None:
+      value = default_value
+  else:
+    value = default_value
+
+  if trim_string:
+    # If the value is none, consider it empty...
+    if value is None:
+      value = empty_value
+    elif (type(value) == str) or (type(value) == unicode):
+      value = value.strip()
+
+      if len(value) == 0:
+        value = empty_value
+
+  return value
+
+def get_unstructured_data(dictionary, property_name):
+  prefix = property_name + '/'
+  prefix_len = len(prefix)
+  return dict((k[prefix_len:], v) for k, v in dictionary.iteritems() if k.startswith(prefix))
+
+def split_host_and_port(host):
+  """
+  Splits a string into its host and port components
+
+  :param host: a string matching the following pattern: <host name | ip address>[:port]
+  :return: a Dictionary containing 'host' and 'port' entries for the input value
+  """
+
+  if host is None:
+    host_and_port = None
+  else:
+    host_and_port = {}
+    parts = host.split(":")
+
+    if parts is not None:
+      length = len(parts)
+
+      if length > 0:
+        host_and_port['host'] = parts[0]
+
+        if length > 1:
+          host_and_port['port'] = int(parts[1])
+
+  return host_and_port
+
+def set_port(host, port):
+  """
+  Sets the port for a host specification, potentially replacing an existing port declaration
+
+  :param host: a string matching the following pattern: <host name | ip address>[:port]
+  :param port: a string or integer declaring the (new) port
+  :return: a string declaring the new host/port specification
+  """
+  if port is None:
+    return host
+  else:
+    host_and_port = split_host_and_port(host)
+
+    if (host_and_port is not None) and ('host' in host_and_port):
+      return "%s:%s" % (host_and_port['host'], port)
+    else:
+      return host
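
Assuming the helpers above are importable (they run under the agent's Python 2, hence iteritems), a few representative calls and their results, with illustrative values:

    from utils import get_property_value, get_unstructured_data, split_host_and_port, set_port

    # Trimming and empty-value handling: whitespace is stripped, and a blank result
    # falls back to the empty_value argument (None here).
    get_property_value({'realm': '  EXAMPLE.COM  '}, 'realm', None, True, None)   # 'EXAMPLE.COM'
    get_property_value({'realm': '   '}, 'realm', None, True, None)               # None

    # Collect keys sharing a prefix into their own dictionary.
    get_unstructured_data({'keytab/ambari-qa': 'abc', 'principal': 'xyz'}, 'keytab')  # {'ambari-qa': 'abc'}

    # Host/port helpers.
    split_host_and_port('kdc1.example.com:749')   # {'host': 'kdc1.example.com', 'port': 749}
    set_port('kdc1.example.com:88', 750)          # 'kdc1.example.com:750'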

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/templates/krb5_conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/templates/krb5_conf.j2 b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/templates/krb5_conf.j2
new file mode 100644
index 0000000..0191953
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/templates/krb5_conf.j2
@@ -0,0 +1,54 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+[libdefaults]
+  renew_lifetime = 7d
+  forwardable = true
+  default_realm = {{realm}}
+  ticket_lifetime = 24h
+  dns_lookup_realm = false
+  dns_lookup_kdc = false
+  default_ccache_name = /tmp/krb5cc_%{uid}
+  #default_tgs_enctypes = {{encryption_types}}
+  #default_tkt_enctypes = {{encryption_types}}
+{% if domains %}
+[domain_realm]
+{%- for domain in domains.split(',') %}
+  {{domain|trim()}} = {{realm}}
+{%- endfor %}
+{% endif %}
+[logging]
+  default = FILE:/var/log/krb5kdc.log
+  admin_server = FILE:/var/log/kadmind.log
+  kdc = FILE:/var/log/krb5kdc.log
+
+[realms]
+  {{realm}} = {
+{%- if kdc_hosts -%}
+{%- set kdc_host_list = kdc_hosts.split(',')  -%}
+{%- if kdc_host_list and kdc_host_list|length > 0 %}
+    admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}
+{%- if kdc_host_list -%}
+{% for kdc_host in kdc_host_list %}
+    kdc = {{kdc_host|trim()}}
+{%- endfor -%}
+{% endif %}
+{%- endif %}
+{%- endif %}
+  }
+
+{# Append additional realm declarations below #}
\ No newline at end of file
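
For reference, with a realm of EXAMPLE.COM, kdc_hosts of 'kdc1.example.com, kdc2.example.com' (made-up hosts), and no explicit admin_server_host, the [realms] block of this template renders roughly as:

    [realms]
      EXAMPLE.COM = {
        admin_server = kdc1.example.com
        kdc = kdc1.example.com
        kdc = kdc2.example.com
      }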

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/alerts.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/alerts.json
new file mode 100644
index 0000000..a1b075a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/alerts.json
@@ -0,0 +1,20 @@
+{
+    "SLEEPY": {
+
+        "SLEEPY": [
+            {
+                "name": "sleepy_process",
+                "label": "Sleepy Process",
+                "description": "Alert for sleepy component process status",
+                "interval": 1,
+                "scope": "HOST",
+                "enabled": true,
+                "source": {
+                    "type": "SCRIPT",
+                    "path": "PERF/1.0/services/SLEEPY/package/alerts/alert_sleepy_process.py",
+                    "parameters": []
+                }
+            }
+        ]
+    }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/configuration/sleepy-alert-config.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/configuration/sleepy-alert-config.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/configuration/sleepy-alert-config.xml
new file mode 100644
index 0000000..392eea7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/configuration/sleepy-alert-config.xml
@@ -0,0 +1,80 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+-->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
+
+    <property>
+        <name>alert.behavior.type</name>
+        <value>percentage</value>
+        <description>
+            This property describes the type of alert behavior.
+            There are three types: percentage, timeout, flip.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+
+    <property>
+        <name>alert.success.percentage</name>
+        <value>100</value>
+        <description>
+            This property takes effect only when alert.behavior.type is
+            set to "percentage". Here you should set the percentage of
+            successful alert checks.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+
+    <property>
+        <name>alert.timeout.return.value</name>
+        <value>false</value>
+        <description>
+            This property takes effect only when alert.behavior.type is
+            set to "timeout". Here you should set the result which the alert
+            will return after the timeout: false|true|none.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>alert.timeout.secs</name>
+        <value>120</value>
+        <description>
+            This property takes effect only when alert.behavior.type is
+            set to "timeout". Here you should set the number of seconds for
+            the alert to sleep.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+
+    <property>
+        <name>alert.flip.interval.mins</name>
+        <value>3</value>
+        <description>
+            This property takes effect only when alert.behavior.type is
+            set to "flip". Here you should set the number of minutes at which
+            the alert should flip between true and false.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+
+</configuration>
\ No newline at end of file
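
The alert definition above points at PERF/1.0/services/SLEEPY/package/alerts/alert_sleepy_process.py, which is not part of this hunk. As a rough sketch only, a script following the usual Ambari script-alert contract (module-level get_tokens() and execute()) could honor the sleepy-alert-config properties along these lines; the real script's logic may differ:

    import random

    RESULT_OK = 'OK'
    RESULT_CRITICAL = 'CRITICAL'

    BEHAVIOR_TYPE_KEY = '{{sleepy-alert-config/alert.behavior.type}}'
    SUCCESS_PERCENT_KEY = '{{sleepy-alert-config/alert.success.percentage}}'

    def get_tokens():
        # Configuration properties the agent resolves and passes into execute().
        return (BEHAVIOR_TYPE_KEY, SUCCESS_PERCENT_KEY)

    def execute(configurations={}, parameters=[], host_name=None):
        behavior = configurations.get(BEHAVIOR_TYPE_KEY, 'percentage')
        if behavior == 'percentage':
            percent = int(configurations.get(SUCCESS_PERCENT_KEY, 100))
            if random.randint(1, 100) <= percent:
                return (RESULT_OK, ['Sleepy process is running'])
            return (RESULT_CRITICAL, ['Sleepy process check failed'])
        # The 'timeout' and 'flip' behaviors are omitted from this sketch.
        return (RESULT_OK, ['Sleepy process is running'])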


[15/25] ambari git commit: Merge from branch-2.5

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-site.xml
new file mode 100644
index 0000000..e33b91d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-site.xml
@@ -0,0 +1,814 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration supports_final="true">
+  <!-- These configs were inherited from HDP 2.1 -->
+  <property>
+    <name>yarn.timeline-service.enabled</name>
+    <value>true</value>
+    <description>Indicate to clients whether timeline service is enabled or not.
+      If enabled, clients will put entities and events to the timeline server.
+    </description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.generic-application-history.store-class</name>
+    <value>org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore</value>
+    <description>
+      Store class name for history store, defaulting to file system store
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.webapp.address</name>
+    <value>localhost:8188</value>
+    <description>
+      The http address of the timeline service web application.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.webapp.https.address</name>
+    <value>localhost:8190</value>
+    <description>
+      The https address of the timeline service web application.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.address</name>
+    <value>localhost:10200</value>
+    <description>
+      This is the default address for the timeline server to start
+      the RPC server.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <description>Time to live for timeline store data in milliseconds.</description>
+    <name>yarn.timeline-service.ttl-ms</name>
+    <value>2678400000</value>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <description>Length of time to wait between deletion cycles of leveldb timeline store in milliseconds.</description>
+    <name>yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms</name>
+    <value>300000</value>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- These configs were inherited from HDP 2.2 -->
+  <property>
+    <name>yarn.application.classpath</name>
+    <value>$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*</value>
+    <description>Classpath for typical applications.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hadoop.registry.rm.enabled</name>
+    <value>false</value>
+    <description>
+      Is the registry enabled: does the RM start it up, create the user and system paths, and purge service records when containers, application attempts and applications complete
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hadoop.registry.zk.quorum</name>
+    <value>localhost:2181</value>
+    <description>
+      List of hostname:port pairs defining the zookeeper quorum binding for the registry
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.recovery.enabled</name>
+    <value>true</value>
+    <description>Enable the node manager to recover after starting</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.recovery.dir</name>
+    <value>{{yarn_log_dir_prefix}}/nodemanager/recovery-state</value>
+    <description>
+      The local filesystem directory in which the node manager will store
+      state when recovery is enabled.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.client.nodemanager-connect.retry-interval-ms</name>
+    <value>10000</value>
+    <description>Time interval between each attempt to connect to NM</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.client.nodemanager-connect.max-wait-ms</name>
+    <value>60000</value>
+    <description>Max time to wait to establish a connection to NM</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.recovery.enabled</name>
+    <value>true</value>
+    <description>
+      Enable RM to recover state after starting.
+      If true, then yarn.resourcemanager.store.class must be specified.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.work-preserving-recovery.enabled</name>
+    <value>true</value>
+    <description>
+      Enable RM work preserving recovery. This configuration is private to YARN for experimenting with the feature.
+    </description>
+    <display-name>Enable Work Preserving Restart</display-name>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.store.class</name>
+    <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
+    <description>
+      The class to use as the persistent store.
+      If org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore is used,
+      the store is implicitly fenced; meaning a single ResourceManager
+      is able to use the store at any point in time.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.zk-address</name>
+    <value>localhost:2181</value>
+    <description>
+      List Host:Port of the ZooKeeper servers to be used by the RM. comma separated host:port pairs, each corresponding to a zk server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" If the optional chroot suffix is used the example would look like: "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002/app/a" where the client would be rooted at "/app/a" and all paths would be relative to this root - ie getting/setting/etc...  "/foo/bar" would result in operations being run on "/app/a/foo/bar" (from the server perspective).
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.zk-state-store.parent-path</name>
+    <value>/rmstore</value>
+    <description>Full path of the ZooKeeper znode where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore as the value for yarn.resourcemanager.store.class</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.zk-acl</name>
+    <value>world:anyone:rwcda</value>
+    <description>ACL's to be used for ZooKeeper znodes.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms</name>
+    <value>10000</value>
+    <description>Set the amount of time RM waits before allocating new containers on work-preserving-recovery. Such wait period gives RM a chance to settle down resyncing with NMs in the cluster on recovery, before assigning new containers to applications.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.connect.retry-interval.ms</name>
+    <value>30000</value>
+    <description>How often to try connecting to the ResourceManager.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.connect.max-wait.ms</name>
+    <value>900000</value>
+    <description>Maximum time to wait to establish connection to ResourceManager</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.zk-retry-interval-ms</name>
+    <value>1000</value>
+    <description>"Retry interval in milliseconds when connecting to ZooKeeper.
+      When HA is enabled, the value here is NOT used. It is generated
+      automatically from yarn.resourcemanager.zk-timeout-ms and
+      yarn.resourcemanager.zk-num-retries."
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.zk-num-retries</name>
+    <value>1000</value>
+    <description>Number of times RM tries to connect to ZooKeeper.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.zk-timeout-ms</name>
+    <value>10000</value>
+    <description>ZooKeeper session timeout in milliseconds. Session expiration is managed by the ZooKeeper cluster itself, not by the client. This value is used by the cluster to determine when the client's session expires. Expirations happens when the cluster does not hear from the client within the specified session timeout period (i.e. no heartbeat).</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.state-store.max-completed-applications</name>
+    <value>${yarn.resourcemanager.max-completed-applications}</value>
+    <description>The maximum number of completed applications RM state store keeps, less than or equal to ${yarn.resourcemanager.max-completed-applications}. By default, it equals ${yarn.resourcemanager.max-completed-applications}. This ensures that the applications kept in the state store are consistent with the applications remembered in RM memory. Any values larger than ${yarn.resourcemanager.max-completed-applications} will be reset to ${yarn.resourcemanager.max-completed-applications}. Note that this value impacts the RM recovery performance. Typically, a smaller value indicates better performance on RM recovery.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.fs.state-store.retry-policy-spec</name>
+    <value>2000, 500</value>
+    <description>hdfs client retry policy specification. hdfs client retry is always enabled. Specified in pairs of sleep-time and number-of-retries and (t0, n0), (t1, n1), ..., the first n0 retries sleep t0 milliseconds on average, the following n1 retries sleep t1 milliseconds on average, and so on.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.fs.state-store.uri</name>
+    <value> </value>
+    <description>URI pointing to the location of the FileSystem path where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore as the value for yarn.resourcemanager.store.class</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.ha.enabled</name>
+    <value>false</value>
+    <description>enable RM HA or not</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.resources-handler.class</name>
+    <value>org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler</value>
+    <description>Pre-requisite to use CGroups</description>
+    <depends-on>
+      <property>
+        <type>yarn-env</type>
+        <name>yarn_cgroups_enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.cgroups.hierarchy</name>
+    <value>hadoop-yarn</value>
+    <description>Name of the Cgroups hierarchy under which all YARN jobs will be launched</description>
+    <depends-on>
+      <property>
+        <type>yarn-env</type>
+        <name>yarn_cgroups_enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.cgroups.mount</name>
+    <value>false</value>
+    <description>If true, YARN will automount the CGroup, however the directory needs to already exist; else, the cgroup should be mounted by the admin</description>
+    <depends-on>
+      <property>
+        <type>yarn-env</type>
+        <name>yarn_cgroups_enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.cgroups.mount-path</name>
+    <value>/cgroup</value>
+    <description>Path used by the LCE to mount cgroups if not found. This path must exist before the NodeManager is launched.</description>
+    <depends-on>
+      <property>
+        <type>yarn-env</type>
+        <name>yarn_cgroups_enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage</name>
+    <value>false</value>
+    <description>Strictly limit CPU resource usage to allocated usage even if spare CPU is available</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.resource.cpu-vcores</name>
+    <value>8</value>
+    <description>Number of vcores that can be allocated
+      for containers. This is used by the RM scheduler when allocating
+      resources for containers. This is not used to limit the number of
+      CPUs used by YARN containers. If it is set to -1 and
+      yarn.nodemanager.resource.detect-hardware-capabilities is true, it is
+      automatically determined from the hardware in case of Windows and Linux.
+      In other cases, number of vcores is 8 by default.
+    </description>
+    <display-name>Number of virtual cores</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>32</maximum>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
+    <value>80</value>
+    <description>The amount of CPU allocated for YARN containers - only effective when used with CGroups</description>
+    <display-name>Percentage of physical CPU allocated for all containers on a node</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>100</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.node-labels.fs-store.retry-policy-spec</name>
+    <value>2000, 500</value>
+    <description>
+      Retry policy used for FileSystem node label store. The policy is
+      specified by N pairs of sleep-time in milliseconds and number-of-retries
+      &quot;s1,n1,s2,n2,...&quot;.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb</name>
+    <value>1000</value>
+    <description>This is related to disk size on the machines, admins should set one of yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb or yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage but not both. If both are set, the more conservative value will be used</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name>
+    <value>90</value>
+    <description>This is related to disk size on the machines, admins should set one of yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb or yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage but not both. If both are set, the more conservative value will be used</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</name>
+    <value>-1</value>
+    <description>Defines how often NMs wake up to upload log files. The default value is -1. By default, the logs will be uploaded when the application is finished. By setting this configuration, logs can be uploaded periodically while the application is running. The minimum rolling-interval-seconds that can be set is 3600.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.log-aggregation.debug-enabled</name>
+    <value>false</value>
+    <description>
+      This configuration is for debug and test purposes.
+      By setting this configuration to true, we can break the lower bound of
+      yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.log-aggregation.num-log-files-per-app</name>
+    <value>30</value>
+    <description>This is a temporary solution. The configuration will be deleted once we find a more scalable method to only write a single log file per LRS.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.system-metrics-publisher.enabled</name>
+    <value>true</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size</name>
+    <value>10</value>
+    <description>Number of worker threads that send the yarn system metrics data.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.client.max-retries</name>
+    <value>30</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.client.retry-interval-ms</name>
+    <value>1000</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.ttl-enable</name>
+    <value>true</value>
+    <description>
+      Enable age off of timeline store data.
+    </description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.state-store-class</name>
+    <value>org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore</value>
+    <description>Store class name for timeline state store.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.leveldb-state-store.path</name>
+    <value>/hadoop/yarn/timeline</value>
+    <description>Store file name for leveldb state store.</description>
+    <value-attributes>
+      <type>directory</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.leveldb-timeline-store.path</name>
+    <value>/hadoop/yarn/timeline</value>
+    <description>Store file name for leveldb timeline store.</description>
+    <value-attributes>
+      <type>directory</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.leveldb-timeline-store.read-cache-size</name>
+    <value>104857600</value>
+    <description>
+      Size of read cache for uncompressed blocks for leveldb timeline store in bytes.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size</name>
+    <value>10000</value>
+    <description>
+      Size of cache for recently read entity start times for leveldb timeline store in number of entities.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size</name>
+    <value>10000</value>
+    <description>
+      Size of cache for recently written entity start times for leveldb timeline store in number of entities.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.http-authentication.type</name>
+    <value>simple</value>
+    <description>
+      Defines authentication used for the Timeline Server HTTP endpoint.
+      Supported values are: simple | kerberos | $AUTHENTICATION_HANDLER_CLASSNAME
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.http-authentication.simple.anonymous.allowed</name>
+    <value>true</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled</name>
+    <value>false</value>
+    <description>
+      Flag to enable override of the default kerberos authentication filter with
+      the RM authentication filter to allow authentication using delegation
+      tokens(fallback to kerberos if the tokens are missing).
+      Only applicable when the http authentication type is kerberos.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.bind-host</name>
+    <value>0.0.0.0</value>
+    <description>The default value is 0.0.0.0; when this is set, the service will bind on all interfaces. I think these two options (blank, "0.0.0.0" sans quotes) should be the two available values, with blank as the default.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.bind-host</name>
+    <value>0.0.0.0</value>
+    <description>The default value is 0.0.0.0; when this is set, the service will bind on all interfaces. I think these two options (blank, "0.0.0.0" sans quotes) should be the two available values, with blank as the default.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.bind-host</name>
+    <value>0.0.0.0</value>
+    <description>The default value is 0.0.0.0; when this is set, the service will bind on all interfaces. I think these two options (blank, "0.0.0.0" sans quotes) should be the two available values, with blank as the default.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.node-labels.fs-store.root-dir</name>
+    <value>/system/yarn/node-labels</value>
+    <description>
+      URI for NodeLabelManager.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.minimum-allocation-vcores</name>
+    <value>1</value>
+    <description/>
+    <display-name>Minimum Container Size (VCores)</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>8</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.nodemanager.resource.cpu-vcores</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.maximum-allocation-vcores</name>
+    <value>8</value>
+    <description/>
+    <display-name>Maximum Container Size (VCores)</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>8</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.nodemanager.resource.cpu-vcores</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.node-labels.enabled</name>
+    <value>false</value>
+    <description>
+      Enable node labels to restrict YARN applications so that they run only on cluster nodes that have a specified node label.
+    </description>
+    <display-name>Node Labels</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Enabled</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Disabled</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.container-executor.class</name>
+    <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
+    <description>ContainerExecutor for launching containers</description>
+    <depends-on>
+      <property>
+        <type>yarn-env</type>
+        <name>yarn_cgroups_enabled</name>
+      </property>
+      <property>
+        <type>core-site</type>
+        <name>hadoop.security.authentication</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.group</name>
+    <value>hadoop</value>
+    <description>Unix group of the NodeManager</description>
+    <depends-on>
+      <property>
+        <type>yarn-env</type>
+        <name>yarn_cgroups_enabled</name>
+      </property>
+      <property>
+        <type>cluster-env</type>
+        <name>user_group</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.scheduler.monitor.enable</name>
+    <description>
+      Enable a set of periodic monitors (specified in
+      yarn.resourcemanager.scheduler.monitor.policies) that affect the
+      scheduler.
+    </description>
+    <value>false</value>
+    <display-name>Pre-emption</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Enabled</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Disabled</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- In HDP 2.3, these properties were deleted:
+  yarn.node-labels.manager-class
+  -->
+
+  <!-- These configs were inherited from HDP 2.3 -->
+  <property>
+    <name>yarn.timeline-service.recovery.enabled</name>
+    <description>
+      Enable timeline server to recover state after starting. If
+      true, then yarn.timeline-service.state-store-class must be specified.
+    </description>
+    <value>true</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.acl.enable</name>
+    <value>false</value>
+    <description>Whether ACLs are enabled.</description>
+    <depends-on>
+      <property>
+        <type>ranger-yarn-plugin-properties</type>
+        <name>ranger-yarn-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.authorization-provider</name>
+    <description>YARN authorization provider class.</description>
+    <depends-on>
+      <property>
+        <type>ranger-yarn-plugin-properties</type>
+        <name>ranger-yarn-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.admin.acl</name>
+    <value>yarn</value>
+    <description>ACL that determines who can be an administrator of the YARN cluster.</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!--ats v1.5 properties-->
+  <property>
+    <name>yarn.timeline-service.version</name>
+    <value>1.5</value>
+    <description>Timeline service version we&#x2019;re currently using.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.store-class</name>
+    <value>org.apache.hadoop.yarn.server.timeline.EntityGroupFSTimelineStore</value>
+    <description>Main storage class for YARN timeline server.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.active-dir</name>
+    <value>/ats/active/</value>
+    <description>DFS path to store active application&#x2019;s timeline data</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.done-dir</name>
+    <value>/ats/done/</value>
+    <description>DFS path to store done application&#x2019;s timeline data</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes</name>
+    <value/>
+    <description>Plugins that can translate a timeline entity read request into a list of timeline cache ids, separated by commas. </description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- advanced ats v1.5 properties-->
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.summary-store</name>
+    <description>Summary storage for ATS v1.5</description>
+    <!-- Use rolling leveldb, advanced -->
+    <value>org.apache.hadoop.yarn.server.timeline.RollingLevelDBTimelineStore</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.scan-interval-seconds</name>
+    <description>
+      Scan interval for the ATS v1.5 entity group file system storage reader. This
+      value controls how frequently the reader scans the HDFS active directory
+      for application status.
+    </description>
+    <!-- Default is 60 seconds, advanced -->
+    <value>60</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.cleaner-interval-seconds</name>
+    <description>
+      Scan interval for the ATS v1.5 entity group file system storage cleaner. This
+      value controls how frequently the cleaner scans the HDFS done directory
+      for stale application data.
+    </description>
+    <!-- 3600 is default, advanced -->
+    <value>3600</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.retain-seconds</name>
+    <description>
+      How long the ATS v1.5 entity group file system storage will keep an
+      application's data in the done directory.
+    </description>
+    <!-- 7 days is default, advanced -->
+    <value>604800</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- These configs were inherited from HDP 2.4 -->
+  <property>
+    <name>yarn.nodemanager.aux-services.spark_shuffle.class</name>
+    <value>org.apache.spark.network.yarn.YarnShuffleService</value>
+    <description>The auxiliary service class to use for Spark</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- These configs were inherited from HDP 2.5 -->
+  <property>
+    <name>yarn.nodemanager.aux-services</name>
+    <value>mapreduce_shuffle,spark_shuffle,spark2_shuffle</value>
+    <description>Auxiliary services of NodeManager. A valid service name should only contain a-zA-Z0-9_ and cannot start with a number</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.aux-services.spark2_shuffle.class</name>
+    <value>org.apache.spark.network.yarn.YarnShuffleService</value>
+    <description>The auxiliary service class to use for Spark 2</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.aux-services.spark_shuffle.classpath</name>
+    <value>{{stack_root}}/${hdp.version}/spark/aux/*</value>
+    <description>The auxiliary service classpath to use for Spark</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.aux-services.spark2_shuffle.classpath</name>
+    <value>{{stack_root}}/${hdp.version}/spark2/aux/*</value>
+    <description>The auxiliary service classpath to use for Spark 2</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</name>
+    <description>Defines how often NMs wake up to upload log files. The default value is -1. By default, logs are uploaded when the application finishes. By setting this property, logs can be uploaded periodically while the application is running. The minimum value that can be set for rolling-interval-seconds is 3600.</description>
+    <value>3600</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.container-metrics.unregister-delay-ms</name>
+    <value>60000</value>
+    <description>The delay, in milliseconds, before unregistering container metrics after completion.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath</name>
+    <value/>
+    <description>Classpath for all plugins defined in yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes.</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
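
For reference, a minimal sketch (not part of this patch) of how a stack script could read a couple of the yarn-site values defined above, assuming the resource_management helpers already used elsewhere in this commit (see the params.py change below):

    from resource_management.libraries.script.script import Script
    from resource_management.libraries.functions.default import default

    config = Script.get_config()
    # Plain dictionary lookup into the command configurations.
    node_labels_enabled = config['configurations']['yarn-site']['yarn.node-labels.enabled']
    # default() returns the given fallback when the path is absent.
    ats_version = default("/configurations/yarn-site/yarn.timeline-service.version", "1.5")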

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/kerberos.json
new file mode 100644
index 0000000..e690204
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/kerberos.json
@@ -0,0 +1,278 @@
+{
+  "services": [
+    {
+      "name": "YARN",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "yarn-site": {
+            "yarn.timeline-service.enabled": "true",
+            "yarn.timeline-service.http-authentication.type": "kerberos",
+            "yarn.acl.enable": "true",
+            "yarn.admin.acl": "${yarn-env/yarn_user},dr.who",
+            "yarn.timeline-service.http-authentication.signature.secret": "",
+            "yarn.timeline-service.http-authentication.signature.secret.file": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
+            "yarn.timeline-service.http-authentication.token.validity": "",
+            "yarn.timeline-service.http-authentication.cookie.domain": "",
+            "yarn.timeline-service.http-authentication.cookie.path": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
+            "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
+            "yarn.resourcemanager.proxyusers.*.groups": "",
+            "yarn.resourcemanager.proxyusers.*.hosts": "",
+            "yarn.resourcemanager.proxyusers.*.users": "",
+            "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
+            "yarn.nodemanager.linux-container-executor.cgroups.mount-path": ""
+          }
+        },
+        {
+          "core-site": {
+            "hadoop.proxyuser.${yarn-env/yarn_user}.groups": "*",
+            "hadoop.proxyuser.${yarn-env/yarn_user}.hosts": "${clusterHostInfo/rm_host}"
+          }
+        },
+        {
+          "capacity-scheduler": {
+            "yarn.scheduler.capacity.root.acl_administer_queue": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_administer_queue": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.acl_administer_jobs": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_administer_jobs": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_submit_applications": "${yarn-env/yarn_user}"
+          }
+        },
+        {
+          "ranger-yarn-audit": {
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "NODEMANAGER",
+          "identities": [
+            {
+              "name": "nodemanager_nm",
+              "principal": {
+                "value": "nm/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.nodemanager.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nm.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.nodemanager.keytab"
+              }
+            },
+            {
+              "name": "/HIVE/HIVE_SERVER/hive_server_hive",
+              "principal": {
+                "configuration": "hive-interactive-site/hive.llap.daemon.service.principal"
+              },
+              "keytab": {
+                "configuration": "hive-interactive-site/hive.llap.daemon.keytab.file"
+              },
+              "when" : {
+                "contains" : ["services", "HIVE"]
+              }
+            },
+            {
+              "name": "llap_zk_hive",
+              "principal": {
+                "value": "hive/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hive-interactive-site/hive.llap.zk.sm.principal"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hive.llap.zk.sm.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": "r"
+                },
+                "configuration": "hive-interactive-site/hive.llap.zk.sm.keytab.file"
+              },
+              "when" : {
+                "contains" : ["services", "HIVE"]
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-keytab-file"
+              }
+            }
+          ],
+          "configurations": [
+            {
+              "yarn-site": {
+                "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor"
+              }
+            }
+          ]
+        },
+        {
+          "name": "RESOURCEMANAGER",
+          "identities": [
+            {
+              "name": "resource_manager_rm",
+              "principal": {
+                "value": "rm/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.resourcemanager.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/rm.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.resourcemanager.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file"
+              }
+            },
+            {
+              "name": "/YARN/RESOURCEMANAGER/resource_manager_rm",
+              "principal": {
+                "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.principal"
+              },
+              "keytab": {
+                "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.keyTab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "APP_TIMELINE_SERVER",
+          "identities": [
+            {
+              "name": "app_timeline_server_yarn",
+              "principal": {
+                "value": "yarn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.timeline-service.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/yarn.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.timeline-service.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
+              }
+            },
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "name": "MAPREDUCE2",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "components": [
+        {
+          "name": "HISTORYSERVER",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "history_server_jhs",
+              "principal": {
+                "value": "jhs/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "mapred-site/mapreduce.jobhistory.principal",
+                "local_username": "${mapred-env/mapred_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/jhs.service.keytab",
+                "owner": {
+                  "name": "${mapred-env/mapred_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "mapred-site/mapreduce.jobhistory.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
\ No newline at end of file
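
For reference, a minimal sketch (not part of this patch) of how the ${config-type/property} references used in the descriptor above substitute; the resolve() helper is hypothetical, only illustrates the basic rule, and ignores transformation functions such as |toLower():

    import re

    def resolve(template, configurations, variables):
        # "${yarn-env/yarn_user}" -> configurations['yarn-env']['yarn_user']
        # "${realm}"              -> variables['realm']
        def repl(match):
            ref = match.group(1)
            if '/' in ref:
                config_type, prop = ref.split('/', 1)
                return configurations[config_type][prop]
            return variables[ref]
        return re.sub(r'\$\{([^}]+)\}', repl, template)

    resolve("nm/_HOST@${realm}", {}, {"realm": "EXAMPLE.COM"})  # -> 'nm/_HOST@EXAMPLE.COM'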

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/metainfo.xml
new file mode 100644
index 0000000..7e1fd78
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/metainfo.xml
@@ -0,0 +1,173 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>YARN</name>
+      <displayName>YARN</displayName>
+      <version>2.7.1.3.0</version>
+      <extends>common-services/YARN/2.1.0.2.0</extends>
+
+      <components>
+        <component>
+          <name>APP_TIMELINE_SERVER</name>
+          <displayName>App Timeline Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <reassignAllowed>true</reassignAllowed>
+
+          <commandScript>
+            <script>scripts/application_timeline_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+
+          <dependencies>
+            <dependency>
+              <name>TEZ/TEZ_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>SPARK/SPARK_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+        </component>
+
+        <component>
+          <name>RESOURCEMANAGER</name>
+          <category>MASTER</category>
+          <cardinality>1-2</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+
+          <dependencies>
+            <dependency>
+              <name>TEZ/TEZ_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <configuration-dependencies>
+            <config-type>capacity-scheduler</config-type>
+          </configuration-dependencies>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>amazon2015,redhat6,redhat7,suse11,suse12</osFamily>
+          <packages>
+            <package>
+              <name>hadoop_${stack_version}-yarn</name>
+            </package>
+            <package>
+              <name>hadoop_${stack_version}-mapreduce</name>
+            </package>
+            <package>
+              <name>hadoop_${stack_version}-hdfs</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>hadoop-${stack_version}-yarn</name>
+            </package>
+            <package>
+              <name>hadoop-${stack_version}-mapreduce</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <themes>
+        <theme>
+          <fileName>theme.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+
+      <quickLinksConfigurations>
+        <quickLinksConfiguration>
+          <fileName>quicklinks.json</fileName>
+          <default>true</default>
+        </quickLinksConfiguration>
+      </quickLinksConfigurations>
+
+      <configuration-dependencies>
+        <config-type>yarn-site</config-type>
+        <config-type>yarn-env</config-type>
+        <config-type>core-site</config-type>
+        <config-type>yarn-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+
+    <service>
+      <name>MAPREDUCE2</name>
+      <displayName>MapReduce2</displayName>
+      <version>2.7.1.3.0</version>
+      <configuration-dir>configuration-mapred</configuration-dir>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>amazon2015,redhat6,redhat7,suse11,suse12</osFamily>
+          <packages>
+            <package>
+              <name>hadoop_${stack_version}-mapreduce</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>hadoop-${stack_version}-mapreduce</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <themes-dir>themes-mapred</themes-dir>
+      <themes>
+        <theme>
+          <fileName>theme.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+
+      <quickLinksConfigurations-dir>quicklinks-mapred</quickLinksConfigurations-dir>
+      <quickLinksConfigurations>
+        <quickLinksConfiguration>
+          <fileName>quicklinks.json</fileName>
+          <default>true</default>
+        </quickLinksConfiguration>
+      </quickLinksConfigurations>
+    </service>
+  </services>
+</metainfo>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/quicklinks-mapred/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/quicklinks-mapred/quicklinks.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/quicklinks-mapred/quicklinks.json
new file mode 100644
index 0000000..5ffbc07
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/quicklinks-mapred/quicklinks.json
@@ -0,0 +1,80 @@
+{
+  "name": "default",
+  "description": "default quick links configuration",
+  "configuration": {
+    "protocol":
+    {
+      "type":"https",
+      "checks":[
+        {
+          "property":"mapreduce.jobhistory.http.policy",
+          "desired":"HTTPS_ONLY",
+          "site":"mapred-site"
+        }
+      ]
+    },
+
+    "links": [
+      {
+        "name": "jobhistory_ui",
+        "label": "JobHistory UI",
+        "requires_user_name": "false",
+        "component_name": "HISTORYSERVER",
+        "url": "%@://%@:%@",
+        "port":{
+          "http_property": "mapreduce.jobhistory.webapp.address",
+          "http_default_port": "19888",
+          "https_property": "mapreduce.jobhistory.webapp.https.address",
+          "https_default_port": "8090",
+          "regex": "\\w*:(\\d+)",
+          "site": "mapred-site"
+        }
+      },
+      {
+        "name": "jobhistory_logs",
+        "label": "JobHistory logs",
+        "requires_user_name": "false",
+        "component_name": "HISTORYSERVER",
+        "url": "%@://%@:%@/logs",
+        "port":{
+          "http_property": "mapreduce.jobhistory.webapp.address",
+          "http_default_port": "19888",
+          "https_property": "mapreduce.jobhistory.webapp.https.address",
+          "https_default_port": "8090",
+          "regex": "\\w*:(\\d+)",
+          "site": "mapred-site"
+        }
+      },
+      {
+        "name":"jobhistory_jmx",
+        "label":"JobHistory JMX",
+        "requires_user_name":"false",
+        "component_name": "HISTORYSERVER",
+        "url":"%@://%@:%@/jmx",
+        "port":{
+          "http_property": "mapreduce.jobhistory.webapp.address",
+          "http_default_port": "19888",
+          "https_property": "mapreduce.jobhistory.webapp.https.address",
+          "https_default_port": "8090",
+          "regex": "\\w*:(\\d+)",
+          "site": "mapred-site"
+        }
+      },
+      {
+        "name":"thread_stacks",
+        "label":"Thread Stacks",
+        "requires_user_name": "false",
+        "component_name": "HISTORYSERVER",
+        "url":"%@://%@:%@/stacks",
+        "port":{
+          "http_property": "mapreduce.jobhistory.webapp.address",
+          "http_default_port": "19888",
+          "https_property": "mapreduce.jobhistory.webapp.https.address",
+          "https_default_port": "8090",
+          "regex": "\\w*:(\\d+)",
+          "site": "mapred-site"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file
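
For reference, a minimal sketch (not part of this patch) of how the "regex" field above is intended to pull the port out of an address-style property; the address value is illustrative:

    import re

    address = "0.0.0.0:19888"                    # e.g. mapreduce.jobhistory.webapp.address
    match = re.search(r"\w*:(\d+)", address)     # the quick links "regex" field
    port = match.group(1) if match else "19888"  # fall back to http_default_port
    print(port)                                  # -> 19888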

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/quicklinks/quicklinks.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/quicklinks/quicklinks.json
new file mode 100644
index 0000000..37248d0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/quicklinks/quicklinks.json
@@ -0,0 +1,80 @@
+{
+  "name": "default",
+  "description": "default quick links configuration",
+  "configuration": {
+    "protocol":
+    {
+      "type":"https",
+      "checks":[
+        {
+          "property":"yarn.http.policy",
+          "desired":"HTTPS_ONLY",
+          "site":"yarn-site"
+        }
+      ]
+    },
+
+    "links": [
+      {
+        "name": "resourcemanager_ui",
+        "label": "ResourceManager UI",
+        "requires_user_name": "false",
+        "component_name": "RESOURCEMANAGER",
+        "url": "%@://%@:%@",
+        "port":{
+          "http_property": "yarn.resourcemanager.webapp.address",
+          "http_default_port": "8088",
+          "https_property": "yarn.resourcemanager.webapp.https.address",
+          "https_default_port": "8090",
+          "regex": "\\w*:(\\d+)",
+          "site": "yarn-site"
+        }
+      },
+      {
+        "name": "resourcemanager_logs",
+        "label": "ResourceManager logs",
+        "requires_user_name": "false",
+        "component_name": "RESOURCEMANAGER",
+        "url": "%@://%@:%@/logs",
+        "port":{
+          "http_property": "yarn.resourcemanager.webapp.address",
+          "http_default_port": "8088",
+          "https_property": "yarn.resourcemanager.webapp.https.address",
+          "https_default_port": "8090",
+          "regex": "\\w*:(\\d+)",
+          "site": "yarn-site"
+        }
+      },
+      {
+        "name": "resourcemanager_jmx",
+        "label":"ResourceManager JMX",
+        "requires_user_name": "false",
+        "component_name": "RESOURCEMANAGER",
+        "url":"%@://%@:%@/jmx",
+        "port":{
+          "http_property": "yarn.resourcemanager.webapp.address",
+          "http_default_port": "8088",
+          "https_property": "yarn.resourcemanager.webapp.https.address",
+          "https_default_port": "8090",
+          "regex": "\\w*:(\\d+)",
+          "site": "yarn-site"
+        }
+      },
+      {
+        "name": "thread_stacks",
+        "label":"Thread Stacks",
+        "requires_user_name": "false",
+        "component_name": "RESOURCEMANAGER",
+        "url":"%@://%@:%@/stacks",
+        "port":{
+          "http_property": "yarn.resourcemanager.webapp.address",
+          "http_default_port": "8088",
+          "https_property": "yarn.resourcemanager.webapp.https.address",
+          "https_default_port": "8090",
+          "regex": "\\w*:(\\d+)",
+          "site": "yarn-site"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/themes-mapred/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/themes-mapred/theme.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/themes-mapred/theme.json
new file mode 100644
index 0000000..5019447
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/themes-mapred/theme.json
@@ -0,0 +1,132 @@
+{
+  "name": "default",
+  "description": "Default theme for MAPREDUCE service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "settings",
+            "display-name": "Settings",
+            "layout": {
+              "tab-columns": "1",
+              "tab-rows": "1",
+              "sections": [
+                {
+                  "name": "section-mr-scheduler",
+                  "display-name": "MapReduce",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "3",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-mr-scheduler-row1-col1",
+                      "display-name": "MapReduce Framework",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-mr-scheduler-row1-col2",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-mr-scheduler-row1-col3",
+                      "row-index": "0",
+                      "column-index": "2",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-mr-scheduler-row2-col1",
+                      "display-name": "MapReduce AppMaster",
+                      "row-index": "1",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "3"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "mapred-site/mapreduce.map.memory.mb",
+          "subsection-name": "subsection-mr-scheduler-row1-col1"
+        },
+        {
+          "config": "mapred-site/mapreduce.reduce.memory.mb",
+          "subsection-name": "subsection-mr-scheduler-row1-col2"
+        },
+        {
+          "config": "mapred-site/yarn.app.mapreduce.am.resource.mb",
+          "subsection-name": "subsection-mr-scheduler-row2-col1"
+        },
+        {
+          "config": "mapred-site/mapreduce.task.io.sort.mb",
+          "subsection-name": "subsection-mr-scheduler-row1-col3"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "mapred-site/mapreduce.map.memory.mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "mapred-site/mapreduce.reduce.memory.mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "mapred-site/yarn.app.mapreduce.am.resource.mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "mapred-site/mapreduce.task.io.sort.mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      }
+    ]
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/themes/theme.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/themes/theme.json
new file mode 100644
index 0000000..758cf0c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/themes/theme.json
@@ -0,0 +1,250 @@
+{
+  "name": "default",
+  "description": "Default theme for YARN service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "settings",
+            "display-name": "Settings",
+            "layout": {
+              "tab-columns": "3",
+              "tab-rows": "2",
+              "sections": [
+                {
+                  "name": "section-nm-sizing",
+                  "display-name": "Memory",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "2",
+                  "section-columns": "2",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-nm-sizing-col1",
+                      "display-name": "Node",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-nm-sizing-col2",
+                      "display-name": "Container",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-yarn-platform-features",
+                  "display-name": "YARN Features",
+                  "row-index": "0",
+                  "column-index": "2",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-yarn-platform-features-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-container-sizing",
+                  "display-name": "CPU",
+                  "row-index": "1",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "2",
+                  "section-columns": "2",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-container-sizing-col1",
+                      "display-name": "Node",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-container-sizing-col2",
+                      "display-name": "Container",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "yarn-site/yarn.nodemanager.resource.memory-mb",
+          "subsection-name": "subsection-nm-sizing-col1"
+        },
+        {
+          "config": "yarn-site/yarn.scheduler.minimum-allocation-mb",
+          "subsection-name": "subsection-nm-sizing-col2"
+        },
+        {
+          "config": "yarn-site/yarn.scheduler.maximum-allocation-mb",
+          "subsection-name": "subsection-nm-sizing-col2"
+        },
+        {
+          "config": "yarn-site/yarn.node-labels.enabled",
+          "subsection-name": "subsection-yarn-platform-features-col1"
+        },
+        {
+          "config": "yarn-site/yarn.resourcemanager.scheduler.monitor.enable",
+          "subsection-name": "subsection-yarn-platform-features-col1"
+        },
+        {
+          "config": "capacity-scheduler/yarn.scheduler.capacity.resource-calculator",
+          "subsection-name": "subsection-container-sizing-col1"
+        },
+        {
+          "config": "yarn-env/yarn_cgroups_enabled",
+          "subsection-name": "subsection-container-sizing-col1"
+        },
+        {
+          "config": "yarn-site/yarn.nodemanager.resource.percentage-physical-cpu-limit",
+          "subsection-name": "subsection-container-sizing-col1"
+        },
+        {
+          "config": "yarn-site/yarn.nodemanager.resource.cpu-vcores",
+          "subsection-name": "subsection-container-sizing-col1"
+        },
+        {
+          "config": "yarn-site/yarn.scheduler.minimum-allocation-vcores",
+          "subsection-name": "subsection-container-sizing-col2"
+        },
+        {
+          "config": "yarn-site/yarn.scheduler.maximum-allocation-vcores",
+          "subsection-name": "subsection-container-sizing-col2"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "yarn-site/yarn.nodemanager.resource.memory-mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.nodemanager.resource.percentage-physical-cpu-limit",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "percent"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.nodemanager.resource.cpu-vcores",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.scheduler.minimum-allocation-mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.scheduler.maximum-allocation-mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.scheduler.minimum-allocation-vcores",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.scheduler.maximum-allocation-vcores",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.node-labels.enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "yarn-env/yarn_cgroups_enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "yarn-site/yarn.resourcemanager.scheduler.monitor.enable",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "capacity-scheduler/yarn.scheduler.capacity.resource-calculator",
+        "widget": {
+          "type": "toggle"
+        }
+      }
+    ]
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/ZOOKEEPER/metainfo.xml
new file mode 100644
index 0000000..5d4fb4d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/ZOOKEEPER/metainfo.xml
@@ -0,0 +1,54 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ZOOKEEPER</name>
+      <displayName>ZooKeeper</displayName>
+      <comment>Centralized service which provides highly reliable distributed coordination</comment>
+      <version>3.4.6.3.0</version>
+      <extends>common-services/ZOOKEEPER/3.4.6</extends>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>amazon2015,redhat6,redhat7,suse11,suse12</osFamily>
+          <packages>
+            <package>
+              <name>zookeeper_${stack_version}</name>
+            </package>
+            <package>
+              <name>zookeeper_${stack_version}-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu12,ubuntu14,ubuntu16</osFamily>
+          <packages>
+            <package>
+              <name>zookeeper-${stack_version}</name>
+            </package>
+            <package>
+              <name>zookeeper-${stack_version}-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/widgets.json b/ambari-server/src/main/resources/stacks/HDP/3.0/widgets.json
new file mode 100644
index 0000000..3176354
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/widgets.json
@@ -0,0 +1,95 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_system_heatmap",
+      "display_name": "Heatmaps",
+      "section_name": "SYSTEM_HEATMAPS",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "Host Disk Space Used %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "disk_free",
+              "metric_path": "metrics/disk/disk_free",
+              "service_name": "STACK"
+            },
+            {
+              "name": "disk_total",
+              "metric_path": "metrics/disk/disk_total",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Disk Space Used %",
+              "value": "${((disk_total-disk_free)/disk_total)*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Host Memory Used %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "mem_total",
+              "metric_path": "metrics/memory/mem_total",
+              "service_name": "STACK"
+            },
+            {
+              "name": "mem_free",
+              "metric_path": "metrics/memory/mem_free",
+              "service_name": "STACK"
+            },
+            {
+              "name": "mem_cached",
+              "metric_path": "metrics/memory/mem_cached",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Memory Used %",
+              "value": "${((mem_total-mem_free-mem_cached)/mem_total)*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Host CPU Wait IO %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "cpu_wio",
+              "metric_path": "metrics/cpu/cpu_wio",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Memory Used %",
+              "value": "${cpu_wio*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        }
+      ]
+    }
+  ]
+}
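
For reference, a minimal sketch (not part of this patch) of the arithmetic behind the heatmap "value" expressions above, using made-up sample metric values:

    disk_total, disk_free = 500.0, 125.0
    disk_used_pct = ((disk_total - disk_free) / disk_total) * 100           # -> 75.0

    mem_total, mem_free, mem_cached = 16384.0, 2048.0, 4096.0
    mem_used_pct = ((mem_total - mem_free - mem_cached) / mem_total) * 100  # -> 62.5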

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-START/scripts/params.py
index 7cd9340..1058c75 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-START/scripts/params.py
@@ -22,7 +22,7 @@ import nturl2path
 from ambari_commons.ambari_metrics_helper import select_metric_collector_hosts_from_hostnames
 
 config = Script.get_config()
-ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
+ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
 has_metric_collector = not len(ams_collector_hosts) == 0
 if has_metric_collector:
   if 'cluster-env' in config['configurations'] and \

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
index 7d6dac4..7df00ee 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
@@ -71,5 +71,30 @@
     <description>Security</description>
     <on-ambari-upgrade add="false"/>
   </property>
-
+  <property>
+    <name>user_group</name>
+    <display-name>Hadoop Group</display-name>
+    <value>hadoop</value>
+    <property-type>GROUP</property-type>
+    <description>Hadoop user group.</description>
+    <value-attributes>
+      <type>user</type>
+      <visible>false</visible>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>smokeuser</name>
+    <display-name>Smoke User</display-name>
+    <value>ambari-qa</value>
+    <property-type>USER</property-type>
+    <description>User executing service checks</description>
+    <value-attributes>
+      <type>user</type>
+      <visible>false</visible>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/kerberos.json b/ambari-server/src/main/resources/stacks/PERF/1.0/kerberos.json
new file mode 100644
index 0000000..a2a667b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/kerberos.json
@@ -0,0 +1,78 @@
+{
+  "properties": {
+    "realm": "${kerberos-env/realm}",
+    "keytab_dir": "/home/_HOST/etc/security/keytabs",
+    "additional_realms": ""
+  },
+  "identities": [
+    {
+      "name": "spnego",
+      "principal": {
+        "value": "HTTP/_HOST@${realm}",
+        "type": "service"
+      },
+      "keytab": {
+        "file": "${keytab_dir}/spnego.service.keytab",
+        "owner": {
+          "name": "root",
+          "access": "r"
+        },
+        "group": {
+          "name": "${cluster-env/user_group}",
+          "access": "r"
+        }
+      }
+    },
+    {
+      "name": "smokeuser",
+      "principal": {
+        "value": "${cluster-env/smokeuser}-${cluster_name|toLower()}@${realm}",
+        "type": "user",
+        "configuration": "cluster-env/smokeuser_principal_name",
+        "local_username": "${cluster-env/smokeuser}"
+      },
+      "keytab": {
+        "file": "${keytab_dir}/smokeuser.headless.keytab",
+        "owner": {
+          "name": "${cluster-env/smokeuser}",
+          "access": "r"
+        },
+        "group": {
+          "name": "${cluster-env/user_group}",
+          "access": "r"
+        },
+        "configuration": "cluster-env/smokeuser_keytab"
+      }
+    }
+  ],
+  "services": [
+    {
+      "name": "AMBARI",
+      "components": [
+        {
+          "name": "AMBARI_SERVER",
+          "identities": [
+            {
+              "name": "ambari-server",
+              "principal": {
+                "value": "ambari-server-${cluster_name|toLower()}@${realm}",
+                "type": "user",
+                "configuration": "cluster-env/ambari_principal_name"
+              },
+              "keytab": {
+                "file": "/etc/security/keytabs/ambari.server.keytab",
+                "owner": {
+                  "access": "r"
+                }
+              }
+            },
+            {
+              "name" : "ambari-server_spnego",
+              "reference" : "/spnego"
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/GRUMPY/configuration/grumpy-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/GRUMPY/configuration/grumpy-site.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/GRUMPY/configuration/grumpy-site.xml
index 7952789..3cf8ea1 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/GRUMPY/configuration/grumpy-site.xml
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/GRUMPY/configuration/grumpy-site.xml
@@ -33,4 +33,16 @@
     </value-attributes>
     <on-ambari-upgrade add="false"/>
   </property>
+  <property>
+    <name>grumpy_user</name>
+    <display-name>grumpy User</display-name>
+    <value>grumpy</value>
+    <property-type>USER</property-type>
+    <description>grumpy Username.</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/GRUMPY/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/GRUMPY/kerberos.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/GRUMPY/kerberos.json
new file mode 100644
index 0000000..3bf5fbd
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/GRUMPY/kerberos.json
@@ -0,0 +1,78 @@
+{
+  "services": [
+    {
+      "name": "GRUMPY",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "grumpy",
+          "principal": {
+            "value": "${grumpy-site/grumpy_user}-${cluster_name|toLower()}@${realm}",
+            "type" : "user",
+            "configuration": "grumpy-site/grumpy_principal_name",
+            "local_username": "${grumpy-site/grumpy_user}"
+          },
+          "keytab": {
+            "file": "${keytab_dir}/grumpy.headless.keytab",
+            "owner": {
+              "name": "${grumpy-site/grumpy_user}",
+              "access": "r"
+            },
+            "group": {
+              "name": "${cluster-env/user_group}",
+              "access": "r"
+            },
+            "configuration": "grumpy-site/grumpy_user_keytab"
+          }
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+      ],
+      "components": [
+        {
+          "name": "GRUMPY",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "grumpy_grumpy",
+              "principal": {
+                "value": "grumpy/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "grumpy-site/grumpy.grumpy.kerberos.principal",
+                "local_username": "${grumpy-site/grumpy_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/grumpy.service.keytab",
+                "owner": {
+                  "name": "${grumpy-site/grumpy_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "grumpy-site/grumpy.grumpy.keytab.file"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "grumpy-site/grumpy.security.authentication.spnego.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "grumpy-site/grumpy.security.authentication.spnego.kerberos.keytab"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
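
The GRUMPY descriptor above is driven entirely by ${...} variable references (for example ${grumpy-site/grumpy_user}, ${realm}, and the ${cluster_name|toLower()} modifier), which are substituted from cluster configuration when the identities are materialized. Below is a minimal sketch of that substitution idea, assuming a flat dict keyed by "config-type/property" names; the resolve() helper and the dict layout are illustrative only, not Ambari's actual server-side resolver.

import re

def resolve(template, props):
  # Replace ${name} / ${name|toLower()} style tokens from a flat dict.
  # Only the toLower() modifier used by the descriptor above is handled here.
  def _sub(match):
    name, _, modifier = match.group(1).partition('|')
    value = props.get(name, '')
    if modifier.startswith('toLower'):
      value = value.lower()
    return value
  return re.sub(r'\$\{([^}]+)\}', _sub, template)

props = {
  'grumpy-site/grumpy_user': 'grumpy',
  'cluster_name': 'PerfCluster',
  'realm': 'EXAMPLE.COM',
}
print(resolve('${grumpy-site/grumpy_user}-${cluster_name|toLower()}@${realm}', props))
# -> grumpy-perfcluster@EXAMPLE.COM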

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/GRUMPY/package/scripts/dwarf.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/GRUMPY/package/scripts/dwarf.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/GRUMPY/package/scripts/dwarf.py
index cf4206c..de2f4b4 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/GRUMPY/package/scripts/dwarf.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/GRUMPY/package/scripts/dwarf.py
@@ -33,6 +33,10 @@ class Grumpy(Dummy):
   def __init__(self):
     super(Grumpy, self).__init__()
     self.component_name = "GRUMPY"
+    self.principal_conf_name = "grumpy-site"
+    self.principal_name = "grumpy.grumpy.kerberos.principal"
+    self.keytab_conf_name = "grumpy-site"
+    self.keytab_name = "grumpy.grumpy.keytab.file"
 
 if __name__ == "__main__":
   Grumpy().execute()
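
The four attributes added here (principal_conf_name, principal_name, keytab_conf_name, keytab_name) point the shared PERF Dummy script, which this merge also updates (see .../libraries/script/dummy.py in the summary below), at the config type and property names holding the component's Kerberos principal and keytab. A rough sketch of such a lookup follows, assuming command configurations arrive as a nested dict keyed by config type; the helper is illustrative, not the actual dummy.py code.

def lookup_kerberos_identity(configurations, principal_conf_name, principal_name,
                             keytab_conf_name, keytab_name):
  # Illustrative only: fetch the principal/keytab pair described by the
  # attributes above, e.g. grumpy-site/grumpy.grumpy.kerberos.principal.
  principal = configurations.get(principal_conf_name, {}).get(principal_name)
  keytab = configurations.get(keytab_conf_name, {}).get(keytab_name)
  return principal, keytab

configurations = {
  "grumpy-site": {
    "grumpy.grumpy.kerberos.principal": "grumpy/_HOST@EXAMPLE.COM",
    "grumpy.grumpy.keytab.file": "/etc/security/keytabs/grumpy.service.keytab",
  }
}
print(lookup_kerberos_identity(configurations, "grumpy-site",
                               "grumpy.grumpy.kerberos.principal",
                               "grumpy-site", "grumpy.grumpy.keytab.file"))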

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/alerts.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/alerts.json
new file mode 100644
index 0000000..848a60d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/alerts.json
@@ -0,0 +1,20 @@
+{
+    "HAPPY": {
+
+        "HAPPY": [
+            {
+                "name": "happy_process",
+                "label": "Happy Process",
+                "description": "Alert for happy component process status",
+                "interval": 1,
+                "scope": "HOST",
+                "enabled": true,
+                "source": {
+                    "type": "SCRIPT",
+                    "path": "PERF/1.0/services/HAPPY/package/alerts/alert_happy_process.py",
+                    "parameters": []
+                }
+            }
+        ]
+    }
+}
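
The SCRIPT source above hands alert evaluation to alert_happy_process.py, which is added further down in this commit. A script-based alert module is expected to expose get_tokens() and execute(); the skeleton below shows only that shape, with placeholder values, and is not the real file.

def get_tokens():
  # {{site/property}} tokens whose current values Ambari passes to execute().
  return ("{{happy-alert-config/alert.behavior.type}}",)

def execute(configurations={}, parameters={}, host_name=None):
  # Return a (result_code, [label]) tuple; 'OK', 'WARNING', 'CRITICAL' are
  # the usual result codes.
  return ('OK', ['Happy process is running'])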


[14/25] ambari git commit: Merge from branch-2.5

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/configuration/happy-alert-config.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/configuration/happy-alert-config.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/configuration/happy-alert-config.xml
new file mode 100644
index 0000000..392eea7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/configuration/happy-alert-config.xml
@@ -0,0 +1,80 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+-->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
+
+    <property>
+        <name>alert.behavior.type</name>
+        <value>percentage</value>
+        <description>
+            This property describes the type of alert behaviour.
+            There are three types: percentage, timeout, flip.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+
+    <property>
+        <name>alert.success.percentage</name>
+        <value>100</value>
+        <description>
+            This property applies only when alert.behavior.type is
+            set to "percentage". Set the percentage of alert checks
+            that should succeed.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+
+    <property>
+        <name>alert.timeout.return.value</name>
+        <value>false</value>
+        <description>
+            This property applies only when alert.behavior.type is
+            set to "timeout". Set the result the alert should return
+            after the timeout: false|true|none.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>alert.timeout.secs</name>
+        <value>120</value>
+        <description>
+            This property applies only when alert.behavior.type is
+            set to "timeout". Set the number of seconds the alert
+            should sleep.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+
+    <property>
+        <name>alert.flip.interval.mins</name>
+        <value>3</value>
+        <description>
+            This property applies only when alert.behavior.type is
+            set to "flip". Set the number of minutes at which the
+            alert should flip between true and false.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+
+</configuration>
\ No newline at end of file
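
The *-alert-config types added in this merge feed simulate_perf_cluster_alert_behaviour (see .../simulate_perf_cluster_alert_behaviour.py in the summary below). As a rough illustration of the three modes described above, and only an illustration of the descriptions rather than the actual implementation, the dispatch could look like this:

import random, time

def simulate_alert(behaviour_type, success_percentage=100, timeout_secs=120,
                   timeout_return_value='false', flip_interval_mins=3):
  # Guessed illustration of the percentage / timeout / flip modes above.
  if behaviour_type == 'percentage':
    ok = random.uniform(0, 100) <= float(success_percentage)
    return ('OK', ['Simulated check passed']) if ok else ('CRITICAL', ['Simulated check failed'])
  if behaviour_type == 'timeout':
    time.sleep(float(timeout_secs))
    code = 'OK' if timeout_return_value == 'true' else 'CRITICAL'
    return (code, ['Returned after %s second timeout' % timeout_secs])
  if behaviour_type == 'flip':
    up = int(time.time() // (float(flip_interval_mins) * 60)) % 2 == 0
    return ('OK', ['Flipped up']) if up else ('CRITICAL', ['Flipped down'])
  return ('UNKNOWN', ['Unrecognized alert.behavior.type: %s' % behaviour_type])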

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/configuration/happy-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/configuration/happy-site.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/configuration/happy-site.xml
index 67762a5..693046e 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/configuration/happy-site.xml
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/configuration/happy-site.xml
@@ -33,4 +33,16 @@
     </value-attributes>
     <on-ambari-upgrade add="false"/>
   </property>
+  <property>
+    <name>happy_user</name>
+    <display-name>happy User</display-name>
+    <value>happy</value>
+    <property-type>USER</property-type>
+    <description>happy Username.</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/kerberos.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/kerberos.json
new file mode 100644
index 0000000..ae84eaf
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/kerberos.json
@@ -0,0 +1,78 @@
+{
+  "services": [
+    {
+      "name": "HAPPY",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "happy",
+          "principal": {
+            "value": "${happy-site/happy_user}-${cluster_name|toLower()}@${realm}",
+            "type" : "user",
+            "configuration": "happy-site/happy_principal_name",
+            "local_username": "${happy-site/happy_user}"
+          },
+          "keytab": {
+            "file": "${keytab_dir}/happy.headless.keytab",
+            "owner": {
+              "name": "${happy-site/happy_user}",
+              "access": "r"
+            },
+            "group": {
+              "name": "${cluster-env/user_group}",
+              "access": "r"
+            },
+            "configuration": "happy-site/happy_user_keytab"
+          }
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+      ],
+      "components": [
+        {
+          "name": "HAPPY",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "happy_happy",
+              "principal": {
+                "value": "happy/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "happy-site/happy.happy.kerberos.principal",
+                "local_username": "${happy-site/happy_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/happy.service.keytab",
+                "owner": {
+                  "name": "${happy-site/happy_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "happy-site/happy.happy.keytab.file"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "happy-site/happy.security.authentication.spnego.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "happy-site/happy.security.authentication.spnego.kerberos.keytab"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/metainfo.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/metainfo.xml
index 36a2168..53cc11b 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/metainfo.xml
@@ -52,6 +52,11 @@
           <default>true</default>
         </theme>
       </themes>
+
+      <configuration-dependencies>
+        <config-type>happy-alert-config</config-type>
+      </configuration-dependencies>
+
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/package/alerts/alert_happy_process.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/package/alerts/alert_happy_process.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/package/alerts/alert_happy_process.py
new file mode 100644
index 0000000..469179c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/package/alerts/alert_happy_process.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import logging
+
+from resource_management.libraries.functions.simulate_perf_cluster_alert_behaviour import simulate_perf_cluster_alert_behaviour
+
+ALERT_BEHAVIOUR_TYPE = "{{happy-alert-config/alert.behavior.type}}"
+
+ALERT_SUCCESS_PERCENTAGE = "{{happy-alert-config/alert.success.percentage}}"
+
+ALERT_TIMEOUT_RETURN_VALUE = "{{happy-alert-config/alert.timeout.return.value}}"
+ALERT_TIMEOUT_SECS = "{{happy-alert-config/alert.timeout.secs}}"
+
+ALERT_FLIP_INTERVAL_MINS = "{{happy-alert-config/alert.flip.interval.mins}}"
+
+logger = logging.getLogger('ambari_alerts')
+
+alert_behaviour_properties = {"alert_behaviour_type" : ALERT_BEHAVIOUR_TYPE, "alert_success_percentage" : ALERT_SUCCESS_PERCENTAGE,
+                              "alert_timeout_return_value" : ALERT_TIMEOUT_RETURN_VALUE, "alert_timeout_secs" : ALERT_TIMEOUT_SECS,
+                              "alert_flip_interval_mins" : ALERT_FLIP_INTERVAL_MINS}
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (ALERT_BEHAVIOUR_TYPE, ALERT_SUCCESS_PERCENTAGE, ALERT_TIMEOUT_RETURN_VALUE, ALERT_TIMEOUT_SECS,
+          ALERT_FLIP_INTERVAL_MINS)
+
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  return simulate_perf_cluster_alert_behaviour(alert_behaviour_properties, configurations)
\ No newline at end of file
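
For reference, the execute() entry point above receives a configurations dict keyed by the {{site/property}} tokens returned from get_tokens(); the call below is a sketch that assumes the module above is in scope, and the literal values are examples only.

configurations = {
  "{{happy-alert-config/alert.behavior.type}}": "percentage",
  "{{happy-alert-config/alert.success.percentage}}": "100",
  "{{happy-alert-config/alert.timeout.return.value}}": "false",
  "{{happy-alert-config/alert.timeout.secs}}": "120",
  "{{happy-alert-config/alert.flip.interval.mins}}": "3",
}
result = execute(configurations=configurations)  # typically a ('OK', ['...']) style tuple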

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/package/scripts/dwarf.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/package/scripts/dwarf.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/package/scripts/dwarf.py
index b86c4c7..dc8198d 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/package/scripts/dwarf.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/package/scripts/dwarf.py
@@ -33,6 +33,10 @@ class Happy(Dummy):
   def __init__(self):
     super(Happy, self).__init__()
     self.component_name = "HAPPY"
+    self.principal_conf_name = "happy-site"
+    self.principal_name = "happy.happy.kerberos.principal"
+    self.keytab_conf_name = "happy-site"
+    self.keytab_name = "happy.happy.keytab.file"
 
 if __name__ == "__main__":
   Happy().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/alerts.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/alerts.json
index 1b3ae25..24141e8 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/alerts.json
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/alerts.json
@@ -1,98 +1,18 @@
 {
   "HBASE": {
-    "service": [
-      {
-        "name": "hbase_regionserver_process_percent",
-        "label": "Percent RegionServers Available",
-        "description": "This service-level alert is triggered if the configured percentage of RegionServer processes cannot be determined to be up and listening on the network for the configured warning and critical thresholds. It aggregates the results of RegionServer process down checks.",
-        "interval": 1,
-        "scope": "SERVICE",
-        "enabled": true,
-        "source": {
-          "type": "AGGREGATE",
-          "alert_name": "hbase_regionserver_process",
-          "reporting": {
-            "ok": {
-              "text": "affected: [{1}], total: [{0}]"
-            },
-            "warning": {
-              "text": "affected: [{1}], total: [{0}]",
-              "value": 10
-            },
-            "critical": {
-              "text": "affected: [{1}], total: [{0}]",
-              "value": 30
-            },
-            "units" : "%",
-            "type": "PERCENT"
-          }
-        }
-      }    
-    ],
+
     "HBASE_MASTER": [
       {
         "name": "hbase_master_process",
         "label": "HBase Master Process",
         "description": "This alert is triggered if the HBase master processes cannot be confirmed to be up and listening on the network for the configured critical threshold, given in seconds.",
         "interval": 1,
-        "scope": "ANY",
-        "source": {
-          "type": "PORT",
-          "uri": "{{hbase-site/hbase.master.port}}",
-          "default_port": 60000,
-          "reporting": {
-            "ok": {
-              "text": "TCP OK - {0:.3f}s response on port {1}"
-            },
-            "warning": {
-              "text": "TCP OK - {0:.3f}s response on port {1}",
-              "value": 1.5
-            },
-            "critical": {
-              "text": "Connection failed: {0} to {1}:{2}",
-              "value": 5.0
-            }
-          }
-        }
-      },
-      {
-        "name": "hbase_master_cpu",
-        "label": "HBase Master CPU Utilization",
-        "description": "This host-level alert is triggered if CPU utilization of the HBase Master exceeds certain warning and critical thresholds. It checks the HBase Master JMX Servlet for the SystemCPULoad property. The threshold values are in percent.",
-        "interval": 5,
-        "scope": "ANY",
+        "scope": "HOST",
         "enabled": true,
         "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{hbase-site/hbase.master.info.port}}",
-            "default_port": 60010,
-            "connection_timeout": 5.0,
-            "kerberos_keytab": "{{hbase-site/hbase.security.authentication.spnego.kerberos.principal}}",
-            "kerberos_principal": "{{hbase-site/hbase.security.authentication.spnego.kerberos.keytab}}"
-          },
-          "reporting": {
-            "ok": {
-              "text": "{1} CPU, load {0:.1%}"
-            },
-            "warning": {
-              "text": "{1} CPU, load {0:.1%}",
-              "value": 200
-            },
-            "critical": {
-              "text": "{1} CPU, load {0:.1%}",
-              "value": 250
-            },
-            "units" : "%",
-            "type": "PERCENT"
-          },
-          "jmx": {
-            "property_list": [
-              "java.lang:type=OperatingSystem/SystemCpuLoad",
-              "java.lang:type=OperatingSystem/AvailableProcessors"
-            ],
-            "value": "{0} * 100"
-          }
+          "type": "SCRIPT",
+          "path": "PERF/1.0/services/HBASE/package/alerts/hbase_master_process.py",
+          "parameters": []
         }
       }
     ],
@@ -103,23 +23,11 @@
         "description": "This host-level alert is triggered if the RegionServer processes cannot be confirmed to be up and listening on the network for the configured critical threshold, given in seconds.",
         "interval": 1,
         "scope": "HOST",
+        "enabled": true,
         "source": {
-          "type": "PORT",
-          "uri": "{{hbase-site/hbase.regionserver.info.port}}",
-          "default_port": 60030,
-          "reporting": {
-            "ok": {
-              "text": "TCP OK - {0:.3f}s response on port {1}"
-            },
-            "warning": {
-              "text": "TCP OK - {0:.3f}s response on port {1}",
-              "value": 1.5
-            },
-            "critical": {
-              "text": "Connection failed: {0} to {1}:{2}",
-              "value": 5.0
-            }
-          }
+          "type": "SCRIPT",
+          "path": "PERF/1.0/services/HBASE/package/alerts/hbase_regionserver_process.py",
+          "parameters": []
         }
       }
     ]

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/configuration/hbase-alert-config.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/configuration/hbase-alert-config.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/configuration/hbase-alert-config.xml
new file mode 100644
index 0000000..392eea7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/configuration/hbase-alert-config.xml
@@ -0,0 +1,80 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+-->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
+
+    <property>
+        <name>alert.behavior.type</name>
+        <value>percentage</value>
+        <description>
+            This property describes the type of alert behaviour.
+            There are three types: percentage, timeout, flip.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+
+    <property>
+        <name>alert.success.percentage</name>
+        <value>100</value>
+        <description>
+            This property applies only when alert.behavior.type is
+            set to "percentage". Set the percentage of alert checks
+            that should succeed.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+
+    <property>
+        <name>alert.timeout.return.value</name>
+        <value>false</value>
+        <description>
+            This property applies only when alert.behavior.type is
+            set to "timeout". Set the result the alert should return
+            after the timeout: false|true|none.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>alert.timeout.secs</name>
+        <value>120</value>
+        <description>
+            This property applies only when alert.behavior.type is
+            set to "timeout". Set the number of seconds the alert
+            should sleep.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+
+    <property>
+        <name>alert.flip.interval.mins</name>
+        <value>3</value>
+        <description>
+            This property applies only when alert.behavior.type is
+            set to "flip". Set the number of minutes at which the
+            alert should flip between true and false.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+
+</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/metainfo.xml
index b304706..f1f4b7d 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/metainfo.xml
@@ -168,6 +168,7 @@
       <osSpecifics></osSpecifics>
 
       <configuration-dependencies>
+        <config-type>hbase-alert-config</config-type>
         <config-type>core-site</config-type> <!-- hbase puts core-site in it's folder -->
         <config-type>hbase-policy</config-type>
         <config-type>hbase-site</config-type>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/alerts/hbase_master_process.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/alerts/hbase_master_process.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/alerts/hbase_master_process.py
new file mode 100644
index 0000000..7662c1f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/alerts/hbase_master_process.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import logging
+
+from resource_management.libraries.functions.simulate_perf_cluster_alert_behaviour import simulate_perf_cluster_alert_behaviour
+
+ALERT_BEHAVIOUR_TYPE = "{{hbase-alert-config/alert.behavior.type}}"
+
+ALERT_SUCCESS_PERCENTAGE = "{{hbase-alert-config/alert.success.percentage}}"
+
+ALERT_TIMEOUT_RETURN_VALUE = "{{hbase-alert-config/alert.timeout.return.value}}"
+ALERT_TIMEOUT_SECS = "{{hbase-alert-config/alert.timeout.secs}}"
+
+ALERT_FLIP_INTERVAL_MINS = "{{hbase-alert-config/alert.flip.interval.mins}}"
+
+logger = logging.getLogger('ambari_alerts')
+
+alert_behaviour_properties = {"alert_behaviour_type" : ALERT_BEHAVIOUR_TYPE, "alert_success_percentage" : ALERT_SUCCESS_PERCENTAGE,
+                              "alert_timeout_return_value" : ALERT_TIMEOUT_RETURN_VALUE, "alert_timeout_secs" : ALERT_TIMEOUT_SECS,
+                              "alert_flip_interval_mins" : ALERT_FLIP_INTERVAL_MINS}
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (ALERT_BEHAVIOUR_TYPE, ALERT_SUCCESS_PERCENTAGE, ALERT_TIMEOUT_RETURN_VALUE, ALERT_TIMEOUT_SECS,
+          ALERT_FLIP_INTERVAL_MINS)
+
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  return simulate_perf_cluster_alert_behaviour(alert_behaviour_properties, configurations)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/alerts/hbase_regionserver_process.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/alerts/hbase_regionserver_process.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/alerts/hbase_regionserver_process.py
new file mode 100644
index 0000000..7662c1f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/alerts/hbase_regionserver_process.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import logging
+
+from resource_management.libraries.functions.simulate_perf_cluster_alert_behaviour import simulate_perf_cluster_alert_behaviour
+
+ALERT_BEHAVIOUR_TYPE = "{{hbase-alert-config/alert.behavior.type}}"
+
+ALERT_SUCCESS_PERCENTAGE = "{{hbase-alert-config/alert.success.percentage}}"
+
+ALERT_TIMEOUT_RETURN_VALUE = "{{hbase-alert-config/alert.timeout.return.value}}"
+ALERT_TIMEOUT_SECS = "{{hbase-alert-config/alert.timeout.secs}}"
+
+ALERT_FLIP_INTERVAL_MINS = "{{hbase-alert-config/alert.flip.interval.mins}}"
+
+logger = logging.getLogger('ambari_alerts')
+
+alert_behaviour_properties = {"alert_behaviour_type" : ALERT_BEHAVIOUR_TYPE, "alert_success_percentage" : ALERT_SUCCESS_PERCENTAGE,
+                              "alert_timeout_return_value" : ALERT_TIMEOUT_RETURN_VALUE, "alert_timeout_secs" : ALERT_TIMEOUT_SECS,
+                              "alert_flip_interval_mins" : ALERT_FLIP_INTERVAL_MINS}
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (ALERT_BEHAVIOUR_TYPE, ALERT_SUCCESS_PERCENTAGE, ALERT_TIMEOUT_RETURN_VALUE, ALERT_TIMEOUT_SECS,
+          ALERT_FLIP_INTERVAL_MINS)
+
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  return simulate_perf_cluster_alert_behaviour(alert_behaviour_properties, configurations)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_master.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_master.py
index 7ea42ab..3761fcf 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_master.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_master.py
@@ -33,6 +33,10 @@ class HBaseMaster(Dummy):
   def __init__(self):
     super(HBaseMaster, self).__init__()
     self.component_name = "HBASE_MASTER"
+    self.principal_conf_name = "hbase-site"
+    self.principal_name = "hbase.master.kerberos.principal"
+    self.keytab_conf_name = "hbase-site"
+    self.keytab_name = "hbase.master.keytab.file"
 
   def decommission(self, env):
     print "Decommission"

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_regionserver.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_regionserver.py
index a866715..101c36f 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_regionserver.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_regionserver.py
@@ -33,6 +33,10 @@ class HBaseRegionServer(Dummy):
   def __init__(self):
     super(HBaseRegionServer, self).__init__()
     self.component_name = "HBASE_REGIONSERVER"
+    self.principal_conf_name = "hbase-site"
+    self.principal_name = "hbase.regionserver.kerberos.principal"
+    self.keytab_conf_name = "hbase-site"
+    self.keytab_name = "hbase.regionserver.keytab.file"
 
   def decommission(self, env):
     print "Decommission"

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/phoenix_queryserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/phoenix_queryserver.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/phoenix_queryserver.py
index 9cf3cec..76a49d9 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/phoenix_queryserver.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/phoenix_queryserver.py
@@ -33,6 +33,10 @@ class PhoenixQueryServer(Dummy):
   def __init__(self):
     super(PhoenixQueryServer, self).__init__()
     self.component_name = "PHOENIX_QUERY_SERVER"
+    self.principal_conf_name = "hbase-site"
+    self.principal_name = "phoenix.queryserver.kerberos.principal"
+    self.keytab_conf_name = "hbase-site"
+    self.keytab_name = "phoenix.queryserver.keytab.file"
 
 if __name__ == "__main__":
   PhoenixQueryServer().execute()


[25/25] ambari git commit: Merge from branch-2.5

Posted by sw...@apache.org.
Merge from branch-2.5


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4278c4a4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4278c4a4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4278c4a4

Branch: refs/heads/branch-feature-AMBARI-18901
Commit: 4278c4a4f1240270d4f74636fecc6b590b722432
Parents: 1c5c7df
Author: Swapan Shridhar <ss...@hortonworks.com>
Authored: Thu Dec 8 15:28:54 2016 -0800
Committer: Swapan Shridhar <ss...@hortonworks.com>
Committed: Thu Dec 8 15:28:54 2016 -0800

----------------------------------------------------------------------
 .../admin-web/app/scripts/services/Cluster.js   |   51 -
 .../app/scripts/services/RoleDetailsModal.js    |   31 +-
 .../app/views/modals/RoleDetailsModal.html      |    6 +-
 ambari-agent/conf/unix/ambari-agent.ini         |    1 +
 .../src/main/python/ambari_agent/ActionQueue.py |   11 +
 .../ambari_agent/CustomServiceOrchestrator.py   |    7 +-
 .../src/main/python/ambari_agent/FileCache.py   |   12 +-
 .../src/main/python/ambari_agent/Hardware.py    |   52 +-
 .../src/main/python/ambari_agent/HostInfo.py    |   12 +-
 .../test/python/ambari_agent/TestActionQueue.py |   48 +
 .../TestCustomServiceOrchestrator.py            |   34 +-
 .../test/python/ambari_agent/TestFileCache.py   |   10 +-
 .../test/python/ambari_agent/TestHardware.py    |   70 +
 .../ambari_commons/ambari_metrics_helper.py     |   52 +-
 .../main/python/ambari_commons/logging_utils.py |    5 +-
 .../ambari_commons/parallel_processing.py       |   95 +
 .../libraries/functions/constants.py            |    1 +
 .../libraries/functions/copy_tarball.py         |    2 +-
 .../dynamic_variable_interpretation.py          |    2 +-
 .../libraries/functions/package_conditions.py   |    2 +-
 .../libraries/functions/setup_atlas_hook.py     |   47 +-
 .../simulate_perf_cluster_alert_behaviour.py    |  108 ++
 .../libraries/script/dummy.py                   |   11 +
 .../ambari/logfeeder/mapper/MapperDateTest.java |   56 -
 .../org/apache/ambari/logsearch/LogSearch.java  |    4 +-
 .../ambari/logsearch/dao/SolrCollectionDao.java |   64 +-
 .../web/listener/LogSearchSessionListener.java  |   48 +
 .../docker/test-config/logsearch/log4j.xml      |    2 +-
 .../timeline/AbstractTimelineMetricsSink.java   |   51 +-
 .../availability/MetricCollectorHAHelper.java   |    1 -
 .../AbstractTimelineMetricSinkTest.java         |   10 +-
 .../availability/MetricCollectorHATest.java     |    1 -
 .../cache/HandleConnectExceptionTest.java       |    4 +
 .../conf/unix/ambari-metrics-grafana            |    6 +-
 .../timeline/HadoopTimelineMetricsSink.java     |    4 +-
 .../timeline/HadoopTimelineMetricsSinkTest.java |    6 +-
 .../src/main/python/core/config_reader.py       |    9 +-
 .../src/test/python/core/TestEmitter.py         |    2 +-
 .../timeline/HBaseTimelineMetricStore.java      |   18 +-
 .../timeline/TimelineMetricConfiguration.java   |   14 +
 .../aggregators/AbstractTimelineAggregator.java |    6 +-
 .../MetricCollectorHAController.java            |    4 +-
 .../TestApplicationHistoryServer.java           |   14 +-
 .../MetricCollectorHAControllerTest.java        |    4 +-
 ambari-project/pom.xml                          |   41 +
 ambari-server/checkstyle.xml                    |   17 +
 ambari-server/pom.xml                           |    4 +
 .../ambari/server/agent/AgentRequests.java      |    2 +-
 .../ambari/server/agent/ExecutionCommand.java   |    1 +
 .../ambari/server/checks/CheckDescription.java  |    4 +-
 .../AmbariManagementControllerImpl.java         |   54 +-
 .../internal/AlertTargetResourceProvider.java   |    2 +-
 .../BlueprintConfigurationProcessor.java        |    3 +-
 .../controller/internal/CalculatedStatus.java   |   52 +-
 .../ClusterStackVersionResourceProvider.java    |    4 +-
 .../internal/ConfigGroupResourceProvider.java   |   60 +-
 .../internal/RequestResourceProvider.java       |   34 +-
 .../internal/StageResourceProvider.java         |    2 +-
 .../internal/UpgradeResourceProvider.java       |    2 +-
 .../logging/LogSearchDataRetrievalService.java  |    7 +-
 .../controller/logging/LoggingCookieStore.java  |   44 +
 .../logging/LoggingRequestHelperImpl.java       |   42 +-
 .../apache/ambari/server/orm/dao/AlertsDAO.java |    6 +-
 .../entities/ClusterConfigMappingEntity.java    |   20 +-
 .../ambari/server/orm/entities/GroupEntity.java |    5 +-
 .../server/security/authorization/Users.java    |   10 +-
 .../serveraction/upgrades/ConfigureAction.java  |   16 +-
 .../serveraction/upgrades/FixLzoCodecPath.java  |   16 +-
 .../upgrades/FixOozieAdminUsers.java            |    9 +-
 .../upgrades/HBaseConfigCalculation.java        |   14 +-
 .../HBaseEnvMaxDirectMemorySizeAction.java      |   13 +-
 .../upgrades/HiveEnvClasspathAction.java        |   13 +-
 .../upgrades/HiveZKQuorumConfigAction.java      |    2 +-
 .../upgrades/OozieConfigCalculation.java        |   13 +-
 .../upgrades/RangerConfigCalculation.java       |    4 +-
 .../RangerKerberosConfigCalculation.java        |   20 +-
 .../upgrades/RangerKmsProxyConfig.java          |    3 +-
 .../upgrades/SparkShufflePropertyConfig.java    |    3 +-
 .../upgrades/YarnConfigCalculation.java         |    2 +-
 .../apache/ambari/server/stack/StackModule.java |  101 +-
 .../ambari/server/state/ComponentInfo.java      |   14 +
 .../org/apache/ambari/server/state/Config.java  |   22 +-
 .../ambari/server/state/ConfigFactory.java      |   20 +-
 .../apache/ambari/server/state/ConfigImpl.java  |  480 +++--
 .../apache/ambari/server/state/ServiceImpl.java |    2 +-
 .../server/state/cluster/ClusterImpl.java       |  110 +-
 .../server/state/cluster/ClustersImpl.java      |    2 +-
 .../server/state/configgroup/ConfigGroup.java   |   33 +-
 .../state/configgroup/ConfigGroupFactory.java   |   34 +-
 .../state/configgroup/ConfigGroupImpl.java      |  613 +++----
 .../ambari/server/state/host/HostImpl.java      |    2 +-
 .../services/RetryUpgradeActionService.java     |    2 +-
 .../ambari/server/state/stack/UpgradePack.java  |   31 +-
 .../stack/upgrade/ConfigurationCondition.java   |   72 +-
 .../svccomphost/ServiceComponentHostImpl.java   |    2 +-
 .../ambari/server/topology/AmbariContext.java   |   25 +-
 .../ambari/server/topology/TopologyManager.java |   19 +-
 .../ambari/server/update/HostUpdateHelper.java  |   10 +-
 .../server/upgrade/AbstractUpgradeCatalog.java  |   25 +-
 .../server/upgrade/UpgradeCatalog211.java       |   24 +-
 .../server/upgrade/UpgradeCatalog240.java       |    4 +-
 .../server/upgrade/UpgradeCatalog250.java       |   53 +-
 .../ambari/server/utils/RequestUtils.java       |   10 +
 .../python/ambari_server/serverConfiguration.py |   14 +-
 .../main/python/ambari_server/serverUpgrade.py  |   38 +-
 .../src/main/python/ambari_server/utils.py      |   23 +-
 .../src/main/python/ambari_server_main.py       |    8 +-
 .../main/resources/Ambari-DDL-MySQL-CREATE.sql  |   11 +-
 .../1.6.1.2.2.0/package/scripts/params.py       |    2 +-
 .../AMBARI_INFRA/0.1.0/metainfo.xml             |    1 +
 .../0.1.0/package/scripts/setup_infra_solr.py   |    3 +-
 .../0.1.0/configuration/ams-env.xml             |    2 +-
 .../0.1.0/configuration/ams-site.xml            |   12 +
 .../AMBARI_METRICS/0.1.0/metainfo.xml           |    1 +
 .../AMBARI_METRICS/0.1.0/package/scripts/ams.py |    1 +
 .../0.1.0/package/scripts/metrics_collector.py  |    2 +
 .../package/scripts/metrics_grafana_util.py     |   55 +-
 .../0.1.0/package/scripts/params.py             |   28 +-
 .../0.1.0/package/scripts/service_check.py      |  226 +--
 .../metrics_grafana_datasource.json.j2          |    4 +-
 .../0.5.0.2.1/configuration/falcon-env.xml      |   17 +
 .../FALCON/0.5.0.2.1/metainfo.xml               |    7 +
 .../FALCON/0.5.0.2.1/package/scripts/falcon.py  |    2 +-
 .../0.5.0.2.1/package/scripts/params_linux.py   |    5 +-
 .../FALCON/0.5.0.2.1/quicklinks/quicklinks.json |   35 +
 .../FLUME/1.4.0.2.0/package/scripts/params.py   |    2 +-
 .../0.96.0.2.0/package/scripts/params_linux.py  |    2 +-
 .../HDFS/2.1.0.2.0/configuration/hdfs-site.xml  |    2 +-
 .../package/alerts/alert_metrics_deviation.py   |    2 +-
 .../HIVE/0.12.0.2.0/configuration/hive-env.xml  |   17 +
 .../HIVE/0.12.0.2.0/package/scripts/hcat.py     |    2 +-
 .../HIVE/0.12.0.2.0/package/scripts/hive.py     |    2 +-
 .../0.12.0.2.0/package/scripts/params_linux.py  |    5 +-
 .../HIVE/0.12.0.2.0/package/scripts/webhcat.py  |    2 +-
 .../KAFKA/0.8.1/package/scripts/params.py       |    2 +-
 .../0.5.0/configuration/logfeeder-env.xml       |   14 +
 .../0.5.0/configuration/logsearch-env.xml       |   94 +-
 .../configuration/logsearch-properties.xml      |    2 +-
 .../LOGSEARCH/0.5.0/metainfo.xml                |   35 +-
 .../LOGSEARCH/0.5.0/package/scripts/params.py   |   79 +-
 .../0.5.0/package/scripts/setup_logfeeder.py    |    2 +-
 .../0.5.0/package/scripts/setup_logsearch.py    |   13 +-
 .../0.5.0/properties/logfeeder-env.sh.j2        |    2 +-
 .../0.5.0/properties/logsearch-env.sh.j2        |    2 +-
 .../0.5.0/properties/logsearch-log4j.xml.j2     |    4 +-
 .../0.5.0/properties/output.config.json.j2      |    6 +-
 .../LOGSEARCH/0.5.0/themes/theme.json           |  253 +++
 .../RANGER/0.4.0/package/scripts/params.py      |    1 +
 .../0.4.0/package/scripts/ranger_admin.py       |   36 +-
 .../SQOOP/1.4.4.2.0/configuration/sqoop-env.xml |   17 +
 .../1.4.4.2.0/configuration/sqoop-site.xml      |    6 +
 .../1.4.4.2.0/package/scripts/params_linux.py   |    5 +-
 .../SQOOP/1.4.4.2.0/package/scripts/sqoop.py    |    2 +-
 .../STORM/0.10.0/configuration/storm-env.xml    |   17 +
 .../STORM/0.9.1/package/scripts/params_linux.py |    8 +-
 .../STORM/0.9.1/package/scripts/storm.py        |    2 +-
 .../2.1.0.2.0/package/scripts/service_check.py  |   66 +-
 .../ZOOKEEPER/3.4.6/metainfo.xml                |    2 +-
 .../scripts/post-user-creation-hook.sh          |    7 +
 .../src/main/resources/scripts/stack_advisor.py |   11 +-
 .../HDP/2.0.6/configuration/cluster-env.xml     |   10 +
 .../before-ANY/scripts/shared_initialization.py |    6 +-
 .../2.0.6/hooks/before-START/scripts/params.py  |    2 +-
 .../HDP/2.0.6/properties/stack_features.json    |    5 +
 .../stacks/HDP/2.0.6/services/stack_advisor.py  |   47 +-
 .../stacks/HDP/2.1/services/stack_advisor.py    |   20 +-
 .../services/HIVE/configuration/hive-site.xml   |    4 +
 .../stacks/HDP/2.2/services/stack_advisor.py    |   62 +-
 .../services/STORM/configuration/storm-site.xml |    4 +
 .../stacks/HDP/2.3/services/stack_advisor.py    |   61 +-
 .../HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml |   27 +-
 .../HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml |   27 +-
 .../HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml |   27 +-
 .../HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml |   27 +-
 .../stacks/HDP/2.3/upgrades/upgrade-2.5.xml     |    4 +-
 .../stacks/HDP/2.3/upgrades/upgrade-2.6.xml     |    4 +-
 .../services/HDFS/configuration/hadoop-env.xml  |  176 ++
 .../HDP/2.4/upgrades/nonrolling-upgrade-2.4.xml |   27 +-
 .../HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml |   27 +-
 .../HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml |   27 +-
 .../stacks/HDP/2.4/upgrades/upgrade-2.5.xml     |    4 +-
 .../stacks/HDP/2.4/upgrades/upgrade-2.6.xml     |    4 +-
 .../stacks/HDP/2.5/upgrades/config-upgrade.xml  |   12 +
 .../HDP/2.5/upgrades/nonrolling-upgrade-2.5.xml |   27 +-
 .../HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml |   31 +-
 .../stacks/HDP/2.5/upgrades/upgrade-2.6.xml     |    3 +
 .../HDP/2.6/services/ACCUMULO/metainfo.xml      |    2 +-
 .../stacks/HDP/2.6/services/FALCON/metainfo.xml |    2 +-
 .../stacks/HDP/2.6/services/FLUME/metainfo.xml  |    2 +-
 .../stacks/HDP/2.6/services/HBASE/metainfo.xml  |    2 +-
 .../stacks/HDP/2.6/services/HDFS/metainfo.xml   |    2 +-
 .../stacks/HDP/2.6/services/HIVE/metainfo.xml   |    2 +-
 .../stacks/HDP/2.6/services/KAFKA/metainfo.xml  |    2 +-
 .../stacks/HDP/2.6/services/KNOX/metainfo.xml   |    2 +-
 .../stacks/HDP/2.6/services/MAHOUT/metainfo.xml |    2 +-
 .../stacks/HDP/2.6/services/OOZIE/metainfo.xml  |    1 +
 .../stacks/HDP/2.6/services/PIG/metainfo.xml    |    2 +-
 .../stacks/HDP/2.6/services/RANGER/metainfo.xml |    2 +-
 .../HDP/2.6/services/RANGER_KMS/metainfo.xml    |    2 +-
 .../stacks/HDP/2.6/services/SLIDER/metainfo.xml |    2 +-
 .../services/SPARK/configuration/livy-conf.xml  |   73 +
 .../configuration/livy-spark-blacklist.xml      |   52 +
 .../stacks/HDP/2.6/services/SPARK/metainfo.xml  |    2 +-
 .../stacks/HDP/2.6/services/SPARK2/metainfo.xml |    2 +-
 .../stacks/HDP/2.6/services/SQOOP/metainfo.xml  |    2 +-
 .../stacks/HDP/2.6/services/STORM/metainfo.xml  |    2 +-
 .../stacks/HDP/2.6/services/TEZ/metainfo.xml    |    2 +-
 .../stacks/HDP/2.6/services/YARN/metainfo.xml   |    4 +-
 .../HDP/2.6/services/ZOOKEEPER/metainfo.xml     |    2 +-
 .../HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml |   27 +-
 .../HDP/3.0/configuration/cluster-env.xml       |  293 +++
 .../HDP/3.0/hooks/after-INSTALL/scripts/hook.py |   37 +
 .../3.0/hooks/after-INSTALL/scripts/params.py   |   97 +
 .../scripts/shared_initialization.py            |  111 ++
 .../hooks/before-ANY/files/changeToSecureUid.sh |   53 +
 .../HDP/3.0/hooks/before-ANY/scripts/hook.py    |   36 +
 .../HDP/3.0/hooks/before-ANY/scripts/params.py  |  231 +++
 .../before-ANY/scripts/shared_initialization.py |  226 +++
 .../3.0/hooks/before-INSTALL/scripts/hook.py    |   37 +
 .../3.0/hooks/before-INSTALL/scripts/params.py  |  113 ++
 .../scripts/repo_initialization.py              |   68 +
 .../scripts/shared_initialization.py            |   37 +
 .../3.0/hooks/before-RESTART/scripts/hook.py    |   29 +
 .../hooks/before-START/files/checkForFormat.sh  |   65 +
 .../before-START/files/task-log4j.properties    |  134 ++
 .../hooks/before-START/files/topology_script.py |   66 +
 .../HDP/3.0/hooks/before-START/scripts/hook.py  |   39 +
 .../3.0/hooks/before-START/scripts/params.py    |  326 ++++
 .../before-START/scripts/rack_awareness.py      |   47 +
 .../scripts/shared_initialization.py            |  191 ++
 .../templates/commons-logging.properties.j2     |   43 +
 .../templates/exclude_hosts_list.j2             |   21 +
 .../templates/hadoop-metrics2.properties.j2     |  105 ++
 .../before-START/templates/health_check.j2      |   81 +
 .../templates/include_hosts_list.j2             |   21 +
 .../templates/topology_mappings.data.j2         |   24 +
 .../main/resources/stacks/HDP/3.0/kerberos.json |   78 +
 .../main/resources/stacks/HDP/3.0/metainfo.xml  |   24 +
 .../HDP/3.0/properties/stack_features.json      |  323 ++++
 .../stacks/HDP/3.0/properties/stack_tools.json  |    4 +
 .../resources/stacks/HDP/3.0/repos/repoinfo.xml |  132 ++
 .../services/HDFS/configuration/core-site.xml   |   56 +
 .../services/HDFS/configuration/hadoop-env.xml  |  200 ++
 .../services/HDFS/configuration/hdfs-log4j.xml  |  226 +++
 .../services/HDFS/configuration/hdfs-site.xml   |  153 ++
 .../HDFS/configuration/ranger-hdfs-audit.xml    |  217 +++
 .../ranger-hdfs-plugin-properties.xml           |   98 +
 .../configuration/ranger-hdfs-policymgr-ssl.xml |   67 +
 .../HDFS/configuration/ranger-hdfs-security.xml |   65 +
 .../services/HDFS/configuration/widgets.json    |  649 +++++++
 .../stacks/HDP/3.0/services/HDFS/kerberos.json  |  246 +++
 .../stacks/HDP/3.0/services/HDFS/metainfo.xml   |  190 ++
 .../services/HDFS/quicklinks/quicklinks.json    |   80 +
 .../HDP/3.0/services/HDFS/themes/theme.json     |  179 ++
 .../HDP/3.0/services/YARN/YARN_widgets.json     |  670 +++++++
 .../YARN/configuration-mapred/mapred-env.xml    |   51 +
 .../YARN/configuration-mapred/mapred-site.xml   |  134 ++
 .../YARN/configuration/capacity-scheduler.xml   |   71 +
 .../YARN/configuration/ranger-yarn-audit.xml    |  177 ++
 .../ranger-yarn-plugin-properties.xml           |   82 +
 .../configuration/ranger-yarn-policymgr-ssl.xml |   66 +
 .../YARN/configuration/ranger-yarn-security.xml |   58 +
 .../services/YARN/configuration/yarn-env.xml    |  200 ++
 .../services/YARN/configuration/yarn-log4j.xml  |  103 ++
 .../services/YARN/configuration/yarn-site.xml   |  814 +++++++++
 .../stacks/HDP/3.0/services/YARN/kerberos.json  |  278 +++
 .../stacks/HDP/3.0/services/YARN/metainfo.xml   |  173 ++
 .../YARN/quicklinks-mapred/quicklinks.json      |   80 +
 .../services/YARN/quicklinks/quicklinks.json    |   80 +
 .../3.0/services/YARN/themes-mapred/theme.json  |  132 ++
 .../HDP/3.0/services/YARN/themes/theme.json     |  250 +++
 .../HDP/3.0/services/ZOOKEEPER/metainfo.xml     |   54 +
 .../main/resources/stacks/HDP/3.0/widgets.json  |   95 +
 .../2.1/hooks/before-START/scripts/params.py    |    2 +-
 .../PERF/1.0/configuration/cluster-env.xml      |   27 +-
 .../resources/stacks/PERF/1.0/kerberos.json     |   78 +
 .../GRUMPY/configuration/grumpy-site.xml        |   12 +
 .../PERF/1.0/services/GRUMPY/kerberos.json      |   78 +
 .../services/GRUMPY/package/scripts/dwarf.py    |    4 +
 .../stacks/PERF/1.0/services/HAPPY/alerts.json  |   20 +
 .../HAPPY/configuration/happy-alert-config.xml  |   80 +
 .../services/HAPPY/configuration/happy-site.xml |   12 +
 .../PERF/1.0/services/HAPPY/kerberos.json       |   78 +
 .../stacks/PERF/1.0/services/HAPPY/metainfo.xml |    5 +
 .../HAPPY/package/alerts/alert_happy_process.py |   59 +
 .../1.0/services/HAPPY/package/scripts/dwarf.py |    4 +
 .../stacks/PERF/1.0/services/HBASE/alerts.json  |  110 +-
 .../HBASE/configuration/hbase-alert-config.xml  |   80 +
 .../stacks/PERF/1.0/services/HBASE/metainfo.xml |    1 +
 .../package/alerts/hbase_master_process.py      |   59 +
 .../alerts/hbase_regionserver_process.py        |   59 +
 .../HBASE/package/scripts/hbase_master.py       |    4 +
 .../HBASE/package/scripts/hbase_regionserver.py |    4 +
 .../package/scripts/phoenix_queryserver.py      |    4 +
 .../stacks/PERF/1.0/services/HDFS/alerts.json   | 1728 +-----------------
 .../HDFS/configuration/hdfs-alert-config.xml    |   80 +
 .../stacks/PERF/1.0/services/HDFS/metainfo.xml  |    1 +
 .../package/alerts/alert_checkpoint_time.py     |   38 +-
 .../alerts/alert_datanode_unmounted_data_dir.py |   47 +-
 .../package/alerts/alert_ha_namenode_health.py  |   75 -
 .../package/alerts/alert_metrics_deviation.py   |   85 -
 .../package/alerts/alert_nfs_gateway_process.py |   59 +
 .../package/alerts/alert_snamenode_process.py   |   59 +
 .../package/alerts/alert_upgrade_finalized.py   |   49 +-
 .../services/HDFS/package/scripts/datanode.py   |    4 +
 .../HDFS/package/scripts/journalnode.py         |    4 +
 .../services/HDFS/package/scripts/namenode.py   |    4 +
 .../services/HDFS/package/scripts/nfsgateway.py |    4 +
 .../services/HDFS/package/scripts/snamenode.py  |    4 +
 .../KERBEROS/configuration/kerberos-env.xml     |  380 ++++
 .../KERBEROS/configuration/krb5-conf.xml        |  109 ++
 .../PERF/1.0/services/KERBEROS/kerberos.json    |   17 +
 .../PERF/1.0/services/KERBEROS/metainfo.xml     |  123 ++
 .../KERBEROS/package/scripts/kerberos_client.py |   80 +
 .../KERBEROS/package/scripts/kerberos_common.py |  468 +++++
 .../services/KERBEROS/package/scripts/params.py |  200 ++
 .../KERBEROS/package/scripts/service_check.py   |   30 +
 .../KERBEROS/package/scripts/status_params.py   |   32 +
 .../services/KERBEROS/package/scripts/utils.py  |  105 ++
 .../KERBEROS/package/templates/krb5_conf.j2     |   54 +
 .../stacks/PERF/1.0/services/SLEEPY/alerts.json |   20 +
 .../configuration/sleepy-alert-config.xml       |   80 +
 .../SLEEPY/configuration/sleepy-site.xml        |   12 +
 .../PERF/1.0/services/SLEEPY/kerberos.json      |   78 +
 .../PERF/1.0/services/SLEEPY/metainfo.xml       |    5 +
 .../package/alerts/alert_sleepy_process.py      |   59 +
 .../services/SLEEPY/package/scripts/dwarf.py    |    4 +
 .../stacks/PERF/1.0/services/SNOW/alerts.json   |   20 +
 .../SNOW/configuration/snow-alert-config.xml    |   80 +
 .../services/SNOW/configuration/snow-site.xml   |   12 +
 .../stacks/PERF/1.0/services/SNOW/kerberos.json |   78 +
 .../stacks/PERF/1.0/services/SNOW/metainfo.xml  |    5 +
 .../SNOW/package/alerts/alert_snow_process.py   |   59 +
 .../services/SNOW/package/scripts/snow_white.py |    4 +
 .../stacks/PERF/1.0/services/YARN/alerts.json   |  361 +---
 .../YARN/configuration/yarn-alert-config.xml    |   80 +
 .../stacks/PERF/1.0/services/YARN/metainfo.xml  |    3 +
 .../package/alerts/alert_history_process.py     |   59 +
 .../package/alerts/alert_nodemanager_health.py  |   36 +-
 .../alerts/alert_nodemanagers_summary.py        |   68 -
 .../alerts/alert_resourcemanager_process.py     |   59 +
 .../package/alerts/alert_timeline_process.py    |   59 +
 .../scripts/application_timeline_server.py      |    4 +
 .../YARN/package/scripts/historyserver.py       |    4 +
 .../YARN/package/scripts/nodemanager.py         |    4 +
 .../YARN/package/scripts/resourcemanager.py     |    4 +
 .../PERF/1.0/services/ZOOKEEPER/alerts.json     |   20 +
 .../ZOOKEEPER/configuration/zk-alert-config.xml |   80 +
 .../PERF/1.0/services/ZOOKEEPER/kerberos.json   |   39 +
 .../PERF/1.0/services/ZOOKEEPER/metainfo.xml    |    4 +
 .../package/alerts/alert_zk_server_process.py   |   59 +
 .../package/scripts/zookeeper_server.py         |    4 +
 .../src/main/resources/stacks/stack_advisor.py  |  209 ++-
 .../src/main/resources/upgrade-pack.xsd         |   18 +-
 .../ExecutionCommandWrapperTest.java            |   17 +-
 .../TestActionSchedulerThreading.java           |   19 +-
 .../server/agent/HeartbeatTestHelper.java       |    6 +-
 .../server/agent/TestHeartbeatMonitor.java      |   13 +-
 .../configuration/RecoveryConfigHelperTest.java |    2 +-
 .../AmbariManagementControllerImplTest.java     |   96 +-
 .../AmbariManagementControllerTest.java         |  118 +-
 .../BlueprintConfigurationProcessorTest.java    |   11 +
 .../internal/CalculatedStatusTest.java          |   31 +
 .../internal/RequestResourceProviderTest.java   |   12 +-
 .../UpgradeResourceProviderHDP22Test.java       |   14 +-
 .../internal/UpgradeResourceProviderTest.java   |   13 +-
 .../LogSearchDataRetrievalServiceTest.java      |  142 +-
 .../logging/LoggingRequestHelperImplTest.java   |    5 +
 .../server/orm/InMemoryDefaultTestModule.java   |    5 +
 .../server/orm/dao/ServiceConfigDAOTest.java    |  144 +-
 .../ldap/AmbariLdapDataPopulatorTest.java       |    3 +-
 .../ComponentVersionCheckActionTest.java        |   19 +-
 .../upgrades/ConfigureActionTest.java           |   96 +-
 .../upgrades/FixOozieAdminUsersTest.java        |   76 +-
 .../HBaseEnvMaxDirectMemorySizeActionTest.java  |  187 +-
 .../upgrades/HiveEnvClasspathActionTest.java    |  148 +-
 .../upgrades/HiveZKQuorumConfigActionTest.java  |    2 +-
 .../upgrades/KerberosKeytabsActionTest.java     |   28 +-
 .../upgrades/RangerConfigCalculationTest.java   |   72 +-
 .../RangerKerberosConfigCalculationTest.java    |  173 +-
 .../upgrades/RangerKmsProxyConfigTest.java      |   36 +-
 .../SparkShufflePropertyConfigTest.java         |   30 +-
 .../upgrades/UpgradeActionTest.java             |   28 +-
 .../ambari/server/state/ConfigGroupTest.java    |   26 +-
 .../ambari/server/state/ConfigHelperTest.java   |   49 +-
 .../ambari/server/state/UpgradeHelperTest.java  |   39 +-
 .../state/alerts/AlertReceivedListenerTest.java |    8 +-
 .../state/cluster/ClusterDeadlockTest.java      |   17 +-
 .../server/state/cluster/ClusterTest.java       |  228 ++-
 .../server/state/cluster/ClustersTest.java      |    8 +-
 ...omponentHostConcurrentWriteDeadlockTest.java |    9 +-
 .../ambari/server/state/host/HostTest.java      |    6 +-
 .../server/state/stack/UpgradePackTest.java     |   22 +
 .../svccomphost/ServiceComponentHostTest.java   |   24 +-
 .../server/topology/AmbariContextTest.java      |   38 +-
 .../server/update/HostUpdateHelperTest.java     |   40 +-
 .../server/upgrade/UpgradeCatalog210Test.java   |    2 +
 .../server/upgrade/UpgradeCatalog250Test.java   |   98 +
 .../ambari/server/utils/RequestUtilsTest.java   |   18 +
 .../ambari/server/utils/StageUtilsTest.java     |    4 +
 ambari-server/src/test/python/TestMpacks.py     |  102 +-
 .../2.0.6/YARN/test_yarn_service_check.py       |  111 +-
 .../stacks/2.0.6/common/test_stack_advisor.py   |   16 +-
 .../stacks/2.1/common/test_stack_advisor.py     |    2 +
 .../stacks/2.2/common/test_stack_advisor.py     |   46 +-
 .../stacks/2.3/common/test_stack_advisor.py     |   57 +-
 .../stacks/2.4/AMBARI_INFRA/test_infra_solr.py  |    2 +-
 .../stacks/2.4/LOGSEARCH/test_logsearch.py      |    2 +-
 .../test/python/stacks/2.4/configs/default.json |    2 +-
 .../stacks/2.6/RANGER/test_ranger_admin.py      |  504 +++++
 .../2.6/configs/ranger-admin-default.json       |  704 +++++++
 .../2.6/configs/ranger-admin-secured.json       |  773 ++++++++
 .../test/python/stacks/test_stack_adviser.py    |  239 +++
 .../src/test/resources/dashboards/README.txt    |   18 +
 .../stacks/HDP/0.1/services/HDFS/metainfo.xml   |    1 +
 .../GANGLIA/upgrades/HDP/rolling-upgrade.xml    |   43 +
 .../app/controllers/global/update_controller.js |    5 +-
 .../journalNode/step1_controller.js             |   26 +-
 .../journalNode/step4_controller.js             |    6 +-
 .../journalNode/wizard_controller.js            |   28 +-
 .../nameNode/step7_controller.js                |   14 +-
 .../nameNode/step9_controller.js                |   17 +-
 .../resourceManager/step4_controller.js         |    5 +-
 .../main/admin/stack_and_upgrade_controller.js  |   13 +-
 .../admin/stack_upgrade_history_controller.js   |    2 +-
 .../controllers/main/service/info/summary.js    |   37 +-
 .../main/service/reassign/step4_controller.js   |   39 +-
 .../main/service/reassign/step6_controller.js   |   19 -
 .../app/controllers/wizard/step3_controller.js  |    2 +-
 ambari-web/app/data/HDP2/site_properties.js     |   36 +-
 .../app/mappers/stack_upgrade_history_mapper.js |    2 +-
 ambari-web/app/messages.js                      |    9 +-
 ambari-web/app/mixins.js                        |    1 -
 .../common/widgets/export_metrics_mixin.js      |   28 +-
 .../mixins/wizard/assign_master_components.js   |    1 +
 .../wizard/wizardProgressPageController.js      |   20 +
 .../configs/objects/service_config_property.js  |    7 -
 ambari-web/app/models/stack_service.js          |    4 +-
 .../stack_version/stack_upgrade_history.js      |    2 +-
 .../app/routes/manage_journalnode_routes.js     |    4 +-
 ambari-web/app/styles/stack_versions.less       |   11 +-
 .../highAvailability/journalNode/wizard.hbs     |    8 +-
 .../admin/stack_upgrade/upgrade_options.hbs     |   61 +-
 .../main/service/info/service_alert_popup.hbs   |   15 +-
 .../app/utils/configs/config_initializer.js     |   28 +-
 .../mount_points_based_initializer_mixin.js     |  340 ----
 .../widgets/slider_config_widget_view.js        |    7 +-
 .../highAvailability/journalNode/step2_view.js  |    5 +-
 .../upgrade_history_details_view.js             |    2 +-
 .../admin/stack_upgrade/upgrade_history_view.js |  137 +-
 .../admin/stack_upgrade/upgrade_wizard_view.js  |    6 +-
 .../service/reassign/step4_controller_test.js   |    2 +-
 ambari-web/test/models/stack_service_test.js    |   71 +-
 ambari-web/test/utils/ajax/ajax_test.js         |    9 +-
 .../utils/configs/config_initializer_test.js    |  457 -----
 .../stack_upgrade/upgrade_wizard_view_test.js   |    2 +-
 contrib/utils/perf/deploy-gce-perf-cluster.py   |   13 +-
 .../src/main/resources/ui/app/app.js            |   14 +-
 .../src/main/resources/view.xml                 |   23 +-
 contrib/views/files/src/main/resources/view.xml |    7 +
 .../views/hive-next/src/main/resources/view.xml |    7 +
 .../src/main/resources/ui/pig-web/app/app.js    |   14 +-
 contrib/views/pom.xml                           |    1 -
 contrib/views/tez/src/main/resources/view.xml   |    7 +
 .../ui/app/templates/components/job-details.hbs |    2 +-
 contrib/views/zeppelin/pom.xml                  |  190 --
 .../view/zeppelin/ZeppelinServiceCheck.java     |   55 -
 .../ambari/view/zeppelin/ZeppelinServlet.java   |  113 --
 .../zeppelin/src/main/resources/WEB-INF/web.xml |   40 -
 .../src/main/resources/view.log4j.properties    |   27 -
 .../views/zeppelin/src/main/resources/view.xml  |   48 -
 pom.xml                                         |    1 +
 utility/pom.xml                                 |   33 +-
 ...AvoidTransactionalOnPrivateMethodsCheck.java |   55 +
 .../src/main/resources/checkstyle_packages.xml  |   15 +
 ...dTransactionalOnPrivateMethodsCheckTest.java |   49 +
 .../InputTransactionalOnPrivateMethods.java     |   46 +
 477 files changed, 20800 insertions(+), 6990 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/Cluster.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/Cluster.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/Cluster.js
index c83004c..0ad2af3 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/Cluster.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/Cluster.js
@@ -30,57 +30,6 @@ angular.module('ambariAdminConsole')
       'CLUSTER.USER'
     ],
 
-    orderedAuthorizations : [
-      "SERVICE.VIEW_METRICS",
-      "SERVICE.VIEW_STATUS_INFO",
-      "SERVICE.VIEW_CONFIGS",
-      "SERVICE.COMPARE_CONFIGS",
-      "SERVICE.VIEW_ALERTS",
-      "SERVICE.START_STOP",
-      "SERVICE.DECOMMISSION_RECOMMISSION",
-      "SERVICE.RUN_SERVICE_CHECK",
-      "SERVICE.TOGGLE_MAINTENANCE",
-      "SERVICE.RUN_CUSTOM_COMMAND",
-      "SERVICE.MODIFY_CONFIGS",
-      "SERVICE.MANAGE_CONFIG_GROUPS",
-      "SERVICE.MOVE",
-      "SERVICE.ENABLE_HA",
-      "SERVICE.MANAGE_ALERTS",
-      "SERVICE.TOGGLE_ALERTS",
-      "SERVICE.ADD_DELETE_SERVICES",
-      "SERVICE.VIEW_OPERATIONAL_LOGS",
-      "HOST.VIEW_CONFIGS",
-      "HOST.VIEW_METRICS",
-      "HOST.VIEW_STATUS_INFO",
-      "HOST.ADD_DELETE_COMPONENTS",
-      "HOST.ADD_DELETE_HOSTS",
-      "HOST.TOGGLE_MAINTENANCE",
-      "CLUSTER.VIEW_ALERTS",
-      "CLUSTER.VIEW_CONFIGS",
-      "CLUSTER.VIEW_METRICS",
-      "CLUSTER.VIEW_STACK_DETAILS",
-      "CLUSTER.VIEW_STATUS_INFO",
-      "CLUSTER.MANAGE_ALERTS",
-      "CLUSTER.MANAGE_CONFIG_GROUPS",
-      "CLUSTER.MANAGE_CREDENTIALS",
-      "CLUSTER.MODIFY_CONFIGS",
-      "CLUSTER.TOGGLE_ALERTS",
-      "CLUSTER.TOGGLE_KERBEROS",
-      "CLUSTER.UPGRADE_DOWNGRADE_STACK",
-      "CLUSTER.RUN_CUSTOM_COMMAND",
-      "AMBARI.ADD_DELETE_CLUSTERS",
-      "AMBARI.ASSIGN_ROLES",
-      "AMBARI.EDIT_STACK_REPOS",
-      "AMBARI.MANAGE_GROUPS",
-      "AMBARI.MANAGE_SETTINGS",
-      "AMBARI.MANAGE_STACK_VERSIONS",
-      "AMBARI.MANAGE_USERS",
-      "AMBARI.MANAGE_VIEWS",
-      "AMBARI.RENAME_CLUSTER",
-      "AMBARI.RUN_CUSTOM_COMMAND",
-      "SERVICE.SET_SERVICE_USERS_GROUPS"
-    ],
-
     orderedLevels: ['SERVICE', 'HOST', 'CLUSTER', 'AMBARI'],
 
     ineditableRoles : ['VIEW.USER', 'AMBARI.ADMINISTRATOR'],

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/RoleDetailsModal.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/RoleDetailsModal.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/RoleDetailsModal.js
index 5a14b33..06019c2 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/RoleDetailsModal.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/RoleDetailsModal.js
@@ -33,9 +33,9 @@ angular.module('ambariAdminConsole')
         templateUrl: 'views/modals/RoleDetailsModal.html',
         size: 'lg',
         controller: function($scope, $modalInstance) {
+          var authorizationsOrder;
           $scope.title = '';
           $scope.orderedRoles = ['AMBARI.ADMINISTRATOR'].concat(Cluster.orderedRoles).reverse();
-          $scope.orderedAuthorizations = Cluster.orderedAuthorizations;
           $scope.orderedLevels = Cluster.orderedLevels;
           $scope.authHash = {};
           $scope.getLevelName = function (key) {
@@ -44,25 +44,34 @@ angular.module('ambariAdminConsole')
           angular.forEach(roles, function (r) {
             angular.forEach(r.authorizations, function (auth) {
               var match = auth.authorization_id.match(/(\w+)\./),
-                levelKey = match && match[1],
-                isLevelDisplayed = $scope.orderedAuthorizations.some(function (item) {
-                  return !item.indexOf(levelKey);
-                });
+                  levelKey = match && match[1],
+                  isLevelDisplayed = $scope.orderedLevels.indexOf(levelKey) !== -1;
               if (isLevelDisplayed) {
                 if (!$scope.authHash[levelKey]) {
                   $scope.authHash[levelKey] = {};
                 }
                 if (!$scope.authHash[levelKey][auth.authorization_id]) {
-                  $scope.authHash[levelKey][auth.authorization_id] = auth.authorization_name;
+                  $scope.authHash[levelKey][auth.authorization_id] = {
+                    name: auth.authorization_name,
+                    roles: {}
+                  };
                 }
-                if (!r.authHash) {
-                  r.authHash = {};
-                }
-                r.authHash[auth.authorization_id] = true;
+                $scope.authHash[levelKey][auth.authorization_id].roles[r.permission_name] = true;
               }
             });
           });
-          $scope.roles = roles.sort(function(a, b) {
+
+          // sort authorizations for each level by number of roles permissions
+          for (var level in $scope.authHash) {
+            if ($scope.authHash.hasOwnProperty(level)) {
+              authorizationsOrder = Object.keys($scope.authHash[level]).sort(function (a, b) {
+                return Object.keys($scope.authHash[level][b].roles).length - Object.keys($scope.authHash[level][a].roles).length;
+              });
+              $scope.authHash[level].order = authorizationsOrder;
+            }
+          }
+
+          $scope.roles = roles.sort(function (a, b) {
             return $scope.orderedRoles.indexOf(a.permission_name) - $scope.orderedRoles.indexOf(b.permission_name);
           });
           $scope.ok = function() {

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-admin/src/main/resources/ui/admin-web/app/views/modals/RoleDetailsModal.html
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/views/modals/RoleDetailsModal.html b/ambari-admin/src/main/resources/ui/admin-web/app/views/modals/RoleDetailsModal.html
index 926bea9..942a733 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/views/modals/RoleDetailsModal.html
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/views/modals/RoleDetailsModal.html
@@ -40,10 +40,10 @@
         </div>
       </div>
       <div class="table-row-wrapper">
-        <div class="table-row" ng-repeat="auth in orderedAuthorizations" ng-if="authHash[level][auth]">
-          <div class="table-cell">{{authHash[level][auth]}}</div>
+        <div class="table-row" ng-repeat="auth_id in authHash[level].order">
+          <div class="table-cell">{{authHash[level][auth_id].name}}</div>
           <div class="table-cell text-center" ng-repeat="role in roles">
-            <i class="glyphicon glyphicon-ok green-icon" ng-show="role.authHash[auth]"></i>
+            <i class="glyphicon glyphicon-ok green-icon" ng-show="authHash[level][auth_id].roles[role.permission_name]"></i>
           </div>
         </div>
       </div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-agent/conf/unix/ambari-agent.ini
----------------------------------------------------------------------
diff --git a/ambari-agent/conf/unix/ambari-agent.ini b/ambari-agent/conf/unix/ambari-agent.ini
index 43740ad..c1d4c02 100644
--- a/ambari-agent/conf/unix/ambari-agent.ini
+++ b/ambari-agent/conf/unix/ambari-agent.ini
@@ -39,6 +39,7 @@ alert_kinit_timeout=14400000
 system_resource_overrides=/etc/resource_overrides
 ; memory_threshold_soft_mb=400
 ; memory_threshold_hard_mb=1000
+; ignore_mount_points=/mnt/custom1,/mnt/custom2
 
 [security]
 keysdir=/var/lib/ambari-agent/keys

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/ActionQueue.py b/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
index 3ec0621..cc10728 100644
--- a/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
+++ b/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
@@ -294,6 +294,7 @@ class ActionQueue(threading.Thread):
 
     logger.info("Command execution metadata - taskId = {taskId}, retry enabled = {retryAble}, max retry duration (sec) = {retryDuration}, log_output = {log_command_output}".
                  format(taskId=taskId, retryAble=retryAble, retryDuration=retryDuration, log_command_output=log_command_output))
+    command_canceled = False
     while retryDuration >= 0:
       numAttempts += 1
       start = 0
@@ -322,6 +323,7 @@ class ActionQueue(threading.Thread):
           status = self.FAILED_STATUS
           if (commandresult['exitcode'] == -signal.SIGTERM) or (commandresult['exitcode'] == -signal.SIGKILL):
             logger.info('Command with taskId = {cid} was canceled!'.format(cid=taskId))
+            command_canceled = True
             break
 
       if status != self.COMPLETED_STATUS and retryAble and retryDuration > 0:
@@ -338,6 +340,15 @@ class ActionQueue(threading.Thread):
                     .format(cid=taskId, status=status, retryAble=retryAble, retryDuration=retryDuration, delay=delay))
         break
 
+    # do not fail task which was rescheduled from server
+    if command_canceled:
+      with self.commandQueue.mutex:
+        for com in self.commandQueue.queue:
+          if com['taskId'] == command['taskId']:
+            logger.info('Command with taskId = {cid} was rescheduled by server. '
+                        'Fail report on cancelled command won\'t be sent with heartbeat.'.format(cid=taskId))
+            return
+
     # final result to stdout
     commandresult['stdout'] += '\n\nCommand completed successfully!\n' if status == self.COMPLETED_STATUS else '\n\nCommand failed after ' + str(numAttempts) + ' tries\n'
     logger.info('Command with taskId = {cid} completed successfully!'.format(cid=taskId) if status == self.COMPLETED_STATUS else 'Command with taskId = {cid} failed after {attempts} tries'.format(cid=taskId, attempts=numAttempts))
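
As an aside, the check added above can be reduced to a small standalone sketch: a cancelled command (SIGTERM/SIGKILL exit code) is only suppressed when the server has already re-queued the same taskId. All names below are hypothetical, and a plain list stands in for the agent's commandQueue:

  import signal

  def should_suppress_report(exit_code, task_id, queued_commands):
    # A SIGTERM/SIGKILL exit code marks the command as cancelled.
    if exit_code not in (-signal.SIGTERM, -signal.SIGKILL):
      return False
    # If the same taskId is queued again, the server rescheduled the command,
    # so the cancelled attempt is not reported as a failure in the heartbeat.
    return any(c['taskId'] == task_id for c in queued_commands)

  assert should_suppress_report(-signal.SIGTERM, 7, [{'taskId': 7}])
  assert not should_suppress_report(1, 7, [{'taskId': 7}])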

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py b/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py
index 7d61611..11c8cbe 100644
--- a/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py
+++ b/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py
@@ -154,12 +154,7 @@ class CustomServiceOrchestrator():
         self.file_cache.get_host_scripts_base_dir(server_url_prefix)          
         hook_dir = self.file_cache.get_hook_base_dir(command, server_url_prefix)
         base_dir = self.file_cache.get_service_base_dir(command, server_url_prefix)
-        from ActionQueue import ActionQueue  # To avoid cyclic dependency
-        if self.COMMAND_TYPE in command and command[self.COMMAND_TYPE] == ActionQueue.EXECUTION_COMMAND:
-          logger.info("Found it - " + str(command[self.COMMAND_TYPE]) + " yeah")
-          # limiting to only EXECUTION_COMMANDs for now
-          # TODO need a design for limiting to specific role/component such as METRICS_GRAFANA
-          self.file_cache.get_dashboard_base_dir(server_url_prefix)
+        self.file_cache.get_custom_resources_subdir(command, server_url_prefix)
 
         script_path = self.resolve_script_path(base_dir, script)
         script_tuple = (script_path, base_dir)

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-agent/src/main/python/ambari_agent/FileCache.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/FileCache.py b/ambari-agent/src/main/python/ambari_agent/FileCache.py
index 83ac373..a9ea8f6 100644
--- a/ambari-agent/src/main/python/ambari_agent/FileCache.py
+++ b/ambari-agent/src/main/python/ambari_agent/FileCache.py
@@ -45,7 +45,6 @@ class FileCache():
   STACKS_CACHE_DIRECTORY="stacks"
   COMMON_SERVICES_DIRECTORY="common-services"
   CUSTOM_ACTIONS_CACHE_DIRECTORY="custom_actions"
-  DASHBOARD_DIRECTORY="dashboards"
   HOST_SCRIPTS_CACHE_DIRECTORY="host_scripts"
   HASH_SUM_FILE=".hash"
   ARCHIVE_NAME="archive.zip"
@@ -100,12 +99,17 @@ class FileCache():
                                   server_url_prefix)
 
 
-  def get_dashboard_base_dir(self, server_url_prefix):
+  def get_custom_resources_subdir(self, command, server_url_prefix):
     """
-    Returns a base directory for dashboards
+    Returns a custom directory which must be a subdirectory of the resources dir
     """
+    try:
+      custom_dir = command['commandParams']['custom_folder']
+    except KeyError:
+      return None
+
     return self.provide_directory(self.cache_dir,
-                                  self.DASHBOARD_DIRECTORY,
+                                  custom_dir,
                                   server_url_prefix)
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-agent/src/main/python/ambari_agent/Hardware.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/Hardware.py b/ambari-agent/src/main/python/ambari_agent/Hardware.py
index 3c94d28..0d431a3 100644
--- a/ambari-agent/src/main/python/ambari_agent/Hardware.py
+++ b/ambari-agent/src/main/python/ambari_agent/Hardware.py
@@ -41,6 +41,7 @@ class Hardware:
   CHECK_REMOTE_MOUNTS_TIMEOUT_DEFAULT = '10'
   IGNORE_ROOT_MOUNTS = ["proc", "dev", "sys"]
   IGNORE_DEVICES = ["proc", "tmpfs", "cgroup", "mqueue", "shm"]
+  LINUX_PATH_SEP = "/"
 
   def __init__(self, config):
     self.hardware = {
@@ -88,6 +89,37 @@ class Hardware:
     return True
 
   @classmethod
+  def _is_mount_blacklisted(cls, blacklist, mount_point):
+    """
+    Verify whether a particular mount point is in the blacklist.
+
+    :return: True if the mount point, or a leading part of it, is listed in the blacklist; otherwise False
+
+     Example:
+       Mounts: /, /mnt/my_mount, /mnt/my_mount/sub_mount
+       Blacklist: /mnt/my_mount
+       Result: /
+
+    :type blacklist list
+    :type mount_point str
+    :rtype bool
+    """
+
+    if not blacklist or not mount_point:
+      return False
+
+    mount_point_elements = mount_point.split(cls.LINUX_PATH_SEP)
+
+    for el in blacklist:
+      el_list = el.split(cls.LINUX_PATH_SEP)
+      # compare path elements component by component
+      if el_list == mount_point_elements[:len(el_list)]:
+        return True
+
+    return False
+
+
+  @classmethod
   @OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
   def osdisks(cls, config=None):
     """ Run df to find out the disks on the host. Only works on linux
@@ -95,6 +127,11 @@ class Hardware:
     and any mounts with spaces. """
     timeout = cls._get_mount_check_timeout(config)
     command = ["timeout", timeout, "df", "-kPT"]
+    blacklisted_mount_points = []
+
+    if config:
+      ignore_mount_value = config.get("agent", "ignore_mount_points", default="")
+      blacklisted_mount_points = [item.strip() for item in ignore_mount_value.split(",")]
 
     if not cls._check_remote_mounts(config):
       command.append("-l")
@@ -103,6 +140,7 @@ class Hardware:
     dfdata = df.communicate()[0]
     mounts = [cls._parse_df_line(line) for line in dfdata.splitlines() if line]
     result_mounts = []
+    ignored_mounts = []
 
     for mount in mounts:
       if not mount:
@@ -113,13 +151,21 @@ class Hardware:
        - mounted device is not in the ignored list
        - is accessible to user under which current process running
        - it is not file-mount (docker environment)
+       - mount path or a part of mount path is not in the blacklist
       """
-      if mount["device"] not in cls.IGNORE_DEVICES and \
+      if mount["device"] not in cls.IGNORE_DEVICES and\
          mount["mountpoint"].split("/")[0] not in cls.IGNORE_ROOT_MOUNTS and\
-         cls._chk_writable_mount(mount['mountpoint']) and \
-         not path_isfile(mount["mountpoint"]):
+         cls._chk_writable_mount(mount['mountpoint']) and\
+         not path_isfile(mount["mountpoint"]) and\
+         not cls._is_mount_blacklisted(blacklisted_mount_points, mount["mountpoint"]):
 
         result_mounts.append(mount)
+      else:
+        ignored_mounts.append(mount)
+
+      if len(ignored_mounts) > 0:
+        ignore_list = [el["mountpoint"] for el in ignored_mounts]
+        logger.info("Some mount points was ignored: {0}".format(', '.join(ignore_list)))
 
     return result_mounts
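
The matching described in the _is_mount_blacklisted docstring is a per-component prefix comparison, so an ignore_mount_points entry such as /mnt/my_mount (see the ambari-agent.ini change above) also covers /mnt/my_mount/sub_mount but not /mnt/my_mount2. A self-contained sketch of the same rule, independent of the Hardware class and with hypothetical names:

  def is_blacklisted(blacklist, mount_point, sep="/"):
    if not blacklist or not mount_point:
      return False
    parts = mount_point.split(sep)
    for entry in blacklist:
      entry_parts = entry.split(sep)
      # the entry must match the mount point component by component from the root
      if entry_parts == parts[:len(entry_parts)]:
        return True
    return False

  blacklist = ["/mnt/my_mount"]
  assert not is_blacklisted(blacklist, "/")
  assert is_blacklisted(blacklist, "/mnt/my_mount/sub_mount")
  assert not is_blacklisted(blacklist, "/mnt/my_mount2")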
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-agent/src/main/python/ambari_agent/HostInfo.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/HostInfo.py b/ambari-agent/src/main/python/ambari_agent/HostInfo.py
index 338daac..3810b52 100644
--- a/ambari-agent/src/main/python/ambari_agent/HostInfo.py
+++ b/ambari-agent/src/main/python/ambari_agent/HostInfo.py
@@ -121,12 +121,12 @@ class HostInfo(object):
     return False
 
 def get_ntp_service():
-  if OSCheck.is_redhat_family() and int(OSCheck.get_os_major_version()) >= 7:
-    return ("chronyd", "ntpd",)
-  elif OSCheck.is_redhat_family():
-    return ("ntpd",)
-  elif OSCheck.is_suse_family() or OSCheck.is_ubuntu_family():
-    return ("ntp",)
+  if OSCheck.is_redhat_family():
+    return ("ntpd", "chronyd",)
+  elif OSCheck.is_suse_family():
+    return ("ntpd", "ntp",)
+  elif OSCheck.is_ubuntu_family():
+    return ("ntp", "chrony",)
 
 
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-agent/src/test/python/ambari_agent/TestActionQueue.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestActionQueue.py b/ambari-agent/src/test/python/ambari_agent/TestActionQueue.py
index 7d04d42..d5dde8b 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestActionQueue.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestActionQueue.py
@@ -27,6 +27,7 @@ import os, errno, time, pprint, tempfile, threading
 import sys
 from threading import Thread
 import copy
+import signal
 
 from mock.mock import patch, MagicMock, call
 from ambari_agent.CustomServiceOrchestrator import CustomServiceOrchestrator
@@ -703,6 +704,53 @@ class TestActionQueue(TestCase):
     report = actionQueue.result()
     self.assertEqual(len(report['reports']), 0)
 
+  def test_cancel_with_reschedule_command(self):
+    config = AmbariConfig()
+    tempdir = tempfile.gettempdir()
+    config.set('agent', 'prefix', tempdir)
+    config.set('agent', 'cache_dir', "/var/lib/ambari-agent/cache")
+    config.set('agent', 'tolerate_download_failures', "true")
+    dummy_controller = MagicMock()
+    actionQueue = ActionQueue(config, dummy_controller)
+    unfreeze_flag = threading.Event()
+    python_execution_result_dict = {
+      'stdout': 'out',
+      'stderr': 'stderr',
+      'structuredOut' : '',
+      'status' : '',
+      'exitcode' : -signal.SIGTERM
+    }
+
+    def side_effect(command, tmpoutfile, tmperrfile, override_output_files=True, retry=False):
+      unfreeze_flag.wait()
+      return python_execution_result_dict
+    def patched_aq_execute_command(command):
+      # We have to perform patching for separate thread in the same thread
+      with patch.object(CustomServiceOrchestrator, "runCommand") as runCommand_mock:
+        runCommand_mock.side_effect = side_effect
+        actionQueue.execute_command(command)
+
+    # We call method in a separate thread
+    execution_thread = Thread(target = patched_aq_execute_command ,
+                              args = (self.datanode_install_command, ))
+    execution_thread.start()
+    #  check in progress report
+    # wait until ready
+    while True:
+      time.sleep(0.1)
+      report = actionQueue.result()
+      if len(report['reports']) != 0:
+        break
+
+    unfreeze_flag.set()
+    # wait until ready
+    while len(report['reports']) != 0:
+      time.sleep(0.1)
+      report = actionQueue.result()
+
+    # check report
+    self.assertEqual(len(report['reports']), 0)
+
 
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
   @patch.object(CustomServiceOrchestrator, "runCommand")

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py b/ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py
index 0304adc..563d250 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py
@@ -68,7 +68,7 @@ class TestCustomServiceOrchestrator(TestCase):
   def test_add_reg_listener_to_controller(self, FileCache_mock):
     FileCache_mock.return_value = None
     dummy_controller = MagicMock()
-    config = AmbariConfig().getConfig()
+    config = AmbariConfig()
     tempdir = tempfile.gettempdir()
     config.set('agent', 'prefix', tempdir)
     CustomServiceOrchestrator(config, dummy_controller)
@@ -204,7 +204,7 @@ class TestCustomServiceOrchestrator(TestCase):
   def test_resolve_script_path(self, FileCache_mock, exists_mock):
     FileCache_mock.return_value = None
     dummy_controller = MagicMock()
-    config = AmbariConfig().getConfig()
+    config = AmbariConfig()
     orchestrator = CustomServiceOrchestrator(config, dummy_controller)
     # Testing existing path
     exists_mock.return_value = True
@@ -220,7 +220,7 @@ class TestCustomServiceOrchestrator(TestCase):
     except AgentException:
       pass # Expected
 
-  @patch.object(FileCache, "get_dashboard_base_dir")
+  @patch.object(FileCache, "get_custom_resources_subdir")
   @patch.object(CustomServiceOrchestrator, "resolve_script_path")
   @patch.object(CustomServiceOrchestrator, "resolve_hook_script_path")
   @patch.object(FileCache, "get_host_scripts_base_dir")
@@ -235,7 +235,7 @@ class TestCustomServiceOrchestrator(TestCase):
                       get_host_scripts_base_dir_mock, 
                       resolve_hook_script_path_mock, 
                       resolve_script_path_mock,
-                      get_dashboard_base_dir_mock):
+                      get_custom_resources_subdir_mock):
     
     FileCache_mock.return_value = None
     command = {
@@ -267,7 +267,6 @@ class TestCustomServiceOrchestrator(TestCase):
     unix_process_id = 111
     orchestrator.commands_in_progress = {command['taskId']: unix_process_id}
     get_hook_base_dir_mock.return_value = "/hooks/"
-    get_dashboard_base_dir_mock.return_value = "/dashboards/"
     # normal run case
     run_file_mock.return_value = {
         'stdout' : 'sss',
@@ -278,7 +277,6 @@ class TestCustomServiceOrchestrator(TestCase):
     self.assertEqual(ret['exitcode'], 0)
     self.assertTrue(run_file_mock.called)
     self.assertEqual(run_file_mock.call_count, 3)
-    self.assertTrue(get_dashboard_base_dir_mock.called)
 
     run_file_mock.reset_mock()
 
@@ -301,25 +299,6 @@ class TestCustomServiceOrchestrator(TestCase):
 
     run_file_mock.reset_mock()
 
-    # For role=METRICS_GRAFANA, dashboards should be sync'd
-    command['role'] = 'METRICS_GRAFANA'
-    get_dashboard_base_dir_mock.reset_mock()
-    get_dashboard_base_dir_mock.return_value = "/dashboards/"
-
-    run_file_mock.return_value = {
-        'stdout' : 'sss',
-        'stderr' : 'eee',
-        'exitcode': 0,
-      }
-    ret = orchestrator.runCommand(command, "out.txt", "err.txt")
-    self.assertEqual(ret['exitcode'], 0)
-    self.assertTrue(run_file_mock.called)
-    self.assertEqual(run_file_mock.call_count, 3)
-    self.assertTrue(get_dashboard_base_dir_mock.called)
-
-    command['role'] = 'REGION_SERVER'
-    run_file_mock.reset_mock()
-
     # unknown script type case
     command['commandParams']['script_type'] = "SOME_TYPE"
     ret = orchestrator.runCommand(command, "out.txt", "err.txt")
@@ -332,7 +311,6 @@ class TestCustomServiceOrchestrator(TestCase):
 
     pass
 
-  @patch.object(FileCache, "get_dashboard_base_dir")
   @patch("ambari_commons.shell.kill_process_with_children")
   @patch.object(CustomServiceOrchestrator, "resolve_script_path")
   @patch.object(CustomServiceOrchestrator, "resolve_hook_script_path")
@@ -347,8 +325,7 @@ class TestCustomServiceOrchestrator(TestCase):
                       get_hook_base_dir_mock, get_service_base_dir_mock,
                       get_host_scripts_base_dir_mock,
                       resolve_hook_script_path_mock, resolve_script_path_mock,
-                      kill_process_with_children_mock,
-                      get_dashboard_base_dir_mock):
+                      kill_process_with_children_mock):
     FileCache_mock.return_value = None
     command = {
       'role' : 'REGION_SERVER',
@@ -378,7 +355,6 @@ class TestCustomServiceOrchestrator(TestCase):
     unix_process_id = 111
     orchestrator.commands_in_progress = {command['taskId']: unix_process_id}
     get_hook_base_dir_mock.return_value = "/hooks/"
-    get_dashboard_base_dir_mock.return_value = "/dashboards/"
     run_file_mock_return_value = {
       'stdout' : 'killed',
       'stderr' : 'killed',

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-agent/src/test/python/ambari_agent/TestFileCache.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestFileCache.py b/ambari-agent/src/test/python/ambari_agent/TestFileCache.py
index fbefc2b..00f6b69 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestFileCache.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestFileCache.py
@@ -119,10 +119,16 @@ class TestFileCache(TestCase):
 
 
   @patch.object(FileCache, "provide_directory")
-  def test_get_dashboard_base_dir(self, provide_directory_mock):
+  def test_get_custom_resources_subdir(self, provide_directory_mock):
     provide_directory_mock.return_value = "dummy value"
     fileCache = FileCache(self.config)
-    res = fileCache.get_dashboard_base_dir("server_url_pref")
+    command = {
+      'commandParams': {
+        'custom_folder' : 'dashboards'
+      }
+    }
+
+    res = fileCache.get_custom_resources_subdir(command, "server_url_pref")
     self.assertEquals(
       pprint.pformat(provide_directory_mock.call_args_list[0][0]),
       "('/var/lib/ambari-agent/cache', 'dashboards', 'server_url_pref')")

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-agent/src/test/python/ambari_agent/TestHardware.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestHardware.py b/ambari-agent/src/test/python/ambari_agent/TestHardware.py
index 038b2f8..ff3b40b 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestHardware.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestHardware.py
@@ -25,6 +25,7 @@ from mock.mock import patch, MagicMock, Mock
 import unittest
 import platform
 import socket
+import os
 from only_for_platform import not_for_platform, PLATFORM_WINDOWS
 from ambari_agent import hostname
 from ambari_agent.Hardware import Hardware
@@ -373,6 +374,75 @@ SwapFree:        1598676 kB
     self.assertEquals(2, json_mock.call_count)
     self.assertEquals('value', result['key'])
 
+  @patch.object(Hardware, "_chk_writable_mount")
+  @patch("ambari_agent.Hardware.path_isfile")
+  def test_osdisks_blacklist(self, isfile_mock, chk_writable_mount_mock):
+    df_output = \
+      """Filesystem                                                                                        Type  1024-blocks     Used Available Capacity Mounted on
+      /dev/mapper/docker-253:0-4980899-d45c264d37ab18c8ed14f890f4d59ac2b81e1c52919eb36a79419787209515f3 xfs      31447040  1282384  30164656       5% /
+      tmpfs                                                                                             tmpfs    32938336        4  32938332       1% /dev
+      tmpfs                                                                                             tmpfs    32938336        0  32938336       0% /sys/fs/cgroup
+      /dev/mapper/fedora-root                                                                           ext4    224161316 12849696 199901804       7% /etc/resolv.conf
+      /dev/mapper/fedora-root                                                                           ext4    224161316 12849696 199901804       7% /etc/hostname
+      /dev/mapper/fedora-root                                                                           ext4    224161316 12849696 199901804       7% /etc/hosts
+      shm                                                                                               tmpfs       65536        0     65536       0% /dev/shm
+      /dev/mapper/fedora-root                                                                           ext4    224161316 12849696 199901804       7% /run/secrets
+      /dev/mapper/fedora-root                                                                           ext4    224161316 12849696 199901804       7% /mnt/blacklisted_mount
+      /dev/mapper/fedora-root                                                                           ext4    224161316 12849696 199901804       7% /mnt/blacklisted_mount/sub-dir
+      """
+
+    def isfile_side_effect(path):
+      assume_files = ["/etc/resolv.conf", "/etc/hostname", "/etc/hosts"]
+      return path in assume_files
+
+    def chk_writable_mount_side_effect(path):
+      assume_read_only = ["/run/secrets"]
+      return path not in assume_read_only
+
+    isfile_mock.side_effect = isfile_side_effect
+    chk_writable_mount_mock.side_effect = chk_writable_mount_side_effect
+
+    config_dict = {
+      "agent": {
+        "ignore_mount_points": "/mnt/blacklisted_mount"
+      }
+    }
+
+    with patch("subprocess.Popen") as open_mock:
+      proc_mock = Mock()
+      attr = {
+        'communicate.return_value': [
+          df_output
+        ]
+      }
+      proc_mock.configure_mock(**attr)
+      open_mock.return_value = proc_mock
+
+      def conf_get(section, key, default=""):
+        if section in config_dict and key in config_dict[section]:
+          return config_dict[section][key]
+
+        return default
+
+      def has_option(section, key):
+        return section in config_dict and key in config_dict[section]
+
+      conf = Mock()
+      attr = {
+        'get.side_effect': conf_get,
+        'has_option.side_effect': has_option
+      }
+      conf.configure_mock(**attr)
+
+      result = Hardware.osdisks(conf)
+
+    self.assertEquals(1, len(result))
+
+    expected_mounts_left = ["/"]
+    mounts_left = [item["mountpoint"] for item in result]
+
+    self.assertEquals(expected_mounts_left, mounts_left)
+
 
 if __name__ == "__main__":
   unittest.main()

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-common/src/main/python/ambari_commons/ambari_metrics_helper.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/ambari_commons/ambari_metrics_helper.py b/ambari-common/src/main/python/ambari_commons/ambari_metrics_helper.py
index 7b4e8f5..bfc786c 100644
--- a/ambari-common/src/main/python/ambari_commons/ambari_metrics_helper.py
+++ b/ambari-common/src/main/python/ambari_commons/ambari_metrics_helper.py
@@ -22,35 +22,41 @@ import os
 import random
 from resource_management.libraries.functions import conf_select
 
-DEFAULT_COLLECTOR_SUFFIX = '.sink.timeline.collector'
+DEFAULT_COLLECTOR_SUFFIX = '.sink.timeline.collector.hosts'
 DEFAULT_METRICS2_PROPERTIES_FILE_NAME = 'hadoop-metrics2.properties'
 
 def select_metric_collector_for_sink(sink_name):
-    # TODO check '*' sink_name
+  # TODO check '*' sink_name
 
-    all_collectors_string = get_metric_collectors_from_properties_file(sink_name)
-    all_collectors_list = all_collectors_string.split(',')
-    return select_metric_collector_hosts_from_hostnames(all_collectors_list)
+  all_collectors_string = get_metric_collectors_from_properties_file(sink_name)
+  return select_metric_collector_hosts_from_hostnames(all_collectors_string)
 
-def select_metric_collector_hosts_from_hostnames(hosts):
-    return random.choice(hosts)
+def select_metric_collector_hosts_from_hostnames(comma_separated_hosts):
+  if comma_separated_hosts:
+    hosts = comma_separated_hosts.split(',')
+    return get_random_host(hosts)
+  else:
+    return 'localhost'
+
+def get_random_host(hosts):
+  return random.choice(hosts)
 
 def get_metric_collectors_from_properties_file(sink_name):
-    hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-    props = load_properties_from_file(os.path.join(hadoop_conf_dir, DEFAULT_METRICS2_PROPERTIES_FILE_NAME))
-    return props.get(sink_name + DEFAULT_COLLECTOR_SUFFIX)
+  hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+  props = load_properties_from_file(os.path.join(hadoop_conf_dir, DEFAULT_METRICS2_PROPERTIES_FILE_NAME))
+  return props.get(sink_name + DEFAULT_COLLECTOR_SUFFIX)
 
 def load_properties_from_file(filepath, sep='=', comment_char='#'):
-    """
-    Read the file passed as parameter as a properties file.
-    """
-    props = {}
-    with open(filepath, "rt") as f:
-        for line in f:
-            l = line.strip()
-            if l and not l.startswith(comment_char):
-                key_value = l.split(sep)
-                key = key_value[0].strip()
-                value = sep.join(key_value[1:]).strip('" \t')
-                props[key] = value
-    return props
\ No newline at end of file
+  """
+  Read the file passed as parameter as a properties file.
+  """
+  props = {}
+  with open(filepath, "rt") as f:
+    for line in f:
+      l = line.strip()
+      if l and not l.startswith(comment_char):
+        key_value = l.split(sep)
+        key = key_value[0].strip()
+        value = sep.join(key_value[1:]).strip('" \t')
+        props[key] = value
+  return props
\ No newline at end of file
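
After this change the sink property holds a comma-separated host list and the helper degrades to 'localhost' when it is empty. A short sketch of that selection behaviour (host names below are illustrative only):

  import random

  def pick_collector_host(comma_separated_hosts):
    # Choose one collector at random, or fall back to localhost when none are configured.
    if comma_separated_hosts:
      return random.choice(comma_separated_hosts.split(','))
    return 'localhost'

  assert pick_collector_host('') == 'localhost'
  assert pick_collector_host('host1.example.com,host2.example.com') in \
      ('host1.example.com', 'host2.example.com')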

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-common/src/main/python/ambari_commons/logging_utils.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/ambari_commons/logging_utils.py b/ambari-common/src/main/python/ambari_commons/logging_utils.py
index 4ebe696..6535ebc 100644
--- a/ambari-common/src/main/python/ambari_commons/logging_utils.py
+++ b/ambari-common/src/main/python/ambari_commons/logging_utils.py
@@ -67,7 +67,10 @@ def set_debug_mode_from_options(options):
 #
 # Prints an "info" messsage.
 #
-def print_info_msg(msg):
+def print_info_msg(msg, forced=False):
+  if forced:
+    print("INFO: " + msg)
+    return
   if _VERBOSE:
     print("INFO: " + msg)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-common/src/main/python/ambari_commons/parallel_processing.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/ambari_commons/parallel_processing.py b/ambari-common/src/main/python/ambari_commons/parallel_processing.py
new file mode 100644
index 0000000..c5a95de
--- /dev/null
+++ b/ambari-common/src/main/python/ambari_commons/parallel_processing.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import logging
+from multiprocessing import Process, Queue
+
+logger = logging.getLogger()
+
+SUCCESS = "SUCCESS"
+FAILED = "FAILED"
+
+class ParallelProcessResult(object):
+    def __init__(self, element, status, result):
+        self.result = result
+        self.status = status
+        self.element = element
+
+class ParallelProcess(Process):
+
+
+    def __init__(self, function, element, params, queue):
+        self.function = function
+        self.element = element
+        self.params = params
+        self.queue = queue
+        super(ParallelProcess, self).__init__()
+
+    def return_name(self):
+        ## NOTE: self.name is an attribute of multiprocessing.Process
+        return "Process running function '%s' for element '%s'" % (self.function, self.element)
+
+    def run(self):
+        try:
+            result = self.function(self.element, self.params)
+            self.queue.put(ParallelProcessResult(self.element, SUCCESS, result))
+        except Exception as e:
+            self.queue.put(ParallelProcessResult(self.element, FAILED,
+                            "Exception while running function '%s' for '%s'. Reason : %s" % (self.function, self.element, str(e))))
+        return
+
+def execute_in_parallel(function, array, params, wait_for_all = False):
+    logger.info("Started running %s for %s" % (function, array))
+    processs = []
+    q = Queue()
+    counter = len(array)
+    results = {}
+
+    for element in array:
+        process = ParallelProcess(function, element, params, q)
+        process.start()
+        processs.append(process)
+
+    while counter > 0:
+        tmp = q.get()
+        counter-=1
+        results[tmp.element] = tmp
+        if tmp.status == SUCCESS and not wait_for_all:
+            counter = 0
+
+    for process in processs:
+        process.terminate()
+
+    logger.info("Finished running %s for %s" % (function, array))
+
+    return results
+
+def func(elem, params):
+    if elem == 'S':
+        return "lalala"
+    else:
+        raise Exception('Exception')
+
+if __name__ == "__main__":
+    results = execute_in_parallel(func, ['F', 'BF', 'S'], None)
+    for result in results:
+        print results[result].element
+        print results[result].status
+        print results[result].result
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-common/src/main/python/resource_management/libraries/functions/constants.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/constants.py b/ambari-common/src/main/python/resource_management/libraries/functions/constants.py
index 3431495..46562e0 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/constants.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/constants.py
@@ -97,6 +97,7 @@ class StackFeature:
   RANGER_PID_SUPPORT = "ranger_pid_support"
   RANGER_KMS_PID_SUPPORT = "ranger_kms_pid_support"
   RANGER_ADMIN_PASSWD_CHANGE = "ranger_admin_password_change"
+  RANGER_SETUP_DB_ON_START = "ranger_setup_db_on_start"
   STORM_METRICS_APACHE_CLASSES = "storm_metrics_apache_classes"
   SPARK_JAVA_OPTS_SUPPORT = "spark_java_opts_support"
   ATLAS_HBASE_SETUP = "atlas_hbase_setup"

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py b/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
index 519c88b..31a9be4 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
@@ -188,7 +188,7 @@ def _get_single_version_from_stack_select():
     Logger.error("Could not verify stack version by calling '{0}'. Return Code: {1}, Output: {2}.".format(get_stack_versions_cmd, str(code), str(out)))
     return None
 
-  matches = re.findall(r"([\d\.]+\-\d+)", out)
+  matches = re.findall(r"([\d\.]+(?:-\d+)?)", out)
 
   if matches and len(matches) == 1:
     stack_version = matches[0]

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-common/src/main/python/resource_management/libraries/functions/dynamic_variable_interpretation.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/dynamic_variable_interpretation.py b/ambari-common/src/main/python/resource_management/libraries/functions/dynamic_variable_interpretation.py
index ca8fe19..74db04a 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/dynamic_variable_interpretation.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/dynamic_variable_interpretation.py
@@ -185,7 +185,7 @@ def copy_tarballs_to_hdfs(tarball_prefix, stack_select_component_name, component
                    (get_stack_version_cmd, str(code), str(out)))
     return 1
 
-  matches = re.findall(r"([\d\.]+\-\d+)", out)
+  matches = re.findall(r"([\d\.]+(?:-\d+)?)", out)
   stack_version = matches[0] if matches and len(matches) > 0 else None
 
   if not stack_version:
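
Both this hunk and the copy_tarball.py hunk above relax the stack-version regular expression in the same way: the trailing build number becomes optional. A quick sketch of the difference between the two patterns (sample version strings are illustrative only):

  import re

  OLD = r"([\d\.]+\-\d+)"
  NEW = r"([\d\.]+(?:-\d+)?)"

  assert re.findall(OLD, "2.5.0.0-1234") == ["2.5.0.0-1234"]
  assert re.findall(NEW, "2.5.0.0-1234") == ["2.5.0.0-1234"]
  assert re.findall(OLD, "2.5.0.0") == []              # old pattern requires a build number
  assert re.findall(NEW, "2.5.0.0") == ["2.5.0.0"]     # new pattern accepts a bare version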

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-common/src/main/python/resource_management/libraries/functions/package_conditions.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/package_conditions.py b/ambari-common/src/main/python/resource_management/libraries/functions/package_conditions.py
index 8257022..5a16061 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/package_conditions.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/package_conditions.py
@@ -70,7 +70,7 @@ def should_install_infra_solr():
 
 def should_install_infra_solr_client():
   config = Script.get_config()
-  return _has_applicable_local_component(config, ['INFRA_SOLR_CLIENT', 'ATLAS_SERVER', 'RANGER_ADMIN'])
+  return _has_applicable_local_component(config, ['INFRA_SOLR_CLIENT', 'ATLAS_SERVER', 'RANGER_ADMIN', 'LOGSEARCH_SERVER'])
 
 def should_install_logsearch_portal():
   config = Script.get_config()

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-common/src/main/python/resource_management/libraries/functions/setup_atlas_hook.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/setup_atlas_hook.py b/ambari-common/src/main/python/resource_management/libraries/functions/setup_atlas_hook.py
index a1d2f95..eeae4bc 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/setup_atlas_hook.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/setup_atlas_hook.py
@@ -117,6 +117,8 @@ def setup_atlas_hook(service_name, service_props, atlas_hook_filepath, owner, gr
   """
   import params
   atlas_props = default('/configurations/application-properties', {})
+  merged_props = {}
+  merged_props.update(service_props)
 
   if has_atlas_in_cluster():
     # Take the subset
@@ -131,12 +133,12 @@ def setup_atlas_hook(service_name, service_props, atlas_hook_filepath, owner, gr
 
     merged_props.update(service_props)
 
-    Logger.info(format("Generating Atlas Hook config file {atlas_hook_filepath}"))
-    PropertiesFile(atlas_hook_filepath,
-                   properties = merged_props,
-                   owner = owner,
-                   group = group,
-                   mode = 0644)
+  Logger.info(format("Generating Atlas Hook config file {atlas_hook_filepath}"))
+  PropertiesFile(atlas_hook_filepath,
+           properties = merged_props,
+           owner = owner,
+           group = group,
+           mode = 0644)
 
 
 def setup_atlas_jar_symlinks(hook_name, jar_source_dir):
@@ -157,22 +159,23 @@ def setup_atlas_jar_symlinks(hook_name, jar_source_dir):
   """
   import params
 
-  if has_atlas_in_cluster():
-    atlas_home_dir = os.environ['METADATA_HOME_DIR'] if 'METADATA_HOME_DIR' in os.environ \
-      else format("{stack_root}/current/atlas-server")
-
-    # Will only exist if this host contains Atlas Server
-    atlas_hook_dir = os.path.join(atlas_home_dir, "hook", hook_name)
-    if os.path.exists(atlas_hook_dir):
-      Logger.info("Atlas Server is present on this host, will symlink jars inside of %s to %s if not already done." %
-                  (jar_source_dir, atlas_hook_dir))
-
-      src_files = os.listdir(atlas_hook_dir)
-      for file_name in src_files:
-        atlas_hook_file_name = os.path.join(atlas_hook_dir, file_name)
-        source_lib_file_name = os.path.join(jar_source_dir, file_name)
-        if os.path.isfile(atlas_hook_file_name):
-          Link(source_lib_file_name, to=atlas_hook_file_name)
+  atlas_home_dir = os.environ['METADATA_HOME_DIR'] if 'METADATA_HOME_DIR' in os.environ \
+    else format("{stack_root}/current/atlas-server")
+
+  # Will only exist if this host contains Atlas Server
+  atlas_hook_dir = os.path.join(atlas_home_dir, "hook", hook_name)
+  if os.path.exists(atlas_hook_dir):
+    Logger.info("Atlas Server is present on this host, will symlink jars inside of %s to %s if not already done." %
+                (jar_source_dir, atlas_hook_dir))
+
+    src_files = os.listdir(atlas_hook_dir)
+    for file_name in src_files:
+      atlas_hook_file_name = os.path.join(atlas_hook_dir, file_name)
+      source_lib_file_name = os.path.join(jar_source_dir, file_name)
+      if os.path.isfile(atlas_hook_file_name):
+        Link(source_lib_file_name, to=atlas_hook_file_name)
+  else:
+    Logger.info("Atlas hook directory path {0} doesn't exist".format(atlas_hook_dir))
 
 def install_atlas_hook_packages(atlas_plugin_package, atlas_ubuntu_plugin_package, host_sys_prepped,
                                 agent_stack_retry_on_unavailability, agent_stack_retry_count):

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-common/src/main/python/resource_management/libraries/functions/simulate_perf_cluster_alert_behaviour.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/simulate_perf_cluster_alert_behaviour.py b/ambari-common/src/main/python/resource_management/libraries/functions/simulate_perf_cluster_alert_behaviour.py
new file mode 100644
index 0000000..736e5e3
--- /dev/null
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/simulate_perf_cluster_alert_behaviour.py
@@ -0,0 +1,108 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+__all__ = ["simulate_perf_cluster_alert_behaviour"]
+
+import logging
+import random
+import time
+
+from datetime import datetime
+from resource_management.core.exceptions import Fail
+
+RESULT_CODE_OK = 'OK'
+RESULT_CODE_CRITICAL = 'CRITICAL'
+RESULT_CODE_UNKNOWN = 'UNKNOWN'
+
+OK_MESSAGE = 'Ok'
+FAIL_MESSAGE = 'Expected Fail'
+UNKNOWN_MESSAGE = 'Expected Unknown'
+
+logger = logging.getLogger('ambari_alerts')
+
+return_values_map = {"true":[RESULT_CODE_OK, OK_MESSAGE], "false":[RESULT_CODE_CRITICAL, FAIL_MESSAGE],
+                     "none":[RESULT_CODE_UNKNOWN, UNKNOWN_MESSAGE]}
+
+def simulate_perf_cluster_alert_behaviour(alert_behaviour_properties, configurations):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  alert_behaviour_properties (dictionary): maps behaviour property names to
+    the configuration keys that drive the simulated alert behaviour
+  configurations (dictionary): a mapping of configuration key to value
+  """
+  alert_behaviour_type=None
+  alert_behaviour_type_key=alert_behaviour_properties["alert_behaviour_type"]
+  if alert_behaviour_type_key in configurations:
+    alert_behaviour_type = configurations[alert_behaviour_type_key].lower()
+
+  if alert_behaviour_type == "percentage":
+    alert_success_percentage=None
+    alert_success_percentage_key=alert_behaviour_properties["alert_success_percentage"]
+
+    if alert_success_percentage_key in configurations:
+      alert_success_percentage = configurations[alert_success_percentage_key]
+
+    if alert_success_percentage:
+      random_number = random.uniform(0, 100)
+      if random_number <= int(alert_success_percentage):
+        return (RESULT_CODE_OK, [OK_MESSAGE])
+      else:
+        return (RESULT_CODE_CRITICAL, [FAIL_MESSAGE])
+    else:
+      raise Fail("Percentage behaviour was set but alert.success.percentage was not set!")
+  elif alert_behaviour_type == "timeout":
+    alert_timeout_return_value=None
+    alert_timeout_secs=None
+    alert_timeout_return_value_key=alert_behaviour_properties["alert_timeout_return_value"]
+    alert_timeout_secs_key=alert_behaviour_properties["alert_timeout_secs"]
+
+    if alert_timeout_return_value_key in configurations:
+      alert_timeout_return_value = configurations[alert_timeout_return_value_key].lower()
+
+    if alert_timeout_secs_key in configurations:
+      alert_timeout_secs = configurations[alert_timeout_secs_key]
+
+    if alert_timeout_return_value and alert_timeout_secs:
+      logger.info("Sleeping for {0} seconds".format(alert_timeout_secs))
+      print "Sleeping for {0} seconds".format(alert_timeout_secs)
+      time.sleep(int(alert_timeout_secs))
+      return (return_values_map[alert_timeout_return_value][0], [return_values_map[alert_timeout_return_value][1]])
+    else:
+      raise Fail("Timeout behaviour was set but alert.timeout.return.value/alert.timeout.secs were not set!")
+  elif alert_behaviour_type == "flip":
+    alert_flip_interval_mins=None
+    alert_flip_interval_mins_key=alert_behaviour_properties["alert_flip_interval_mins"]
+
+    if alert_flip_interval_mins_key in configurations:
+      alert_flip_interval_mins = configurations[alert_flip_interval_mins_key]
+
+    if alert_flip_interval_mins:
+      curr_time = datetime.utcnow()
+      return_value = ((curr_time.minute / int(alert_flip_interval_mins)) % 2) == 0
+      return (return_values_map[str(return_value).lower()][0], [return_values_map[str(return_value).lower()][1]])
+    else:
+      raise Fail("Flip behaviour was set but alert.flip.interval.mins was not set!")
+
+
+
+  result_code = RESULT_CODE_OK
+  label = OK_MESSAGE
+  return (result_code, [label])
\ No newline at end of file
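
For orientation, here is a minimal usage sketch of the new helper. It is not part of the commit; the configuration key names in ALERT_BEHAVIOUR_PROPERTIES and the execute() wrapper are illustrative assumptions about how a perf-stack alert script could wire the helper in:

    # Hypothetical alert script wiring (illustrative only; the configuration
    # keys below are assumed, not taken from this patch).
    from resource_management.libraries.functions.simulate_perf_cluster_alert_behaviour \
      import simulate_perf_cluster_alert_behaviour

    ALERT_BEHAVIOUR_PROPERTIES = {
      "alert_behaviour_type": "cluster-env/alert_behaviour_type",
      "alert_success_percentage": "cluster-env/alert_success_percentage",
      "alert_timeout_return_value": "cluster-env/alert_timeout_return_value",
      "alert_timeout_secs": "cluster-env/alert_timeout_secs",
      "alert_flip_interval_mins": "cluster-env/alert_flip_interval_mins"
    }

    def execute(configurations={}, parameters={}, host_name=None):
      # Delegates the OK/CRITICAL/UNKNOWN decision to the shared helper.
      return simulate_perf_cluster_alert_behaviour(ALERT_BEHAVIOUR_PROPERTIES, configurations)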

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-common/src/main/python/resource_management/libraries/script/dummy.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/dummy.py b/ambari-common/src/main/python/resource_management/libraries/script/dummy.py
index 2a48de3..3dcece5 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/dummy.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/dummy.py
@@ -31,6 +31,9 @@ from ambari_commons.constants import AMBARI_SUDO_BINARY
 from resource_management.core.exceptions import ComponentIsNotRunning
 
 
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+
 class Dummy(Script):
   """
   Dummy component to be used for performance testing since doesn't actually run a service.
@@ -75,6 +78,14 @@ class Dummy(Script):
     print "Start"
     self.prepare()
 
+    if self.config['configurations']['cluster-env']['security_enabled'] :
+      print "Executing kinit... "
+      kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+      principal_replaced = self.config['configurations'][self.principal_conf_name][self.principal_name].replace("_HOST", self.host_name)
+      keytab_path_replaced = self.config['configurations'][self.keytab_conf_name][self.keytab_name].replace("_HOST", self.host_name)
+      Execute("%s -kt %s %s" % (kinit_path_local, keytab_path_replaced, principal_replaced),
+              user="root")
+
     if not os.path.isfile(self.pid_file):
       print "Creating pid file: %s" % self.pid_file
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperDateTest.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperDateTest.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperDateTest.java
index 667c9ff..08680f6 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperDateTest.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperDateTest.java
@@ -141,60 +141,4 @@ public class MapperDateTest {
     assertEquals("Invalid value wasn't returned as it is", invalidValue, mappedValue);
     assertTrue("jsonObj is not empty", jsonObj.isEmpty());
   }
-  
-  @Test
-  public void testMapperDate_patternWithoutYear_previousYearLog() throws Exception {
-    LOG.info("testMapperDate_patternWithoutYear_previousYearLog()");
-    String fieldName = "logtime";
-    Calendar currentCalendar = Calendar.getInstance();
-    Map<String, Object> mapConfigs = new HashMap<>();
-    mapConfigs.put("target_date_pattern", "yyyy-MM-dd HH:mm:ss.SSS");
-    String srcDatePattern ="MMM dd HH:mm:ss";
-    mapConfigs.put("src_date_pattern", srcDatePattern);
-    MapperDate mapperDate = new MapperDate();
-    assertTrue("Could not initialize!", mapperDate.init(null, fieldName, null, mapConfigs));
-    Map<String, Object> jsonObj = new HashMap<>();
-    Calendar nextMonthCalendar = Calendar.getInstance();
-    
-    nextMonthCalendar.set(Calendar.MONTH, currentCalendar.get(Calendar.MONTH)+1 );
-    String inputDateStr = new SimpleDateFormat("MMM").format(nextMonthCalendar.getTime()) + " 01 12:01:45";
-    Object mappedValue = mapperDate.apply(jsonObj, inputDateStr);
-    Date mappedDateValue = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS").parse(mappedValue.toString());
-    String mappedDateValueStr = new SimpleDateFormat(srcDatePattern).format(mappedDateValue);
-    assertEquals(Date.class, mappedDateValue.getClass());
-    
-    int expectedLogYear = currentCalendar.get(Calendar.YEAR)-1;
-    Calendar mapppedValueCalendar = Calendar.getInstance();
-    mapppedValueCalendar.setTime(mappedDateValue);
-    assertEquals("Mapped year wasn't matched properly", expectedLogYear, mapppedValueCalendar.get(Calendar.YEAR));
-    assertEquals("Mapped date wasn't matched properly", inputDateStr, mappedDateValueStr);
-    assertEquals("Value wasn't put into jsonObj",mappedValue, jsonObj.remove(fieldName));
-    assertTrue("jsonObj is not empty", jsonObj.isEmpty());
-  }
-  
-  @Test
-  public void testMapperDate_patternWithoutYear_currentYearLog() throws Exception {
-    LOG.info("testMapperDate_patternWithoutYear_currentYearLog()");
-    String fieldName = "logtime";
-    Calendar currentCalendar = Calendar.getInstance();
-    Map<String, Object> mapConfigs = new HashMap<>();
-    mapConfigs.put("target_date_pattern", "yyyy-MM-dd HH:mm:ss.SSS");
-    String srcDatePattern ="MMM dd HH:mm:ss";
-    mapConfigs.put("src_date_pattern", srcDatePattern);
-    MapperDate mapperDate = new MapperDate();
-    assertTrue("Could not initialize!", mapperDate.init(null, fieldName, null, mapConfigs));
-    Map<String, Object> jsonObj = new HashMap<>();
-    String inputDateStr = new SimpleDateFormat("MMM").format(currentCalendar.getTime()) + " 01 12:01:45";
-    Object mappedValue = mapperDate.apply(jsonObj, inputDateStr);
-    Date mappedDateValue = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS").parse(mappedValue.toString());
-    String mappedDateValueStr = new SimpleDateFormat(srcDatePattern).format(mappedDateValue);
-    assertEquals(Date.class, mappedDateValue.getClass());
-    int expectedLogYear = currentCalendar.get(Calendar.YEAR);
-    Calendar mapppedValueCalendar = Calendar.getInstance();
-    mapppedValueCalendar.setTime(mappedDateValue);
-    assertEquals("Mapped year wasn't matched properly", expectedLogYear, mapppedValueCalendar.get(Calendar.YEAR));
-    assertEquals("Mapped date wasn't matched properly", inputDateStr, mappedDateValueStr);
-    assertEquals("Value wasn't put into jsonObj",mappedValue, jsonObj.remove(fieldName));
-    assertTrue("jsonObj is not empty", jsonObj.isEmpty());
-  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-logsearch/ambari-logsearch-portal/src/main/java/org/apache/ambari/logsearch/LogSearch.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-portal/src/main/java/org/apache/ambari/logsearch/LogSearch.java b/ambari-logsearch/ambari-logsearch-portal/src/main/java/org/apache/ambari/logsearch/LogSearch.java
index 2c3f4f5..14f83cf 100644
--- a/ambari-logsearch/ambari-logsearch-portal/src/main/java/org/apache/ambari/logsearch/LogSearch.java
+++ b/ambari-logsearch/ambari-logsearch-portal/src/main/java/org/apache/ambari/logsearch/LogSearch.java
@@ -30,6 +30,7 @@ import org.apache.ambari.logsearch.common.ManageStartEndTime;
 import org.apache.ambari.logsearch.common.PropertiesHelper;
 import org.apache.ambari.logsearch.conf.ApplicationConfig;
 import org.apache.ambari.logsearch.util.SSLUtil;
+import org.apache.ambari.logsearch.web.listener.LogSearchSessionListener;
 import org.apache.commons.lang.StringUtils;
 import org.apache.log4j.Logger;
 import org.eclipse.jetty.server.Connector;
@@ -66,7 +67,7 @@ public class LogSearch {
 
   private static final String WEB_RESOURCE_FOLDER = "webapps/app";
   private static final String ROOT_CONTEXT = "/";
-  private static final Integer SESSION_TIMEOUT = 30;
+  private static final Integer SESSION_TIMEOUT = 60 * 30;
 
 
   public static void main(String[] argv) {
@@ -136,6 +137,7 @@ public class LogSearch {
     context.setBaseResource(Resource.newResource(webResourceBase));
     context.setContextPath(ROOT_CONTEXT);
     context.setParentLoaderPriority(true);
+    context.addEventListener(new LogSearchSessionListener());
 
     // Configure Spring
     context.addEventListener(new ContextLoaderListener());


[24/25] ambari git commit: Merge from branch-2.5

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-logsearch/ambari-logsearch-portal/src/main/java/org/apache/ambari/logsearch/dao/SolrCollectionDao.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-portal/src/main/java/org/apache/ambari/logsearch/dao/SolrCollectionDao.java b/ambari-logsearch/ambari-logsearch-portal/src/main/java/org/apache/ambari/logsearch/dao/SolrCollectionDao.java
index 834ba38..da76924 100644
--- a/ambari-logsearch/ambari-logsearch-portal/src/main/java/org/apache/ambari/logsearch/dao/SolrCollectionDao.java
+++ b/ambari-logsearch/ambari-logsearch-portal/src/main/java/org/apache/ambari/logsearch/dao/SolrCollectionDao.java
@@ -20,8 +20,12 @@ package org.apache.ambari.logsearch.dao;
 
 import org.apache.ambari.logsearch.conf.SolrPropsConfig;
 import org.apache.commons.lang.StringUtils;
+import org.apache.http.HttpResponse;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.impl.client.CloseableHttpClient;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.HttpClientUtil;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.response.CollectionAdminResponse;
 import org.apache.solr.common.SolrException;
@@ -33,6 +37,7 @@ import org.slf4j.LoggerFactory;
 import static org.apache.ambari.logsearch.solr.SolrConstants.CommonLogConstants.ROUTER_FIELD;
 
 import javax.inject.Named;
+import javax.ws.rs.core.Response;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -45,6 +50,8 @@ class SolrCollectionDao {
   private static final Logger LOG = LoggerFactory.getLogger(SolrCollectionDao.class);
 
   private static final int SETUP_RETRY_SECOND = 30;
+  private static final String MODIFY_COLLECTION_QUERY = "/admin/collections?action=MODIFYCOLLECTION&collection=%s&%s=%d";
+  private static final String MAX_SHARDS_PER_NODE = "maxShardsPerNode";
 
   /**
    * This will try to get the collections from the Solr. Ping doesn't work if
@@ -189,7 +196,16 @@ class SolrCollectionDao {
       }
     } else {
       LOG.info("Collection " + solrPropsConfig.getCollection() + " is already there. Will check whether it has the required shards");
-      Collection<String> existingShards = getShards(solrClient, solrPropsConfig);
+      Collection<Slice> slices = getSlices(solrClient, solrPropsConfig);
+      Collection<String> existingShards = getShards(slices, solrPropsConfig);
+      if (existingShards.size() < shardsList.size()) {
+        try {
+          updateMaximumNumberOfShardsPerCore(slices, solrPropsConfig);
+        } catch (Throwable t) {
+          returnValue = false;
+          LOG.error(String.format("Exception during updating collection (%s)", t));
+        }
+      }
       for (String shard : shardsList) {
         if (!existingShards.contains(shard)) {
           try {
@@ -216,10 +232,44 @@ class SolrCollectionDao {
     return returnValue;
   }
 
-  private Collection<String> getShards(CloudSolrClient solrClient, SolrPropsConfig solrPropsConfig) {
-    Collection<String> list = new HashSet<>();
+  private String getRandomBaseUrl(Collection<Slice> slices) {
+    String coreUrl = null;
+    if (slices != null) {
+      for (Slice slice : slices) {
+        if (!slice.getReplicas().isEmpty()) {
+          Replica replica = slice.getReplicas().iterator().next();
+          coreUrl = replica.getStr("base_url");
+          if (coreUrl != null) {
+            break;
+          }
+        }
+      }
+    }
+    return coreUrl;
+  }
+
+  private void updateMaximumNumberOfShardsPerCore(Collection<Slice> slices, SolrPropsConfig solrPropsConfig) throws IOException {
+    String baseUrl = getRandomBaseUrl(slices);
+    if (baseUrl != null) {
+      CloseableHttpClient httpClient = HttpClientUtil.createClient(null);
+      HttpGet request = new HttpGet(baseUrl + String.format(MODIFY_COLLECTION_QUERY,
+        solrPropsConfig.getCollection(), MAX_SHARDS_PER_NODE, calculateMaxShardsPerNode(solrPropsConfig)));
+      HttpResponse response = httpClient.execute(request);
+      if (response.getStatusLine().getStatusCode() != Response.Status.OK.getStatusCode()) {
+        throw new IllegalStateException(String.format("Cannot update collection (%s) - failed to increase the maximum number of shards per node", solrPropsConfig.getCollection()));
+      }
+    } else {
+      throw new IllegalStateException(String.format("Cannot get any core url for updating collection (%s)", solrPropsConfig.getCollection()));
+    }
+  }
+
+  private Collection<Slice> getSlices(CloudSolrClient solrClient, SolrPropsConfig solrPropsConfig) {
     ZkStateReader reader = solrClient.getZkStateReader();
-    Collection<Slice> slices = reader.getClusterState().getSlices(solrPropsConfig.getCollection());
+    return reader.getClusterState().getSlices(solrPropsConfig.getCollection());
+  }
+
+  private Collection<String> getShards(Collection<Slice> slices, SolrPropsConfig solrPropsConfig) {
+    Collection<String> list = new HashSet<>();
     for (Slice slice : slices) {
       for (Replica replica : slice.getReplicas()) {
         LOG.info("colName=" + solrPropsConfig.getCollection() + ", slice.name=" + slice.getName() + ", slice.state=" + slice.getState() +
@@ -245,7 +295,7 @@ class SolrCollectionDao {
     collectionCreateRequest.setNumShards(solrPropsConfig.getNumberOfShards());
     collectionCreateRequest.setReplicationFactor(solrPropsConfig.getReplicationFactor());
     collectionCreateRequest.setConfigName(solrPropsConfig.getConfigName());
-    collectionCreateRequest.setMaxShardsPerNode(solrPropsConfig.getReplicationFactor() * solrPropsConfig.getNumberOfShards());
+    collectionCreateRequest.setMaxShardsPerNode(calculateMaxShardsPerNode(solrPropsConfig));
     CollectionAdminResponse createResponse = collectionCreateRequest.process(solrClient);
     if (createResponse.getStatus() != 0) {
       LOG.error("Error creating collection. collectionName=" + solrPropsConfig.getCollection() + ", response=" + createResponse);
@@ -256,4 +306,8 @@ class SolrCollectionDao {
       return true;
     }
   }
+
+  private Integer calculateMaxShardsPerNode(SolrPropsConfig solrPropsConfig) {
+    return solrPropsConfig.getReplicationFactor() * solrPropsConfig.getNumberOfShards();
+  }
 }
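
For reference, the MODIFYCOLLECTION request built by updateMaximumNumberOfShardsPerCore resolves to a URL of the following shape; the host, port, collection name and value shown here are illustrative only:

    http://solr-host:8886/solr/admin/collections?action=MODIFYCOLLECTION&collection=hadoop_logs&maxShardsPerNode=10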

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-logsearch/ambari-logsearch-portal/src/main/java/org/apache/ambari/logsearch/web/listener/LogSearchSessionListener.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-portal/src/main/java/org/apache/ambari/logsearch/web/listener/LogSearchSessionListener.java b/ambari-logsearch/ambari-logsearch-portal/src/main/java/org/apache/ambari/logsearch/web/listener/LogSearchSessionListener.java
new file mode 100644
index 0000000..9fa5c80
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-portal/src/main/java/org/apache/ambari/logsearch/web/listener/LogSearchSessionListener.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.web.listener;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.servlet.http.HttpSessionEvent;
+import javax.servlet.http.HttpSessionListener;
+
+public class LogSearchSessionListener implements HttpSessionListener {
+
+  private Logger LOG = LoggerFactory.getLogger(LogSearchSessionListener.class);
+
+  private int numberOfSessions = 0;
+
+  @Override
+  public void sessionCreated(HttpSessionEvent event) {
+    synchronized (this) {
+      numberOfSessions++;
+    }
+    LOG.debug(String.format("New session is created (Id: %s). Number of sessions: %d", event.getSession().getId(), numberOfSessions));
+  }
+
+  @Override
+  public void sessionDestroyed(HttpSessionEvent event) {
+    synchronized (this) {
+      numberOfSessions--;
+    }
+    LOG.debug(String.format("Session destroyed (Id: %s). Number of sessions: %d", event.getSession().getId(), numberOfSessions));
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-logsearch/docker/test-config/logsearch/log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/docker/test-config/logsearch/log4j.xml b/ambari-logsearch/docker/test-config/logsearch/log4j.xml
index b80824b..d0e26ed 100644
--- a/ambari-logsearch/docker/test-config/logsearch/log4j.xml
+++ b/ambari-logsearch/docker/test-config/logsearch/log4j.xml
@@ -25,7 +25,7 @@
     <param name="maxFileSize" value="10MB" />
     <param name="maxBackupIndex" value="10" />
     <layout class="org.apache.log4j.PatternLayout">
-      <param name="ConversionPattern" value="%d [%t] %-5p %C{6} (%F:%L) - %m%n" />
+      <param name="ConversionPattern" value="%d %-5p [%t] %C{6} (%F:%L) - %m%n" />
     </layout>
   </appender>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
index 769d0c1..9bc3be5 100644
--- a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
+++ b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
@@ -84,6 +84,10 @@ public abstract class AbstractTimelineMetricsSink {
   public int ZK_CONNECT_TRY_COUNT = 10;
   public int ZK_SLEEP_BETWEEN_RETRY_TIME = 2000;
   public boolean shardExpired = true;
+  private int zookeeperMinBackoffTimeMins = 2;
+  private int zookeeperMaxBackoffTimeMins = 5;
+  private long zookeeperBackoffTimeMillis;
+  private long lastFailedZkRequestTime = 0l;
 
   private SSLSocketFactory sslSocketFactory;
 
@@ -132,6 +136,7 @@ public abstract class AbstractTimelineMetricsSink {
     metricSinkWriteShardStrategy = new MetricSinkWriteShardHostnameHashingStrategy(getHostname());
     collectorHAHelper = new MetricCollectorHAHelper(getZookeeperQuorum(),
       ZK_CONNECT_TRY_COUNT, ZK_SLEEP_BETWEEN_RETRY_TIME);
+    zookeeperBackoffTimeMillis = getZookeeperBackoffTimeMillis();
     isInitializedForHA = true;
   }
 
@@ -217,6 +222,11 @@ public abstract class AbstractTimelineMetricsSink {
       collectorHost = findPreferredCollectHost();
     }
 
+    if (collectorHost == null) {
+      LOG.warn("No live collector to send metrics to. Metrics to be sent will be discarded.");
+      return false;
+    }
+
     String connectUrl = getCollectorUri(collectorHost);
     String jsonData = null;
     LOG.debug("EmitMetrics connectUrl = "  + connectUrl);
@@ -337,13 +347,27 @@ public abstract class AbstractTimelineMetricsSink {
     }
 
     // Reach out to all configured collectors before Zookeeper
-    refreshCollectorsFromConfigured();
+    Collection<String> collectorHosts = getConfiguredCollectorHosts();
+    refreshCollectorsFromConfigured(collectorHosts);
 
     // Lookup Zookeeper for live hosts - max 10 seconds wait time
-    if (allKnownLiveCollectors.size() == 0 && getZookeeperQuorum() != null) {
-      //TODO : Bring back Zk fallback after proper curation.
-      LOG.info("No live collectors from configuration. Not requesting zookeeper...");
-      //allKnownLiveCollectors.addAll(collectorHAHelper.findLiveCollectorHostsFromZNode());
+    long currentTime = System.currentTimeMillis();
+    if (allKnownLiveCollectors.size() == 0 && getZookeeperQuorum() != null
+      && (currentTime - lastFailedZkRequestTime) > zookeeperBackoffTimeMillis) {
+
+      LOG.info("No live collectors from configuration. Requesting zookeeper...");
+      allKnownLiveCollectors.addAll(collectorHAHelper.findLiveCollectorHostsFromZNode());
+      boolean noNewCollectorFromZk = true;
+      for (String collectorHostFromZk : allKnownLiveCollectors) {
+        if (!collectorHosts.contains(collectorHostFromZk)) {
+          noNewCollectorFromZk = false;
+          break;
+        }
+      }
+      if (noNewCollectorFromZk) {
+        LOG.info("No new collector was found from Zookeeper. Will not request zookeeper for " + zookeeperBackoffTimeMillis + " millis");
+        lastFailedZkRequestTime = System.currentTimeMillis();
+      }
     }
 
     if (allKnownLiveCollectors.size() != 0) {
@@ -356,7 +380,7 @@ public abstract class AbstractTimelineMetricsSink {
             // OR
             // through Expiry (Refresh needed to pick up dead collectors that might have not become alive).
             if (shardExpired) {
-              refreshCollectorsFromConfigured();
+              refreshCollectorsFromConfigured(getConfiguredCollectorHosts());
             }
             return metricSinkWriteShardStrategy.findCollectorShard(new ArrayList<>(allKnownLiveCollectors));
           }
@@ -376,8 +400,7 @@ public abstract class AbstractTimelineMetricsSink {
     return null;
   }
 
-  private void refreshCollectorsFromConfigured() {
-    Collection<String> collectorHosts = getConfiguredCollectorHosts();
+  private void refreshCollectorsFromConfigured(Collection<String> collectorHosts) {
 
     LOG.debug("Trying to find live collector host from : " + collectorHosts);
     if (collectorHosts != null && !collectorHosts.isEmpty()) {
@@ -477,7 +500,7 @@ public abstract class AbstractTimelineMetricsSink {
     return sb.toString();
   }
   /**
-   * Parses input Sting of format "['host1', 'host2']" into Collection of hostnames
+   * Parses input String of format "host1,host2" into Collection of hostnames
    */
   public Collection<String> parseHostsStringIntoCollection(String hostsString) {
     Set<String> hosts = new HashSet<>();
@@ -487,10 +510,8 @@ public abstract class AbstractTimelineMetricsSink {
       return hosts;
     }
 
-    String[] untrimmedHosts = hostsString.split(",");
 
-    for (String host : untrimmedHosts) {
-      host = StringUtils.substringBetween(host, "'");
+    for (String host : hostsString.split(",")) {
       if (StringUtils.isEmpty(host))
         continue;
       hosts.add(host.trim());
@@ -499,6 +520,12 @@ public abstract class AbstractTimelineMetricsSink {
     return hosts;
   }
 
+
+  private long getZookeeperBackoffTimeMillis() {
+    return (zookeeperMinBackoffTimeMins +
+      rand.nextInt(zookeeperMaxBackoffTimeMins - zookeeperMinBackoffTimeMins + 1)) * 60*1000l;
+  }
+
   /**
    * Get a pre-formatted URI for the collector
    */
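
For reference, the randomized Zookeeper backoff introduced above works out as follows (illustrative arithmetic, not part of the patch):

    // zookeeperMinBackoffTimeMins = 2, zookeeperMaxBackoffTimeMins = 5
    // rand.nextInt(5 - 2 + 1) yields a value in 0..3
    // (2 + 0..3) * 60 * 1000l = 120000..300000 ms, i.e. the sink waits a random
    // 2 to 5 minutes before asking Zookeeper again after a lookup that found no new collector.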

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHAHelper.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHAHelper.java b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHAHelper.java
index 54c2bdf..c6f6beb 100644
--- a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHAHelper.java
+++ b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHAHelper.java
@@ -23,7 +23,6 @@ import org.apache.curator.CuratorZookeeperClient;
 import org.apache.curator.RetryLoop;
 import org.apache.curator.RetryPolicy;
 import org.apache.curator.retry.BoundedExponentialBackoffRetry;
-import org.apache.curator.retry.RetryUntilElapsed;
 import org.apache.zookeeper.ZooKeeper;
 import org.apache.zookeeper.data.Stat;
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/AbstractTimelineMetricSinkTest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/AbstractTimelineMetricSinkTest.java b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/AbstractTimelineMetricSinkTest.java
index 5e016f8..9b0cdbe 100644
--- a/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/AbstractTimelineMetricSinkTest.java
+++ b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/AbstractTimelineMetricSinkTest.java
@@ -32,21 +32,21 @@ public class AbstractTimelineMetricSinkTest {
     AbstractTimelineMetricsSink sink = new TestTimelineMetricsSink();
     Collection<String> hosts;
 
-    hosts = sink.parseHostsStringIntoCollection("[]");
+    hosts = sink.parseHostsStringIntoCollection("");
     Assert.assertTrue(hosts.isEmpty());
 
-    hosts = sink.parseHostsStringIntoCollection("[u'test1.123.abc.def.local']");
+    hosts = sink.parseHostsStringIntoCollection("test1.123.abc.def.local");
     Assert.assertTrue(hosts.size() == 1);
     Assert.assertTrue(hosts.contains("test1.123.abc.def.local"));
 
-    hosts = sink.parseHostsStringIntoCollection("['test1.123.abc.def.local']");
+    hosts = sink.parseHostsStringIntoCollection("test1.123.abc.def.local ");
     Assert.assertTrue(hosts.size() == 1);
     Assert.assertTrue(hosts.contains("test1.123.abc.def.local"));
 
-    hosts = sink.parseHostsStringIntoCollection("[u'test1.123.abc.def.local', u'test1.456.abc.def.local']");
+    hosts = sink.parseHostsStringIntoCollection("test1.123.abc.def.local,test1.456.abc.def.local");
     Assert.assertTrue(hosts.size() == 2);
 
-    hosts = sink.parseHostsStringIntoCollection("['test1.123.abc.def.local', 'test1.456.abc.def.local']");
+    hosts = sink.parseHostsStringIntoCollection("test1.123.abc.def.local, test1.456.abc.def.local");
     Assert.assertTrue(hosts.size() == 2);
     Assert.assertTrue(hosts.contains("test1.123.abc.def.local"));
     Assert.assertTrue(hosts.contains("test1.456.abc.def.local"));

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHATest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHATest.java b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHATest.java
index a0bc2e8..a393a96 100644
--- a/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHATest.java
+++ b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHATest.java
@@ -49,7 +49,6 @@ import static org.powermock.api.easymock.PowerMock.verifyAll;
 @PrepareForTest({AbstractTimelineMetricsSink.class, URL.class, HttpURLConnection.class, MetricCollectorHAHelper.class})
 public class MetricCollectorHATest {
 
-  @Ignore
   @Test
   public void findCollectorUsingZKTest() throws Exception {
     InputStream is = createNiceMock(InputStream.class);

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/cache/HandleConnectExceptionTest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/cache/HandleConnectExceptionTest.java b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/cache/HandleConnectExceptionTest.java
index 36ec074..32fe32e 100644
--- a/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/cache/HandleConnectExceptionTest.java
+++ b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/cache/HandleConnectExceptionTest.java
@@ -130,6 +130,10 @@ public class HandleConnectExceptionTest {
       return super.emitMetrics(metrics);
     }
 
+    @Override
+    protected synchronized String findPreferredCollectHost() {
+      return "localhost";
+    }
 
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-metrics/ambari-metrics-grafana/conf/unix/ambari-metrics-grafana
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-grafana/conf/unix/ambari-metrics-grafana b/ambari-metrics/ambari-metrics-grafana/conf/unix/ambari-metrics-grafana
index 472942e..8a8ac8f 100644
--- a/ambari-metrics/ambari-metrics-grafana/conf/unix/ambari-metrics-grafana
+++ b/ambari-metrics/ambari-metrics-grafana/conf/unix/ambari-metrics-grafana
@@ -104,7 +104,7 @@ case "$1" in
 
     # Prepare environment
     # mkdir -p "$LOG_DIR" "$DATA_DIR" && chown "$GRAFANA_USER":"$GRAFANA_GROUP" "$LOG_DIR" "$DATA_DIR"
-    # touch "$PID_FILE" && chown "$GRAFANA_USER":"$GRAFANA_GROUP" "$PID_FILE"
+    touch "$PID_FILE" && chown "$GRAFANA_USER":"$GRAFANA_GROUP" "$PID_FILE"
 
     # if [ -n "$MAX_OPEN_FILES" ]; then
     #   ulimit -n $MAX_OPEN_FILES
@@ -116,10 +116,10 @@ case "$1" in
     return=$?
     if [ $return -eq 0 ]
     then
-      sleep 1
+      sleep 5
       # check if pid file has been written to
       if ! [[ -s $PID_FILE ]]; then
-        echo "Start FAILED" >> $LOG_FILE
+        echo "Start FAILED because daemon did not write pid in pid_file" >> $LOG_FILE
         exit 1
       fi
       i=0

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
index 802e684..b2ca52e 100644
--- a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
+++ b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
@@ -194,13 +194,13 @@ public class HadoopTimelineMetricsSink extends AbstractTimelineMetricsSink imple
   }
 
   /**
-   * Parses input Stings array of format "['host1'", '"host2']" into Collection of hostnames
+   * Parses input Strings array of format "host1,host2" into Collection of hostnames
    */
   protected Collection<String> parseHostsStringArrayIntoCollection(String[] hostStrings) {
     Collection<String> result = new HashSet<>();
     if (hostStrings == null) return result;
     for (String s : hostStrings) {
-      result.addAll(parseHostsStringIntoCollection(s));
+      result.add(s.trim());
     }
     return result;
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-metrics/ambari-metrics-hadoop-sink/src/test/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSinkTest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-sink/src/test/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSinkTest.java b/ambari-metrics/ambari-metrics-hadoop-sink/src/test/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSinkTest.java
index da00155..0cafd9a 100644
--- a/ambari-metrics/ambari-metrics-hadoop-sink/src/test/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSinkTest.java
+++ b/ambari-metrics/ambari-metrics-hadoop-sink/src/test/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSinkTest.java
@@ -100,7 +100,7 @@ public class HadoopTimelineMetricsSinkTest {
     expect(conf.getString("slave.host.name")).andReturn("localhost").anyTimes();
     expect(conf.getParent()).andReturn(null).anyTimes();
     expect(conf.getPrefix()).andReturn("service").anyTimes();
-    expect(conf.getStringArray(eq(COLLECTOR_HOSTS_PROPERTY))).andReturn(new String[]{"['localhost", "'localhost2']"}).anyTimes();
+    expect(conf.getStringArray(eq(COLLECTOR_HOSTS_PROPERTY))).andReturn(new String[]{"localhost"," localhost2"}).anyTimes();
     expect(conf.getString(eq("serviceName-prefix"), eq(""))).andReturn("").anyTimes();
     expect(conf.getString(eq(COLLECTOR_PROTOCOL), eq("http"))).andReturn("http").anyTimes();
     expect(conf.getString(eq(COLLECTOR_PORT), eq("6188"))).andReturn("6188").anyTimes();
@@ -172,7 +172,7 @@ public class HadoopTimelineMetricsSinkTest {
     expect(conf.getString("slave.host.name")).andReturn("localhost").anyTimes();
     expect(conf.getParent()).andReturn(null).anyTimes();
     expect(conf.getPrefix()).andReturn("service").anyTimes();
-    expect(conf.getStringArray(eq(COLLECTOR_HOSTS_PROPERTY))).andReturn(new String[]{"['localhost", "'localhost2']"}).anyTimes();
+    expect(conf.getStringArray(eq(COLLECTOR_HOSTS_PROPERTY))).andReturn(new String[]{"localhost", "localhost2"}).anyTimes();
     expect(conf.getString(eq("serviceName-prefix"), eq(""))).andReturn("").anyTimes();
     expect(conf.getString(eq(COLLECTOR_PROTOCOL), eq("http"))).andReturn("http").anyTimes();
     expect(conf.getString(eq(COLLECTOR_PORT), eq("6188"))).andReturn("6188").anyTimes();
@@ -302,7 +302,7 @@ public class HadoopTimelineMetricsSinkTest {
     expect(conf.getString("slave.host.name")).andReturn("localhost").anyTimes();
     expect(conf.getParent()).andReturn(null).anyTimes();
     expect(conf.getPrefix()).andReturn("service").anyTimes();
-    expect(conf.getStringArray(eq(COLLECTOR_HOSTS_PROPERTY))).andReturn(new String[]{"['localhost", "'localhost2']"}).anyTimes();
+    expect(conf.getStringArray(eq(COLLECTOR_HOSTS_PROPERTY))).andReturn(new String[]{"localhost", "localhost2"}).anyTimes();
     expect(conf.getString(eq("serviceName-prefix"), eq(""))).andReturn("").anyTimes();
     expect(conf.getString(eq(COLLECTOR_PROTOCOL), eq("http"))).andReturn("http").anyTimes();
     expect(conf.getString(eq(COLLECTOR_PORT), eq("6188"))).andReturn("6188").anyTimes();

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
index 9f95c26..7010187 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
@@ -102,7 +102,7 @@ config_content = """
 [default]
 debug_level = INFO
 hostname = localhost
-metrics_servers = ['localhost','host1','host2']
+metrics_servers = localhost
 enable_time_threshold = false
 enable_value_threshold = false
 
@@ -217,16 +217,13 @@ class Configuration:
 
   def get_metrics_collector_hosts(self):
     hosts = self.get("default", "metrics_servers", "localhost")
-    if hosts is not "localhost":
-      return ast.literal_eval(hosts)
-    else:
-      return hosts
+    return hosts.split(",")
 
   def get_failover_strategy(self):
     return self.get("collector", "failover_strategy", ROUND_ROBIN_FAILOVER_STRATEGY)
 
   def get_failover_strategy_blacklisted_interval_seconds(self):
-    return self.get("collector", "failover_strategy_blacklisted_interval_seconds", 600)
+    return self.get("collector", "failover_strategy_blacklisted_interval_seconds", 300)
 
   def get_hostname_script(self):
     if self.hostname_script:
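
A minimal sketch of the new metrics_servers handling follows; the hostnames and the standalone parser are illustrative only and simply mirror what get_metrics_collector_hosts() now does:

    # Illustrative only: metrics_servers is a plain comma-separated list now,
    # so a simple split on ',' replaces the old ast.literal_eval of a Python-style list.
    import ConfigParser
    import StringIO

    sample = "[default]\nmetrics_servers = c6401.ambari.apache.org,c6402.ambari.apache.org\n"
    parser = ConfigParser.RawConfigParser()
    parser.readfp(StringIO.StringIO(sample))
    hosts = parser.get("default", "metrics_servers").split(",")
    # -> ['c6401.ambari.apache.org', 'c6402.ambari.apache.org']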

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-metrics/ambari-metrics-host-monitoring/src/test/python/core/TestEmitter.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/test/python/core/TestEmitter.py b/ambari-metrics/ambari-metrics-host-monitoring/src/test/python/core/TestEmitter.py
index de97292..4056ae3 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/test/python/core/TestEmitter.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/test/python/core/TestEmitter.py
@@ -83,7 +83,7 @@ class TestEmitter(TestCase):
     emitter.RETRY_SLEEP_INTERVAL = .001
     emitter.submit_metrics()
 
-    self.assertEqual(request_mock.call_count, 9)
+    self.assertEqual(request_mock.call_count, 3)
     self.assertUrlData(request_mock)
 
   def assertUrlData(self, request_mock):

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
index 75ce845..844ee41 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
@@ -107,16 +107,14 @@ public class HBaseTimelineMetricStore extends AbstractService implements Timelin
       // Initialize policies before TTL update
       hBaseAccessor.initPoliciesAndTTL();
       // Start HA service
-      if (configuration.isDistributedOperationModeEnabled()) {
-        // Start the controller
-        haController = new MetricCollectorHAController(configuration);
-        try {
-          haController.initializeHAController();
-        } catch (Exception e) {
-          LOG.error(e);
-          throw new MetricsSystemInitializationException("Unable to " +
-            "initialize HA controller", e);
-        }
+      // Start the controller
+      haController = new MetricCollectorHAController(configuration);
+      try {
+        haController.initializeHAController();
+      } catch (Exception e) {
+        LOG.error(e);
+        throw new MetricsSystemInitializationException("Unable to " +
+          "initialize HA controller", e);
       }
 
       String whitelistFile = metricsConf.get(TIMELINE_METRICS_WHITELIST_FILE, "");

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
index 9858e4c..27caf88 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
@@ -320,6 +320,20 @@ public class TimelineMetricConfiguration {
     return hbaseConf.getTrimmed("hbase.zookeeper.quorum");
   }
 
+  public String getClusterZKClientPort() throws MalformedURLException, URISyntaxException {
+    if (!isInitialized) {
+      initialize();
+    }
+    return metricsConf.getTrimmed("cluster.zookeeper.property.clientPort", "2181");
+  }
+
+  public String getClusterZKQuorum() throws MalformedURLException, URISyntaxException {
+    if (!isInitialized) {
+      initialize();
+    }
+    return metricsConf.getTrimmed("cluster.zookeeper.quorum");
+  }
+
   public String getInstanceHostnameFromEnv() throws UnknownHostException {
     String amsInstanceName = System.getProperty("AMS_INSTANCE_NAME");
     if (amsInstanceName == null) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/AbstractTimelineAggregator.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/AbstractTimelineAggregator.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/AbstractTimelineAggregator.java
index de63d4e..62f8137 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/AbstractTimelineAggregator.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/AbstractTimelineAggregator.java
@@ -317,9 +317,9 @@ public abstract class AbstractTimelineAggregator implements TimelineMetricAggreg
 
   protected void downsample(Connection conn, Long startTime, Long endTime) {
 
-    LOG.info("Checking for downsampling requests.");
+    LOG.debug("Checking for downsampling requests.");
     if (CollectionUtils.isEmpty(configuredDownSamplers)) {
-      LOG.info("No downsamplers configured");
+      LOG.debug("No downsamplers configured");
       return;
     }
 
@@ -424,7 +424,7 @@ public abstract class AbstractTimelineAggregator implements TimelineMetricAggreg
 
     PreparedStatement stmt = null;
     ResultSet rs = null;
-    LOG.info("Downsampling query : " + condition.getStatement());
+    LOG.debug("Downsampling query : " + condition.getStatement());
 
     try {
       stmt = PhoenixTransactSQL.prepareGetMetricsSqlStmt(conn, condition);

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/availability/MetricCollectorHAController.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/availability/MetricCollectorHAController.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/availability/MetricCollectorHAController.java
index edce367..26702b6 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/availability/MetricCollectorHAController.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/availability/MetricCollectorHAController.java
@@ -81,8 +81,8 @@ public class MetricCollectorHAController {
     }
 
     try {
-      String zkClientPort = configuration.getZKClientPort();
-      String zkQuorum = configuration.getZKQuorum();
+      String zkClientPort = configuration.getClusterZKClientPort();
+      String zkQuorum = configuration.getClusterZKQuorum();
 
       if (StringUtils.isEmpty(zkClientPort) || StringUtils.isEmpty(zkQuorum)) {
         throw new Exception("Unable to parse zookeeper quorum. clientPort = "

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java
index 462213d..3688630 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java
@@ -25,8 +25,10 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.service.Service.STATE;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.HBaseTimelineMetricStore;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixHBaseAccessor;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.availability.MetricCollectorHAController;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.DefaultPhoenixDataSource;
 import org.apache.zookeeper.ClientCnxn;
 import org.easymock.EasyMock;
@@ -71,7 +73,7 @@ import static org.powermock.api.support.membermodification.MemberMatcher.method;
 import static org.powermock.api.support.membermodification.MemberModifier.suppress;
 
 @RunWith(PowerMockRunner.class)
-@PrepareForTest({ PhoenixHBaseAccessor.class, UserGroupInformation.class,
+@PrepareForTest({ PhoenixHBaseAccessor.class, HBaseTimelineMetricStore.class, UserGroupInformation.class,
   ClientCnxn.class, DefaultPhoenixDataSource.class, ConnectionFactory.class,
   TimelineMetricConfiguration.class, ApplicationHistoryServer.class })
 @PowerMockIgnore( {"javax.management.*"})
@@ -179,6 +181,8 @@ public class TestApplicationHistoryServer {
     expect(metricConfiguration.getTimelineMetricsServiceHandlerThreadCount()).andReturn(20).anyTimes();
     expect(metricConfiguration.getWebappAddress()).andReturn("localhost:9990").anyTimes();
     expect(metricConfiguration.getTimelineServiceRpcAddress()).andReturn("localhost:10299").anyTimes();
+    expect(metricConfiguration.getClusterZKQuorum()).andReturn("localhost").anyTimes();
+    expect(metricConfiguration.getClusterZKClientPort()).andReturn("2181").anyTimes();
 
     Connection connection = createNiceMock(Connection.class);
     Statement stmt = createNiceMock(Statement.class);
@@ -197,6 +201,14 @@ public class TestApplicationHistoryServer {
     connection.close();
     expectLastCall();
 
+    MetricCollectorHAController haControllerMock = PowerMock.createMock(MetricCollectorHAController.class);
+    expectNew(MetricCollectorHAController.class, metricConfiguration)
+      .andReturn(haControllerMock);
+
+    haControllerMock.initializeHAController();
+    expectLastCall().once();
+    expect(haControllerMock.isInitialized()).andReturn(false).anyTimes();
+
     org.apache.hadoop.hbase.client.Connection conn = createNiceMock(org.apache.hadoop.hbase.client.Connection.class);
     mockStatic(ConnectionFactory.class);
     expect(ConnectionFactory.createConnection((Configuration) anyObject())).andReturn(conn);

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/availability/MetricCollectorHAControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/availability/MetricCollectorHAControllerTest.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/availability/MetricCollectorHAControllerTest.java
index 1e4bac0..875f013 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/availability/MetricCollectorHAControllerTest.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/availability/MetricCollectorHAControllerTest.java
@@ -51,8 +51,8 @@ public class MetricCollectorHAControllerTest extends AbstractMiniHBaseClusterTes
     String port = zkUrl.split(":")[3];
     String quorum = zkUrl.split(":")[2];
 
-    expect(configuration.getZKClientPort()).andReturn(port);
-    expect(configuration.getZKQuorum()).andReturn(quorum);
+    expect(configuration.getClusterZKClientPort()).andReturn(port);
+    expect(configuration.getClusterZKQuorum()).andReturn(quorum);
 
     replay(configuration);
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-project/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-project/pom.xml b/ambari-project/pom.xml
index a38c6a2..52c9992 100644
--- a/ambari-project/pom.xml
+++ b/ambari-project/pom.xml
@@ -30,6 +30,8 @@
     <ambari.dir>${project.parent.basedir}</ambari.dir>
     <powermock.version>1.6.3</powermock.version>
     <jetty.version>8.1.19.v20160209</jetty.version>
+    <checkstyle.version>6.19</checkstyle.version> <!-- last version that does not require Java 8 -->
+    <checkstyle.skip>false</checkstyle.skip>
   </properties>
   <profiles>
     <profile>
@@ -477,6 +479,11 @@
         <artifactId>jline</artifactId>
         <version>2.11</version>
       </dependency>
+      <dependency>
+        <groupId>com.puppycrawl.tools</groupId>
+        <artifactId>checkstyle</artifactId>
+        <version>${checkstyle.version}</version>
+      </dependency>
     </dependencies>
   </dependencyManagement>
   <build>
@@ -486,6 +493,40 @@
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-surefire-plugin</artifactId>
         </plugin>
+        <plugin>
+          <groupId>org.apache.maven.plugins</groupId>
+          <artifactId>maven-checkstyle-plugin</artifactId>
+          <version>2.17</version>
+          <executions>
+            <execution>
+              <id>checkstyle</id>
+              <phase>test</phase>
+              <configuration>
+                <configLocation>${project.basedir}/checkstyle.xml</configLocation>
+                <encoding>UTF-8</encoding>
+                <consoleOutput>true</consoleOutput>
+                <failsOnError>true</failsOnError>
+                <linkXRef>false</linkXRef>
+                <skip>${checkstyle.skip}</skip>
+              </configuration>
+              <goals>
+                <goal>check</goal>
+              </goals>
+            </execution>
+          </executions>
+          <dependencies>
+            <dependency>
+              <groupId>com.puppycrawl.tools</groupId>
+              <artifactId>checkstyle</artifactId>
+              <version>${checkstyle.version}</version>
+            </dependency>
+            <dependency>
+              <groupId>utility</groupId>
+              <artifactId>utility</artifactId>
+              <version>1.0.0.0-SNAPSHOT</version>
+            </dependency>
+          </dependencies>
+        </plugin>
       </plugins>
     </pluginManagement>
     <plugins>
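
With this wiring the Checkstyle check is bound to the test phase, so it runs during a normal build; because the plugin's skip flag is driven by the checkstyle.skip property declared above, it can be bypassed from the command line, for example:

    mvn test -Dcheckstyle.skip=true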

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/checkstyle.xml
----------------------------------------------------------------------
diff --git a/ambari-server/checkstyle.xml b/ambari-server/checkstyle.xml
new file mode 100644
index 0000000..81f6380
--- /dev/null
+++ b/ambari-server/checkstyle.xml
@@ -0,0 +1,17 @@
+<?xml version="1.0"?>
+<!-- Licensed under the Apache License, Version 2.0 (the "License"); you
+  may not use this file except in compliance with the License. You may obtain
+  a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless
+  required by applicable law or agreed to in writing, software distributed
+  under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
+  OR CONDITIONS OF ANY KIND, either express or implied. See the License for
+  the specific language governing permissions and limitations under the License.
+  See accompanying LICENSE file. -->
+<!DOCTYPE module PUBLIC
+  "-//Puppy Crawl//DTD Check Configuration 1.3//EN"
+  "http://www.puppycrawl.com/dtds/configuration_1_3.dtd">
+<module name="Checker">
+  <module name="TreeWalker">
+    <module name="AvoidTransactionalOnPrivateMethodsCheck"/>
+  </module>
+</module>

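For context, a minimal, hypothetical sketch (not part of this patch) of the pattern the custom AvoidTransactionalOnPrivateMethodsCheck rule is meant to flag: Guice applies @Transactional through method interception, which cannot wrap private methods, so the annotation silently has no effect there. The class and method names below are illustrative only.

    import com.google.inject.persist.Transactional;

    public class ExampleDao {

      // Would be flagged: the AOP interceptor cannot see private methods,
      // so this @Transactional annotation never opens a transaction.
      @Transactional
      private void savePrivately() {
        // persistence logic
      }

      // Fine: package-private (or public) methods can be intercepted.
      @Transactional
      void save() {
        // persistence logic
      }
    }

This is also the reason several @Transactional methods further down in this patch are widened from private to package-private.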
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-server/pom.xml b/ambari-server/pom.xml
index d1277c3..08757a2 100644
--- a/ambari-server/pom.xml
+++ b/ambari-server/pom.xml
@@ -686,6 +686,10 @@
         <artifactId>jetty-maven-plugin</artifactId>
         <version>${jetty.version}</version>
       </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-checkstyle-plugin</artifactId>
+      </plugin>
     </plugins>
     <resources>
       <resource>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/agent/AgentRequests.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/AgentRequests.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/AgentRequests.java
index 2980f38..01195bf 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/AgentRequests.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/AgentRequests.java
@@ -45,9 +45,9 @@ public class AgentRequests {
 
   public void setExecutionDetailsRequest(String host, String component, String requestExecutionCmd) {
     if (StringUtils.isNotBlank(requestExecutionCmd)) {
-      LOG.debug("Setting need for exec command to " + requestExecutionCmd + " for " + component);
       Map<String, Boolean> perHostRequiresExecCmdDetails = getPerHostRequiresExecCmdDetails(host);
       if (Boolean.TRUE.toString().toUpperCase().equals(requestExecutionCmd.toUpperCase())) {
+        LOG.info("Setting need for exec command to " + requestExecutionCmd + " for " + component);
         perHostRequiresExecCmdDetails.put(component, Boolean.TRUE);
       } else {
         perHostRequiresExecCmdDetails.put(component, Boolean.FALSE);

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
index 29737ee..ef1ee4f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
@@ -343,6 +343,7 @@ public class ExecutionCommand extends AgentCommand {
     String SCRIPT_TYPE = "script_type";
     String SERVICE_PACKAGE_FOLDER = "service_package_folder";
     String HOOKS_FOLDER = "hooks_folder";
+    String CUSTOM_FOLDER = "custom_folder";
     String STACK_NAME = "stack_name";
     String SERVICE_TYPE = "service_type";
     String STACK_VERSION = "stack_version";

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDescription.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDescription.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDescription.java
index 7f24bf4..45497b5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDescription.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDescription.java
@@ -236,10 +236,10 @@ public class CheckDescription {
 
   public static CheckDescription HARDCODED_STACK_VERSION_PROPERTIES_CHECK = new CheckDescription("HARDCODED_STACK_VERSION_PROPERTIES_CHECK",
     PrereqCheckType.CLUSTER,
-    "Found hardcoded hdp stack version in property value.",
+    "Found hardcoded stack version in property value.",
     new ImmutableMap.Builder<String, String>()
       .put(AbstractCheckDescriptor.DEFAULT,
-          "Some properties seem to contain hardcoded hdp version string \"%s\"." +
+          "Some properties seem to contain hardcoded stack version string \"%s\"." +
           " That is a potential problem when doing stack update.").build());
 
   public static CheckDescription VERSION_MISMATCH = new CheckDescription("VERSION_MISMATCH",

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 2a78905..c3cd82e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -25,6 +25,7 @@ import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AMBARI_DB
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.CLIENTS_TO_UPDATE_CONFIGS;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.COMMAND_RETRY_ENABLED;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.COMMAND_TIMEOUT;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.CUSTOM_FOLDER;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.DB_DRIVER_FILENAME;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.GROUP_LIST;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.HOOKS_FOLDER;
@@ -54,7 +55,6 @@ import java.util.EnumMap;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.LinkedHashSet;
 import java.util.LinkedList;
 import java.util.List;
@@ -79,10 +79,10 @@ import org.apache.ambari.server.ServiceComponentNotFoundException;
 import org.apache.ambari.server.ServiceNotFoundException;
 import org.apache.ambari.server.StackAccessException;
 import org.apache.ambari.server.actionmanager.ActionManager;
+import org.apache.ambari.server.actionmanager.CommandExecutionType;
 import org.apache.ambari.server.actionmanager.HostRoleCommand;
 import org.apache.ambari.server.actionmanager.RequestFactory;
 import org.apache.ambari.server.actionmanager.Stage;
-import org.apache.ambari.server.actionmanager.CommandExecutionType;
 import org.apache.ambari.server.actionmanager.StageFactory;
 import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.agent.ExecutionCommand.KeyNames;
@@ -894,17 +894,11 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
   @Override
   public Config createConfig(Cluster cluster, String type, Map<String, String> properties,
                              String versionTag, Map<String, Map<String, String>> propertiesAttributes) {
-    Config config = configFactory.createNew(cluster, type,
-      properties, propertiesAttributes);
 
-    if (!StringUtils.isEmpty(versionTag)) {
-      config.setTag(versionTag);
-    }
-
-    config.persist();
+    Config config = configFactory.createNew(cluster, type, versionTag, properties,
+        propertiesAttributes);
 
     cluster.addConfig(config);
-
     return config;
   }
 
@@ -1253,7 +1247,12 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
               throw new HostNotFoundException(cluster.getClusterName(), sch.getHostName());
             }
 
-            r.setMaintenanceState(maintenanceStateHelper.getEffectiveState(sch, host).name());
+            MaintenanceState effectiveMaintenanceState = maintenanceStateHelper.getEffectiveState(sch, host);
+            if(filterByMaintenanceState(request, effectiveMaintenanceState)) {
+              continue;
+            }
+            r.setMaintenanceState(effectiveMaintenanceState.name());
+
             response.add(r);
           } catch (ServiceComponentHostNotFoundException e) {
             if (request.getServiceName() == null || request.getComponentName() == null) {
@@ -1304,7 +1303,12 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
               throw new HostNotFoundException(cluster.getClusterName(), sch.getHostName());
             }
 
-            r.setMaintenanceState(maintenanceStateHelper.getEffectiveState(sch, host).name());
+            MaintenanceState effectiveMaintenanceState = maintenanceStateHelper.getEffectiveState(sch, host);
+            if(filterByMaintenanceState(request, effectiveMaintenanceState)) {
+              continue;
+            }
+            r.setMaintenanceState(effectiveMaintenanceState.name());
+
             response.add(r);
           }
         }
@@ -1313,6 +1317,24 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     return response;
   }
 
+  private boolean filterByMaintenanceState(ServiceComponentHostRequest request, MaintenanceState effectiveMaintenanceState) {
+    if (request.getMaintenanceState() != null) {
+      MaintenanceState desiredMaintenanceState = MaintenanceState.valueOf(request.getMaintenanceState());
+      if (desiredMaintenanceState.equals(MaintenanceState.ON)) {
+        /*
+         * if we want components in the ON state, it can be any of IMPLIED_FROM_SERVICE,
+         * IMPLIED_FROM_SERVICE_AND_HOST, IMPLIED_FROM_HOST or ON - simply, anything but OFF
+         */
+        if (effectiveMaintenanceState.equals(MaintenanceState.OFF)) {
+          return true;
+        }
+      } else if (!desiredMaintenanceState.equals(effectiveMaintenanceState)){
+        return true;
+      }
+    }
+    return false;
+  }
+
   @Override
   public MaintenanceState getEffectiveMaintenanceState(ServiceComponentHost sch)
       throws AmbariException {
@@ -2217,6 +2239,14 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
       serviceInfo.getServicePackageFolder());
     commandParams.put(HOOKS_FOLDER, stackInfo.getStackHooksFolder());
 
+    String customCacheDirectory = componentInfo.getCustomFolder();
+    if (customCacheDirectory != null) {
+      File customCache = new File(configs.getResourceDirPath(), customCacheDirectory);
+      if (customCache.exists() && customCache.isDirectory()) {
+        commandParams.put(CUSTOM_FOLDER, customCacheDirectory);
+      }
+    }
+
     String clusterName = cluster.getClusterName();
     if (customCommandExecutionHelper.isTopologyRefreshRequired(roleCommand.name(), clusterName, serviceName)) {
       commandParams.put(ExecutionCommand.KeyNames.REFRESH_TOPOLOGY, "True");

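A hedged, standalone illustration of the maintenance-state filter added above (a simplified re-implementation under assumed semantics, not the patch's own API; the enum values mirror Ambari's MaintenanceState): a requested state of ON matches any effective state other than OFF, while any other requested state must match exactly.

    public class MaintenanceFilterSketch {

      enum MaintenanceState {
        OFF, ON, IMPLIED_FROM_HOST, IMPLIED_FROM_SERVICE, IMPLIED_FROM_SERVICE_AND_HOST
      }

      // Returns true when the component should be dropped from the response.
      static boolean filteredOut(MaintenanceState requested, MaintenanceState effective) {
        if (requested == null) {
          return false;                                  // no filter requested
        }
        if (requested == MaintenanceState.ON) {
          return effective == MaintenanceState.OFF;      // ON matches anything that is not OFF
        }
        return requested != effective;                    // otherwise require an exact match
      }

      public static void main(String[] args) {
        System.out.println(filteredOut(MaintenanceState.ON, MaintenanceState.IMPLIED_FROM_HOST)); // false -> kept
        System.out.println(filteredOut(MaintenanceState.ON, MaintenanceState.OFF));               // true  -> dropped
        System.out.println(filteredOut(MaintenanceState.OFF, MaintenanceState.ON));               // true  -> dropped
      }
    }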
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AlertTargetResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AlertTargetResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AlertTargetResourceProvider.java
index 8e5674e..4010528 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AlertTargetResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AlertTargetResourceProvider.java
@@ -374,7 +374,7 @@ public class AlertTargetResourceProvider extends
    */
   @Transactional
   @SuppressWarnings("unchecked")
-  private void updateAlertTargets(long alertTargetId,
+  void updateAlertTargets(long alertTargetId,
       Map<String, Object> requestMap)
       throws AmbariException {
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
index ec846f8..b26e056 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
@@ -2526,9 +2526,10 @@ public class BlueprintConfigurationProcessor {
         }
 
         boolean isAtlasInCluster = topology.getBlueprint().getServices().contains("ATLAS");
+        boolean isAtlasHiveHookEnabled = Boolean.parseBoolean(properties.get("hive-env").get("hive.atlas.hook"));
 
         // Append atlas hook if not already present.
-        if (isAtlasInCluster) {
+        if (isAtlasInCluster || isAtlasHiveHookEnabled) {
           if (!hiveHooksClean.contains(atlasHookClass)) {
             hiveHooksClean.add(atlasHookClass);
           }

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/CalculatedStatus.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/CalculatedStatus.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/CalculatedStatus.java
index 3a86aef..3c415df 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/CalculatedStatus.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/CalculatedStatus.java
@@ -53,6 +53,17 @@ public class CalculatedStatus {
    */
   private final double percent;
 
+  /**
+   * A status which represents a COMPLETED state at 100%
+   */
+  public static final CalculatedStatus COMPLETED = new CalculatedStatus(HostRoleStatus.COMPLETED,
+      HostRoleStatus.COMPLETED, 100.0);
+
+  /**
+   * A status which represents a PENDING state at 0%
+   */
+  public static final CalculatedStatus PENDING = new CalculatedStatus(HostRoleStatus.PENDING,
+      HostRoleStatus.PENDING, 0.0);
 
   // ----- Constructors ------------------------------------------------------
 
@@ -79,12 +90,6 @@ public class CalculatedStatus {
     this.percent = percent;
   }
 
-  /**
-   * Static factory method to get Status that represents a Completed state
-   */
-  public static CalculatedStatus getCompletedStatus() {
-    return new CalculatedStatus(HostRoleStatus.COMPLETED, HostRoleStatus.COMPLETED, 100.0);
-  }
 
   // ----- CalculatedStatus --------------------------------------------------
 
@@ -291,14 +296,25 @@ public class CalculatedStatus {
   }
 
   /**
-   * Calculates the overall status of an upgrade.
-   * @param stageDto  the map of stage-to-summary value objects
-   * @param stageIds  the stage ids to consider from the value objects
+   * Calculates the overall status of an upgrade. If there are no tasks, then a
+   * status of {@link HostRoleStatus#COMPLETED} is returned.
+   *
+   * @param stageDto
+   *          the map of stage-to-summary value objects
+   * @param stageIds
+   *          the stage ids to consider from the value objects
    * @return the calculated status
    */
   public static CalculatedStatus statusFromStageSummary(Map<Long, HostRoleCommandStatusSummaryDTO> stageDto,
       Set<Long> stageIds) {
 
+    // if either is empty, then we have no tasks and therefore no status - we
+    // should return COMPLETED. This can happen if someone removes all tasks but
+    // leaves the stages and request
+    if (stageDto.isEmpty() || stageIds.isEmpty()) {
+      return COMPLETED;
+    }
+
     Collection<HostRoleStatus> stageStatuses = new HashSet<>();
     Collection<HostRoleStatus> stageDisplayStatuses = new HashSet<>();
     Collection<HostRoleStatus> taskStatuses = new ArrayList<>();
@@ -378,19 +394,28 @@ public class CalculatedStatus {
    */
   public static HostRoleStatus calculateSummaryStatusOfStage(Map<HostRoleStatus, Integer> counters,
       int total, boolean skippable) {
+
+    // when there are 0 tasks, return COMPLETED
+    if (total == 0) {
+      return HostRoleStatus.COMPLETED;
+    }
+
     if (counters.get(HostRoleStatus.PENDING) == total) {
       return HostRoleStatus.PENDING;
     }
+
     // By definition, any tasks in a future stage must be held in a PENDING status.
     if (counters.get(HostRoleStatus.HOLDING) > 0 || counters.get(HostRoleStatus.HOLDING_FAILED) > 0 || counters.get(HostRoleStatus.HOLDING_TIMEDOUT) > 0) {
       return counters.get(HostRoleStatus.HOLDING) > 0 ? HostRoleStatus.HOLDING :
       counters.get(HostRoleStatus.HOLDING_FAILED) > 0 ? HostRoleStatus.HOLDING_FAILED :
       HostRoleStatus.HOLDING_TIMEDOUT;
     }
+
     // Because tasks are not skippable, guaranteed to be FAILED
     if (counters.get(HostRoleStatus.FAILED) > 0 && !skippable) {
       return HostRoleStatus.FAILED;
     }
+
     // Because tasks are not skippable, guaranteed to be TIMEDOUT
     if (counters.get(HostRoleStatus.TIMEDOUT) > 0  && !skippable) {
       return HostRoleStatus.TIMEDOUT;
@@ -401,9 +426,11 @@ public class CalculatedStatus {
     if (counters.get(HostRoleStatus.ABORTED) > 0 && numActiveTasks == 0) {
       return HostRoleStatus.ABORTED;
     }
+
     if (counters.get(HostRoleStatus.COMPLETED) == total) {
       return HostRoleStatus.COMPLETED;
     }
+
     return HostRoleStatus.IN_PROGRESS;
   }
 
@@ -415,7 +442,8 @@ public class CalculatedStatus {
    *
    * @return summary request status based on statuses of tasks in different states.
    */
-  private static HostRoleStatus calculateSummaryStatusOfUpgrade(Map<HostRoleStatus, Integer> counters, int total) {
+  protected static HostRoleStatus calculateSummaryStatusOfUpgrade(
+      Map<HostRoleStatus, Integer> counters, int total) {
     return calculateSummaryStatusOfStage(counters, total, false);
   }
 
@@ -428,8 +456,8 @@ public class CalculatedStatus {
    *
    * @return summary request status based on statuses of tasks in different states.
    */
-  private static HostRoleStatus calculateSummaryDisplayStatus(Map<HostRoleStatus, Integer> counters,
-                                                              int total, boolean skippable) {
+  protected static HostRoleStatus calculateSummaryDisplayStatus(
+      Map<HostRoleStatus, Integer> counters, int total, boolean skippable) {
     return counters.get(HostRoleStatus.SKIPPED_FAILED) > 0 ? HostRoleStatus.SKIPPED_FAILED :
            counters.get(HostRoleStatus.FAILED) > 0 ? HostRoleStatus.FAILED:
            calculateSummaryStatusOfStage(counters, total, skippable);

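A hedged, standalone sketch of the zero-task rule introduced above (a simplified stand-in, not the real CalculatedStatus API): with no stages or no tasks, the summary now degenerates to COMPLETED instead of an undefined status, and a zero total inside a stage likewise reports COMPLETED.

    import java.util.Collections;
    import java.util.Map;

    public class StageStatusSketch {

      enum Status { PENDING, IN_PROGRESS, COMPLETED }

      // Simplified analogue of calculateSummaryStatusOfStage: zero tasks -> COMPLETED.
      static Status summarize(Map<Status, Integer> counters, int total) {
        if (total == 0) {
          return Status.COMPLETED;
        }
        if (counters.getOrDefault(Status.PENDING, 0) == total) {
          return Status.PENDING;
        }
        if (counters.getOrDefault(Status.COMPLETED, 0) == total) {
          return Status.COMPLETED;
        }
        return Status.IN_PROGRESS;
      }

      public static void main(String[] args) {
        // No tasks at all, e.g. after a host delete removed every HostRoleCommand.
        System.out.println(summarize(Collections.<Status, Integer>emptyMap(), 0)); // COMPLETED
      }
    }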
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
index 42aae84..d417ec2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
@@ -442,7 +442,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
   }
 
   @Transactional
-  private void createHostVersions(Cluster cluster, List<Host> hosts, StackId stackId,
+  void createHostVersions(Cluster cluster, List<Host> hosts, StackId stackId,
       String desiredRepoVersion, RepositoryVersionState repoState)
       throws AmbariException, SystemException {
     final String clusterName = cluster.getClusterName();
@@ -482,7 +482,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
   }
 
   @Transactional
-  private RequestStageContainer createOrchestration(Cluster cluster, StackId stackId,
+  RequestStageContainer createOrchestration(Cluster cluster, StackId stackId,
       List<Host> hosts, RepositoryVersionEntity repoVersionEnt, Map<String, Object> propertyMap)
       throws AmbariException, SystemException {
     final AmbariManagementController managementController = getManagementController();

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java
index 96bb8f9..2373068 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java
@@ -17,7 +17,16 @@
  */
 package org.apache.ambari.server.controller.internal;
 
-import com.google.inject.Inject;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.ClusterNotFoundException;
 import org.apache.ambari.server.ConfigGroupNotFoundException;
@@ -48,7 +57,7 @@ import org.apache.ambari.server.security.authorization.RoleAuthorization;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
+import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.configgroup.ConfigGroup;
 import org.apache.ambari.server.state.configgroup.ConfigGroupFactory;
@@ -56,15 +65,7 @@ import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
+import com.google.inject.Inject;
 
 @StaticallyInject
 public class ConfigGroupResourceProvider extends
@@ -102,6 +103,12 @@ public class ConfigGroupResourceProvider extends
   private static HostDAO hostDAO;
 
   /**
+   * Used for creating {@link Config} instances to return in the REST response.
+   */
+  @Inject
+  private static ConfigFactory configFactory;
+
+  /**
    * Create a  new resource provider for the given management controller.
    *
    * @param propertyIds          the property ids
@@ -568,22 +575,19 @@ public class ConfigGroupResourceProvider extends
         }
       }
 
+      configLogger.info("User {} is creating new configuration group {} for tag {} in cluster {}",
+          getManagementController().getAuthName(), request.getGroupName(), request.getTag(),
+          cluster.getClusterName());
+
       ConfigGroup configGroup = configGroupFactory.createNew(cluster,
         request.getGroupName(),
         request.getTag(), request.getDescription(),
         request.getConfigs(), hosts);
 
-      verifyConfigs(configGroup.getConfigurations(), cluster.getClusterName());
       configGroup.setServiceName(serviceName);
 
-      // Persist before add, since id is auto-generated
-      configLogger.info("Persisting new Config group"
-        + ", clusterName = " + cluster.getClusterName()
-        + ", name = " + configGroup.getName()
-        + ", tag = " + configGroup.getTag()
-        + ", user = " + getManagementController().getAuthName());
+      verifyConfigs(configGroup.getConfigurations(), cluster.getClusterName());
 
-      configGroup.persist();
       cluster.addConfigGroup(configGroup);
       if (serviceName != null) {
         cluster.createServiceConfigVersion(serviceName, getManagementController().getAuthName(),
@@ -634,6 +638,11 @@ public class ConfigGroupResourceProvider extends
                                  + ", clusterName = " + request.getClusterName()
                                  + ", groupId = " + request.getId());
       }
+
+      configLogger.info("User {} is updating configuration group {} for tag {} in cluster {}",
+          getManagementController().getAuthName(), request.getGroupName(), request.getTag(),
+          cluster.getClusterName());
+
       String serviceName = configGroup.getServiceName();
       String requestServiceName = cluster.getServiceForConfigTypes(request.getConfigs().keySet());
       if (StringUtils.isEmpty(serviceName) && StringUtils.isEmpty(requestServiceName)) {
@@ -682,13 +691,6 @@ public class ConfigGroupResourceProvider extends
       configGroup.setDescription(request.getDescription());
       configGroup.setTag(request.getTag());
 
-      configLogger.info("Persisting updated Config group"
-        + ", clusterName = " + configGroup.getClusterName()
-        + ", id = " + configGroup.getId()
-        + ", tag = " + configGroup.getTag()
-        + ", user = " + getManagementController().getAuthName());
-
-      configGroup.persist();
       if (serviceName != null) {
         cluster.createServiceConfigVersion(serviceName, getManagementController().getAuthName(),
           request.getServiceConfigVersionNote(), configGroup);
@@ -781,11 +783,7 @@ public class ConfigGroupResourceProvider extends
             }
           }
 
-          Config config = new ConfigImpl(type);
-          config.setTag(tag);
-          config.setProperties(configProperties);
-          config.setPropertiesAttributes(configAttributes);
-
+          Config config = configFactory.createReadOnly(type, tag, configProperties, configAttributes);
           configurations.put(config.getType(), config);
         }
       } catch (Exception e) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
index 8c1bc57..0690ee7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
@@ -69,10 +69,10 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.topology.LogicalRequest;
 import org.apache.ambari.server.topology.TopologyManager;
+import org.apache.commons.lang.StringUtils;
 
 import com.google.common.collect.Sets;
 import com.google.inject.Inject;
-import org.apache.commons.lang.StringUtils;
 
 /**
  * Resource provider for request resources.
@@ -566,13 +566,18 @@ public class RequestResourceProvider extends AbstractControllerResourceProvider
 
     @Override
     public boolean equals(Object o) {
-      if (this == o) return true;
-      if (o == null || getClass() != o.getClass()) return false;
+      if (this == o) {
+        return true;
+      }
+      if (o == null || getClass() != o.getClass()) {
+        return false;
+      }
 
       ServiceComponentTuple that = (ServiceComponentTuple) o;
 
-      if (serviceName != null ? !serviceName.equals(that.serviceName) : that.serviceName != null)
+      if (serviceName != null ? !serviceName.equals(that.serviceName) : that.serviceName != null) {
         return false;
+      }
       return !(componentName != null ? !componentName.equals(that.componentName) : that.componentName != null);
 
     }
@@ -739,15 +744,20 @@ public class RequestResourceProvider extends AbstractControllerResourceProvider
     // get summaries from TopologyManager for logical requests
     summary.putAll(topologyManager.getStageSummaries(entity.getRequestId()));
 
+    // summary might be empty because a host delete has cleared all
+    // HostRoleCommands, or because hosts haven't registered with the cluster yet
+    // when the cluster is provisioned with a Blueprint
+    final CalculatedStatus status;
     LogicalRequest logicalRequest = topologyManager.getRequest(entity.getRequestId());
-
-    CalculatedStatus status = CalculatedStatus.statusFromStageSummary(summary, summary.keySet());
-    if (summary.isEmpty() && logicalRequest == null) {
-
-      // summary might be empty due to delete host have cleared all HostRoleCommands
-      // or due to hosts haven't registered yet with the cluster when the cluster is provisioned
-      // with a Blueprint
-      status = CalculatedStatus.getCompletedStatus();
+    if (summary.isEmpty() && null != logicalRequest) {
+      // in this case, it appears that there are no tasks but this is a logical
+      // topology request, so it's a matter of hosts simply not registering yet
+      // for tasks to be created
+      status = CalculatedStatus.PENDING;
+    } else {
+      // there are either tasks or this is not a logical request, so do normal
+      // status calculations
+      status = CalculatedStatus.statusFromStageSummary(summary, summary.keySet());
     }
 
     setResourceProperty(resource, REQUEST_STATUS_PROPERTY_ID, status.getStatus().toString(), requestedPropertyIds);

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java
index 59dd9d9..a778882 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java
@@ -339,7 +339,7 @@ public class StageResourceProvider extends AbstractControllerResourceProvider im
     CalculatedStatus status;
     if (summary.isEmpty()) {
       // Delete host might have cleared all HostRoleCommands
-      status = CalculatedStatus.getCompletedStatus();
+      status = CalculatedStatus.COMPLETED;
     } else {
       status = CalculatedStatus.statusFromStageSummary(summary, Collections.singleton(entity.getStageId()));
     }

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
index fd38d17..a45ccff 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
@@ -956,7 +956,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
    * @throws AmbariException
    */
   @Transactional
-  private UpgradeEntity createUpgradeInsideTransaction(Cluster cluster,
+  UpgradeEntity createUpgradeInsideTransaction(Cluster cluster,
       RequestStageContainer request,
       UpgradeEntity upgradeEntity) throws AmbariException {
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LogSearchDataRetrievalService.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LogSearchDataRetrievalService.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LogSearchDataRetrievalService.java
index 1c135b2..5c0bdb1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LogSearchDataRetrievalService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LogSearchDataRetrievalService.java
@@ -23,9 +23,9 @@ import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.AbstractService;
 import com.google.inject.Inject;
 import org.apache.ambari.server.AmbariService;
-import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.controller.AmbariServer;
+import org.apache.commons.collections.CollectionUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -64,9 +64,6 @@ public class LogSearchDataRetrievalService extends AbstractService {
   private static Logger LOG = LoggerFactory.getLogger(LogSearchDataRetrievalService.class);
 
   @Inject
-  private Configuration configuration;
-
-  @Inject
   private LoggingRequestHelperFactory loggingRequestHelperFactory;
 
   /**
@@ -288,7 +285,7 @@ public class LogSearchDataRetrievalService extends AbstractService {
             helper.sendGetLogFileNamesRequest(component, host);
 
           // update the cache if result is available
-          if (logFileNamesResult != null) {
+          if (CollectionUtils.isNotEmpty(logFileNamesResult)) {
             LOG.debug("LogSearchFileNameRequestRunnable: request was successful, updating cache");
             final String key = generateKey(component, host);
             // update cache with returned result

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LoggingCookieStore.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LoggingCookieStore.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LoggingCookieStore.java
new file mode 100644
index 0000000..a779068
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LoggingCookieStore.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.server.controller.logging;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Package-protected singleton for storing cookie key/value pairs for the Logging Service.
+ * This was chosen instead of CookieManager to avoid relying on system-wide cookie handling.
+ */
+class LoggingCookieStore {
+  public static final LoggingCookieStore INSTANCE = new LoggingCookieStore();
+
+  private final Map<String, String> cookiesMap = new HashMap<>();
+
+  private LoggingCookieStore() {
+  }
+
+  public Map<String, String> getCookiesMap() {
+    return cookiesMap;
+  }
+
+  public void addCookie(String cookieName, String cookieValue) {
+    cookiesMap.put(cookieName, cookieValue);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LoggingRequestHelperImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LoggingRequestHelperImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LoggingRequestHelperImpl.java
index eab0c04..358c1b7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LoggingRequestHelperImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LoggingRequestHelperImpl.java
@@ -26,6 +26,7 @@ import org.apache.ambari.server.security.encryption.CredentialStoreService;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Config;
 import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.lang.StringUtils;
 import org.apache.http.client.utils.URIBuilder;
 import org.apache.log4j.Logger;
 import org.codehaus.jackson.map.AnnotationIntrospector;
@@ -39,11 +40,14 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.io.StringReader;
+import java.net.HttpCookie;
 import java.net.HttpURLConnection;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -76,6 +80,10 @@ public class LoggingRequestHelperImpl implements LoggingRequestHelper {
 
   private static final String PAGE_SIZE_QUERY_PARAMETER_NAME = "pageSize";
 
+  private static final String COOKIE_HEADER = "Cookie";
+
+  private static final String SET_COOKIES_HEADER = "Set-Cookie";
+
   private static final int DEFAULT_LOGSEARCH_CONNECT_TIMEOUT_IN_MILLISECONDS = 5000;
 
   private static final int DEFAULT_LOGSEARCH_READ_TIMEOUT_IN_MILLISECONDS = 5000;
@@ -109,20 +117,20 @@ public class LoggingRequestHelperImpl implements LoggingRequestHelper {
       // use the Apache builder to create the correct URI
       URI logSearchURI = createLogSearchQueryURI("http", queryParameters);
       LOG.debug("Attempting to connect to LogSearch server at " + logSearchURI);
-
-      HttpURLConnection httpURLConnection  = (HttpURLConnection)logSearchURI.toURL().openConnection();
+      HttpURLConnection httpURLConnection  = (HttpURLConnection) logSearchURI.toURL().openConnection();
       httpURLConnection.setRequestMethod("GET");
       httpURLConnection.setConnectTimeout(DEFAULT_LOGSEARCH_CONNECT_TIMEOUT_IN_MILLISECONDS);
       httpURLConnection.setReadTimeout(DEFAULT_LOGSEARCH_READ_TIMEOUT_IN_MILLISECONDS);
 
+      addCookiesFromCookieStore(httpURLConnection);
 
       setupCredentials(httpURLConnection);
 
       StringBuffer buffer = networkConnection.readQueryResponseFromServer(httpURLConnection);
+      addCookiesToCookieStoreFromResponse(httpURLConnection);
 
       // setup a reader for the JSON response
-      StringReader stringReader =
-        new StringReader(buffer.toString());
+      StringReader stringReader = new StringReader(buffer.toString());
 
       ObjectReader logQueryResponseReader =
         createObjectReader(LogQueryResponse.class);
@@ -137,6 +145,27 @@ public class LoggingRequestHelperImpl implements LoggingRequestHelper {
     return null;
   }
 
+  private void addCookiesFromCookieStore(HttpURLConnection httpURLConnection) {
+    if (LoggingCookieStore.INSTANCE.getCookiesMap().size() > 0) {
+      List<String> cookiesStrList = new ArrayList<>();
+      for (Map.Entry<String, String> entry : LoggingCookieStore.INSTANCE.getCookiesMap().entrySet()) {
+        cookiesStrList.add(String.format("%s=%s", entry.getKey(), entry.getValue()));
+      }
+      httpURLConnection.setRequestProperty(COOKIE_HEADER, StringUtils.join(cookiesStrList, "; "));
+    }
+  }
+
+  private void addCookiesToCookieStoreFromResponse(HttpURLConnection httpURLConnection) {
+    Map<String, List<String>> headerFields = httpURLConnection.getHeaderFields();
+    List<String> cookiesHeader = headerFields.get(SET_COOKIES_HEADER);
+    if (cookiesHeader != null) {
+      for (String cookie : cookiesHeader) {
+        HttpCookie cookie1 = HttpCookie.parse(cookie).get(0);
+        LoggingCookieStore.INSTANCE.addCookie(cookie1.getName(), cookie1.getValue());
+      }
+    }
+  }
+
 
   private void setupCredentials(HttpURLConnection httpURLConnection) {
     final String logSearchAdminUser =
@@ -224,10 +253,14 @@ public class LoggingRequestHelperImpl implements LoggingRequestHelper {
       HttpURLConnection httpURLConnection = (HttpURLConnection) logLevelQueryURI.toURL().openConnection();
       httpURLConnection.setRequestMethod("GET");
 
+      addCookiesFromCookieStore(httpURLConnection);
+
       setupCredentials(httpURLConnection);
 
       StringBuffer buffer = networkConnection.readQueryResponseFromServer(httpURLConnection);
 
+      addCookiesToCookieStoreFromResponse(httpURLConnection);
+
       // setup a reader for the JSON response
       StringReader stringReader =
         new StringReader(buffer.toString());
@@ -374,6 +407,7 @@ public class LoggingRequestHelperImpl implements LoggingRequestHelper {
         BufferedReader reader = new BufferedReader(new InputStreamReader(resultStream));
         LOG.debug("Response code from LogSearch Service is = " + httpURLConnection.getResponseCode());
 
+
         String line = reader.readLine();
         StringBuffer buffer = new StringBuffer();
         while (line != null) {


[17/25] ambari git commit: Merge from branch-2.5

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hdfs-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hdfs-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hdfs-log4j.xml
new file mode 100644
index 0000000..215a6ee
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hdfs-log4j.xml
@@ -0,0 +1,226 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <!-- These configs were inherited from HDP 2.2 -->
+  <property>
+    <name>content</name>
+    <display-name>hdfs-log4j template</display-name>
+    <description>Custom log4j.properties</description>
+    <value>
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+# Define some default values that can be overridden by system properties
+# To change daemon root logger use hadoop_root_logger in hadoop-env
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshhold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+#Security audit appender
+#
+hadoop.security.logger=INFO,console
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth.audit
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+#
+# hdfs audit logging
+#
+hdfs.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# NameNode metrics logging.
+# The default is to retain two namenode-metrics.log files up to 64MB each.
+#
+namenode.metrics.logger=INFO,NullAppender
+log4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}
+log4j.additivity.NameNodeMetricsLog=false
+log4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender
+log4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log
+log4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n
+log4j.appender.NNMETRICSRFA.MaxBackupIndex=1
+log4j.appender.NNMETRICSRFA.MaxFileSize=64MB
+
+#
+# mapred audit logging
+#
+mapred.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# Rolling File Appender
+#
+
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and 30-day backups
+log4j.appender.RFA.MaxFileSize=256MB
+log4j.appender.RFA.MaxBackupIndex=10
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+# Removes "deprecated" messages
+log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
+
+#
+# HDFS block state change log from block manager
+#
+# Uncomment the following to suppress normal block state change
+# messages from BlockManager in NameNode.
+#log4j.logger.BlockStateChange=WARN
+    </value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hdfs-site.xml
new file mode 100644
index 0000000..ac141d1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hdfs-site.xml
@@ -0,0 +1,153 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<!-- Put site-specific property overrides in this file. -->
+<configuration supports_final="true">
+  <!-- These configs were inherited from HDP 2.1 -->
+  <property>
+    <name>dfs.namenode.audit.log.async</name>
+    <value>true</value>
+    <description>Whether to enable async auditlog</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.namenode.fslock.fair</name>
+    <value>false</value>
+    <description>Whether fsLock is fair</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- These configs were inherited from HDP 2.2 -->
+  <property>
+    <name>dfs.namenode.startup.delay.block.deletion.sec</name>
+    <value>3600</value>
+    <description>
+      The delay in seconds at which we will pause the blocks deletion
+      after Namenode startup. By default it's disabled.
+      In the case a directory has large number of directories and files are
+      deleted, suggested delay is one hour to give the administrator enough time
+      to notice large number of pending deletion blocks and take corrective
+      action.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.journalnode.edits.dir</name>
+    <value>/hadoop/hdfs/journalnode</value>
+    <description>The path where the JournalNode daemon will store its local state. </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.client.retry.policy.enabled</name>
+    <value>false</value>
+    <description>Enables HDFS client retry in the event of a NameNode failure.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.content-summary.limit</name>
+    <value>5000</value>
+    <description>Dfs content summary limit.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.encryption.key.provider.uri</name>
+    <description>
+      The KeyProvider to use when interacting with encryption keys used
+      when reading and writing to an encryption zone.
+    </description>
+    <value/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hadoop-env</type>
+        <name>keyserver_host</name>
+      </property>
+      <property>
+        <type>hadoop-env</type>
+        <name>keyserver_port</name>
+      </property>
+      <property>
+        <type>kms-env</type>
+        <name>kms_port</name>
+      </property>
+      <property>
+        <type>ranger-kms-site</type>
+        <name>ranger.service.https.attrib.ssl.enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- These configs were inherited from HDP 2.3 -->
+  <property>
+    <name>nfs.file.dump.dir</name>
+    <value>/tmp/.hdfs-nfs</value>
+    <display-name>NFSGateway dump directory</display-name>
+    <description>
+      This directory is used to temporarily save out-of-order writes before
+      writing to HDFS. For each file, the out-of-order writes are dumped after
+      they have accumulated to exceed a certain threshold (e.g., 1MB) in memory.
+      One needs to make sure the directory has enough space.
+    </description>
+    <value-attributes>
+      <type>directory</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>nfs.exports.allowed.hosts</name>
+    <value>* rw</value>
+    <description>
+      By default, the export can be mounted by any client. To better control access,
+      users can update this property. The value string contains a machine name and an access privilege,
+      separated by whitespace. The machine name can be a single host, a wildcard, or an IPv4
+      network. The access privilege uses rw or ro to grant read-write or read-only access
+      to the exports. If the access privilege is not provided, the default is read-only. Entries are separated
+      by &quot;;&quot;. For example: &quot;192.168.0.0/22 rw ; host*.example.com ; host1.test.org ro;&quot;.
+    </description>
+    <display-name>Allowed hosts</display-name>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.encrypt.data.transfer.cipher.suites</name>
+    <value>AES/CTR/NoPadding</value>
+    <description>
+      This value may be either undefined or AES/CTR/NoPadding. If defined, then 
+      dfs.encrypt.data.transfer uses the specified cipher suite for data encryption. 
+      If not defined, then only the algorithm specified in dfs.encrypt.data.transfer.algorithm 
+      is used. By default, the property is not defined.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.namenode.inode.attributes.provider.class</name>
+    <description>INode attributes provider class used when the Ranger HDFS plugin is enabled</description>
+    <depends-on>
+      <property>
+        <type>ranger-hdfs-plugin-properties</type>
+        <name>ranger-hdfs-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>

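The hdfs-site.xml above follows Ambari's stack configuration schema: each <property> carries a name, value and description plus Ambari-specific tags such as <on-ambari-upgrade>, <depends-on> and <value-attributes>. As a rough illustration of that structure, the sketch below (not part of this patch) parses such a file into a simple name/value dict; the local file path is an assumption, and Ambari's real stack loader handles far more of the schema.

    # Minimal sketch, not part of this patch: parse an Ambari-style configuration XML
    # into a {name: value} dict. Only <name>/<value> are read here; Ambari-specific
    # tags such as <on-ambari-upgrade> and <depends-on> are ignored.
    import xml.etree.ElementTree as ET

    def load_properties(path):
        props = {}
        for prop in ET.parse(path).getroot().findall('property'):
            name = prop.findtext('name')
            if name:
                props[name] = prop.findtext('value', default='')
        return props

    # Hypothetical local copy of the file added above:
    # load_properties('hdfs-site.xml')['dfs.journalnode.edits.dir'] -> '/hadoop/hdfs/journalnode'
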
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-audit.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-audit.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-audit.xml
new file mode 100644
index 0000000..fd41817
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-audit.xml
@@ -0,0 +1,217 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <!-- These configs were inherited from HDP 2.3 -->
+  <property>
+    <name>xasecure.audit.is.enabled</name>
+    <value>true</value>
+    <description>Is Audit enabled?</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db</name>
+    <value>false</value>
+    <display-name>Audit to DB</display-name>
+    <description>Is Audit to DB enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.db</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.jdbc.url</name>
+    <value>{{audit_jdbc_url}}</value>
+    <description>Audit DB JDBC URL</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.user</name>
+    <value>{{xa_audit_db_user}}</value>
+    <description>Audit DB JDBC User</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.password</name>
+    <value>crypted</value>
+    <property-type>PASSWORD</property-type>
+    <description>Audit DB JDBC Password</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.jdbc.driver</name>
+    <value>{{jdbc_driver}}</value>
+    <description>Audit DB JDBC Driver</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.credential.provider.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>Credential file store</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.batch.filespool.dir</name>
+    <value>/var/log/hadoop/hdfs/audit/db/spool</value>
+    <description>Spool directory for batched DB audit events</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs</name>
+    <value>true</value>
+    <display-name>Audit to HDFS</display-name>
+    <description>Is Audit to HDFS enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.hdfs</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs.dir</name>
+    <value>hdfs://NAMENODE_HOSTNAME:8020/ranger/audit</value>
+    <description>HDFS folder to write audit logs to; make sure the service user has the required permissions</description>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.hdfs.dir</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
+    <value>/var/log/hadoop/hdfs/audit/hdfs/spool</value>
+    <description>Spool directory for batched HDFS audit events</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr</name>
+    <value>false</value>
+    <display-name>Audit to SOLR</display-name>
+    <description>Is Solr audit enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.solr</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.urls</name>
+    <value/>
+    <description>Solr URL</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.urls</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.zookeepers</name>
+    <value>NONE</value>
+    <description>Solr ZooKeeper connect string</description>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.zookeepers</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
+    <value>/var/log/hadoop/hdfs/audit/solr/spool</value>
+    <description>Spool directory for batched Solr audit events</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.provider.summary.enabled</name>
+    <value>false</value>
+    <display-name>Audit provider summary enabled</display-name>
+    <description>Enable Summary audit?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- These configs are deleted in HDP 2.5. -->
+  <property>
+    <name>xasecure.audit.destination.db</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.jdbc.url</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.user</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.password</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.jdbc.driver</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.credential.provider.file</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.batch.filespool.dir</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+</configuration>

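Several values above, e.g. {{audit_jdbc_url}}, {{jdbc_driver}} and jceks://file{{credential_file}}, are placeholders that Ambari fills in from its parameter scripts at deploy time. The sketch below (not part of this patch) only shows the general idea with a naive double-brace substitution; the sample parameter value is made up, and Ambari's own rendering is more involved.

    # Minimal sketch, not part of this patch: naive {{placeholder}} substitution.
    # The sample value is an assumption; Ambari resolves the real ones at deploy time.
    import re

    def render(value, params):
        return re.sub(r'\{\{(\w+)\}\}',
                      lambda m: str(params.get(m.group(1), m.group(0))), value)

    params = {'credential_file': '/etc/ranger/hdfs/cred.jceks'}   # hypothetical value
    render('jceks://file{{credential_file}}', params)
    # -> 'jceks://file/etc/ranger/hdfs/cred.jceks'
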
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
new file mode 100644
index 0000000..b31742c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
@@ -0,0 +1,98 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <!-- These configs were inherited from HDP 2.2 -->
+  <property>
+    <name>policy_user</name>
+    <value>ambari-qa</value>
+    <display-name>Policy user for HDFS</display-name>
+    <description>This user must be a system user and must also be present
+      in the Ranger Admin portal</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hadoop.rpc.protection</name>
+    <value/>
+    <description>Used for repository creation on Ranger Admin
+    </description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>common.name.for.certificate</name>
+    <value/>
+    <description>Common name for the certificate; this value should match what is specified in the repository within Ranger Admin</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger-hdfs-plugin-enabled</name>
+    <value>No</value>
+    <display-name>Enable Ranger for HDFS</display-name>
+    <description>Enable the Ranger HDFS plugin</description>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>ranger-hdfs-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_USERNAME</name>
+    <value>hadoop</value>
+    <display-name>Ranger repository config user</display-name>
+    <description>Used for repository creation on Ranger Admin
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_PASSWORD</name>
+    <value>hadoop</value>
+    <display-name>Ranger repository config password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Used for repository creation on Ranger Admin
+    </description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- These configs were inherited from HDP 2.5 -->
+  <property>
+    <name>hadoop.rpc.protection</name>
+    <value>authentication</value>
+    <description>Used for repository creation on Ranger Admin</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false" />
+  </property>
+</configuration>

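Note that hadoop.rpc.protection is defined twice in this file: once with an empty value (inherited from HDP 2.2) and once with the HDP 2.5 default of "authentication". Which definition the stack loader ultimately uses is not asserted here; a quick duplicate check such as the sketch below (not part of this patch) can flag cases like this.

    # Minimal sketch, not part of this patch: flag duplicate <property> names in an
    # Ambari configuration XML, e.g. the two hadoop.rpc.protection entries above.
    import xml.etree.ElementTree as ET
    from collections import Counter

    def duplicate_properties(path):
        names = [p.findtext('name')
                 for p in ET.parse(path).getroot().findall('property')]
        return [n for n, count in Counter(names).items() if count > 1]

    # Hypothetical local copy of the file added above:
    # duplicate_properties('ranger-hdfs-plugin-properties.xml') -> ['hadoop.rpc.protection']
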
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-policymgr-ssl.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-policymgr-ssl.xml
new file mode 100644
index 0000000..1bc83df
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-policymgr-ssl.xml
@@ -0,0 +1,67 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <!-- These configs were inherited from HDP 2.3 -->
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore</name>
+    <value>/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks</value>
+    <description>Java keystore file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.password</name>
+    <value>myKeyFilePassword</value>
+    <property-type>PASSWORD</property-type>
+    <description>Password for the keystore</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore</name>
+    <value>/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks</value>
+    <description>Java truststore file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.password</name>
+    <value>changeit</value>
+    <property-type>PASSWORD</property-type>
+    <description>Java truststore password</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>Java keystore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>Java truststore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-security.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-security.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-security.xml
new file mode 100644
index 0000000..1b0a821
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-security.xml
@@ -0,0 +1,65 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <!-- These configs were inherited from HDP 2.3 -->
+  <property>
+    <name>ranger.plugin.hdfs.service.name</name>
+    <value>{{repo_name}}</value>
+    <description>Name of the Ranger service containing HDFS policies</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.hdfs.policy.source.impl</name>
+    <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
+    <description>Class to retrieve policies from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.hdfs.policy.rest.url</name>
+    <value>{{policymgr_mgr_url}}</value>
+    <description>URL to Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.hdfs.policy.rest.ssl.config.file</name>
+    <value>/etc/hadoop/conf/ranger-policymgr-ssl.xml</value>
+    <description>Path to the file containing SSL details to contact Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.hdfs.policy.pollIntervalMs</name>
+    <value>30000</value>
+    <description>How often (in milliseconds) to poll for policy changes</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.hdfs.policy.cache.dir</name>
+    <value>/etc/ranger/{{repo_name}}/policycache</value>
+    <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.add-hadoop-authorization</name>
+    <value>true</value>
+    <description>Enable/Disable the default hadoop authorization (based on rwxrwxrwx permission on the resource) if Ranger Authorization fails.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

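Per the descriptions above, the plugin polls Ranger Admin for policy changes every ranger.plugin.hdfs.policy.pollIntervalMs milliseconds and writes the last successfully retrieved policies under ranger.plugin.hdfs.policy.cache.dir, presumably so authorization can continue from the cached copy when Ranger Admin is unreachable. The sketch below (not part of this patch) only illustrates that poll-and-cache pattern; fetch_policies() is a hypothetical stand-in for the plugin's REST call.

    # Minimal sketch, not part of this patch: poll for policies and cache the last
    # good copy. fetch_policies() is hypothetical; the real plugin talks to the
    # Ranger Admin REST endpoint configured in ranger.plugin.hdfs.policy.rest.url.
    import json, pathlib, time

    def poll_policies(fetch_policies, cache_dir, interval_ms=30000, iterations=1):
        cache = pathlib.Path(cache_dir)
        cache.mkdir(parents=True, exist_ok=True)
        for _ in range(iterations):
            try:
                policies = fetch_policies()
                (cache / 'policies.json').write_text(json.dumps(policies))
            except Exception:
                pass  # keep serving from the previously cached copy
            time.sleep(interval_ms / 1000.0)
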
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/widgets.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/widgets.json
new file mode 100644
index 0000000..4a645b0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/widgets.json
@@ -0,0 +1,649 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_hdfs_dashboard",
+      "display_name": "Standard HDFS Dashboard",
+      "section_name": "HDFS_SUMMARY",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "NameNode GC count",
+          "description": "Count of total garbage collections and count of major type garbage collections of the JVM.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "jvm.JvmMetrics.GcCount._rate",
+              "metric_path": "metrics/jvm/gcCount._rate",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "jvm.JvmMetrics.GcCountConcurrentMarkSweep._rate",
+              "metric_path": "metrics/jvm/GcCountConcurrentMarkSweep._rate",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "GC total count",
+              "value": "${jvm.JvmMetrics.GcCount._rate}"
+            },
+            {
+              "name": "GC count of type major collection",
+              "value": "${jvm.JvmMetrics.GcCountConcurrentMarkSweep._rate}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "NameNode GC time",
+          "description": "Total time taken by major type garbage collections in milliseconds.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep._rate",
+              "metric_path": "metrics/jvm/GcTimeMillisConcurrentMarkSweep._rate",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "GC time in major collection",
+              "value": "${jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep._rate}"
+            }
+          ],
+          "properties": {
+            "display_unit": "ms",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "NN Connection Load",
+          "description": "Number of open RPC connections being managed by NameNode.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "rpc.rpc.client.NumOpenConnections",
+              "metric_path": "metrics/rpc/client/NumOpenConnections",
+              "category": "",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "rpc.rpc.datanode.NumOpenConnections",
+              "metric_path": "metrics/rpc/datanode/NumOpenConnections",
+              "category": "",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "Open Client Connections",
+              "value": "${rpc.rpc.client.NumOpenConnections}"
+            },
+            {
+              "name": "Open Datanode Connections",
+              "value": "${rpc.rpc.datanode.NumOpenConnections}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "NameNode Heap",
+          "description": "Heap memory committed and Heap memory used with respect to time.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "jvm.JvmMetrics.MemHeapCommittedM",
+              "metric_path": "metrics/jvm/memHeapCommittedM",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "jvm.JvmMetrics.MemHeapUsedM",
+              "metric_path": "metrics/jvm/memHeapUsedM",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "JVM heap committed",
+              "value": "${jvm.JvmMetrics.MemHeapCommittedM}"
+            },
+            {
+              "name": "JVM heap used",
+              "value": "${jvm.JvmMetrics.MemHeapUsedM}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "NameNode Host Load",
+          "description": "Percentage of CPU and Memory resources being consumed on NameNode host.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "cpu_system",
+              "metric_path": "metrics/cpu/cpu_system",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_user",
+              "metric_path": "metrics/cpu/cpu_user",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_nice",
+              "metric_path": "metrics/cpu/cpu_nice",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_idle",
+              "metric_path": "metrics/cpu/cpu_idle",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_wio",
+              "metric_path": "metrics/cpu/cpu_wio",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "mem_total",
+              "metric_path": "metrics/memory/mem_total",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "mem_free",
+              "metric_path": "metrics/memory/mem_free",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "CPU utilization",
+              "value": "${((cpu_system + cpu_user + cpu_nice)/(cpu_system + cpu_user + cpu_nice + cpu_idle + cpu_wio)) * 100}"
+            },
+            {
+              "name": "Memory utilization",
+              "value": "${((mem_total - mem_free)/mem_total) * 100}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1",
+            "display_unit": "%"
+          }
+        },
+        {
+          "widget_name": "NameNode RPC",
+          "description": "Compares the average time spent for RPC request in a queue and RPC request being processed.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "rpc.rpc.client.RpcQueueTimeAvgTime",
+              "metric_path": "metrics/rpc/client/RpcQueueTime_avg_time",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "rpc.rpc.client.RpcProcessingTimeAvgTime",
+              "metric_path": "metrics/rpc/client/RpcProcessingTime_avg_time",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "rpc.rpc.datanode.RpcQueueTimeAvgTime",
+              "metric_path": "metrics/rpc/datanode/RpcQueueTime_avg_time",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "rpc.rpc.datanode.RpcProcessingTimeAvgTime",
+              "metric_path": "metrics/rpc/datanode/RpcProcessingTime_avg_time",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "Client RPC Queue Wait time",
+              "value": "${rpc.rpc.client.RpcQueueTimeAvgTime}"
+            },
+            {
+              "name": "Client RPC Processing time",
+              "value": "${rpc.rpc.client.RpcProcessingTimeAvgTime}"
+            },
+            {
+              "name": "Datanode RPC Queue Wait time",
+              "value": "${rpc.rpc.datanode.RpcQueueTimeAvgTime}"
+            },
+            {
+              "name": "Datanode RPC Processing time",
+              "value": "${rpc.rpc.datanode.RpcProcessingTimeAvgTime}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1",
+            "display_unit": "ms"
+          }
+        },
+        {
+          "widget_name": "NameNode Operations",
+          "description": "Rate per second of number of file operation over time.",
+          "widget_type": "GRAPH",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "dfs.namenode.TotalFileOps._rate",
+              "metric_path": "metrics/dfs/namenode/TotalFileOps._rate",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "NameNode File Operations",
+              "value": "${dfs.namenode.TotalFileOps._rate}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Failed disk volumes",
+          "description": "Number of Failed disk volumes across all DataNodes. Its indicative of HDFS bad health.",
+          "widget_type": "NUMBER",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.NumFailedVolumes._sum",
+              "metric_path": "metrics/dfs/datanode/NumFailedVolumes",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "Failed disk volumes",
+              "value": "${FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.NumFailedVolumes._sum}"
+            }
+          ],
+          "properties": {
+            "display_unit": ""
+          }
+        },
+        {
+          "widget_name": "Blocks With Corrupted Replicas",
+          "description": "Number represents data blocks with at least one corrupted replica (but not all of them). Its indicative of HDFS bad health.",
+          "widget_type": "NUMBER",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks",
+              "metric_path": "metrics/dfs/FSNamesystem/CorruptBlocks",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "Blocks With Corrupted Replicas",
+              "value": "${Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks}"
+            }
+          ],
+          "properties": {
+            "warning_threshold": "0",
+            "error_threshold": "50"
+          }
+        },
+        {
+          "widget_name": "Under Replicated Blocks",
+          "description": "Number represents file blocks that does not meet the replication factor criteria. Its indicative of HDFS bad health.",
+          "widget_type": "NUMBER",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks",
+              "metric_path": "metrics/dfs/FSNamesystem/UnderReplicatedBlocks",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "Under Replicated Blocks",
+              "value": "${Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks}"
+            }
+          ],
+          "properties": {
+            "warning_threshold": "0",
+            "error_threshold": "50"
+          }
+        },
+        {
+          "widget_name": "HDFS Space Utilization",
+          "description": "Percentage of available space used in the DFS.",
+          "widget_type": "GAUGE",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining",
+              "metric_path": "metrics/FSDatasetState/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl/Remaining",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity",
+              "metric_path": "metrics/dfs/datanode/Capacity",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "HDFS Space Utilization",
+              "value": "${(FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity - FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining)/FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity}"
+            }
+          ],
+          "properties": {
+            "warning_threshold": "0.75",
+            "error_threshold": "0.9"
+          }
+        }
+      ]
+    },
+    {
+      "layout_name": "default_hdfs_heatmap",
+      "section_name": "HDFS_HEATMAPS",
+      "display_name": "HDFS Heatmaps",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "HDFS Bytes Read",
+          "default_section_name": "HDFS_HEATMAPS",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "dfs.datanode.BytesRead._rate",
+              "metric_path": "metrics/dfs/datanode/bytes_read._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "HDFS Bytes Read",
+              "value": "${dfs.datanode.BytesRead._rate}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "1024"
+          }
+        },
+        {
+          "widget_name": "HDFS Bytes Written",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "dfs.datanode.BytesWritten._rate",
+              "metric_path": "metrics/dfs/datanode/bytes_written._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "HDFS Bytes Written",
+              "value": "${dfs.datanode.BytesWritten._rate}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "1024"
+          }
+        },
+        {
+          "widget_name": "DataNode Garbage Collection Time",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=DataNode,name=JvmMetrics.GcTimeMillis",
+              "metric_path": "metrics/jvm/gcTimeMillis",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode Garbage Collection Time",
+              "value": "${Hadoop:service=DataNode,name=JvmMetrics.GcTimeMillis}"
+            }
+          ],
+          "properties": {
+            "display_unit": "ms",
+            "max_limit": "10000"
+          }
+        },
+        {
+          "widget_name": "DataNode JVM Heap Memory Used",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=DataNode,name=JvmMetrics.MemHeapUsedM",
+              "metric_path": "metrics/jvm/memHeapUsedM",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode JVM Heap Memory Used",
+              "value": "${Hadoop:service=DataNode,name=JvmMetrics.MemHeapUsedM}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "512"
+          }
+        },
+        {
+          "widget_name": "DataNode JVM Heap Memory Committed",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=DataNode,name=JvmMetrics.MemHeapCommittedM",
+              "metric_path": "metrics/jvm/memHeapCommittedM",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode JVM Heap Memory Committed",
+              "value": "${Hadoop:service=DataNode,name=JvmMetrics.MemHeapCommittedM}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "512"
+          }
+        },
+        {
+          "widget_name": "DataNode Process Disk I/O Utilization",
+          "default_section_name": "HDFS_HEATMAPS",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "dfs.datanode.BytesRead._rate",
+              "metric_path": "metrics/dfs/datanode/bytes_read._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.BytesWritten._rate",
+              "metric_path": "metrics/dfs/datanode/bytes_written._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.TotalReadTime._rate",
+              "metric_path": "metrics/dfs/datanode/TotalReadTime._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.TotalWriteTime._rate",
+              "metric_path": "metrics/dfs/datanode/TotalWriteTime._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode Process Disk I/O Utilization",
+              "value": "${((dfs.datanode.BytesRead._rate/dfs.datanode.TotalReadTime._rate)+(dfs.datanode.BytesWritten._rate/dfs.datanode.TotalWriteTime._rate))*50}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "DataNode Process Network I/O Utilization",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "dfs.datanode.RemoteBytesRead._rate",
+              "metric_path": "metrics/dfs/datanode/RemoteBytesRead._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.ReadsFromRemoteClient._rate",
+              "metric_path": "metrics/dfs/datanode/reads_from_remote_client._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.RemoteBytesWritten._rate",
+              "metric_path": "metrics/dfs/datanode/RemoteBytesWritten._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.WritesFromRemoteClient._rate",
+              "metric_path": "metrics/dfs/datanode/writes_from_remote_client._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode Process Network I/O Utilization",
+              "value": "${((dfs.datanode.RemoteBytesRead._rate/dfs.datanode.ReadsFromRemoteClient._rate)+(dfs.datanode.RemoteBytesWritten._rate/dfs.datanode.WritesFromRemoteClient._rate))*50}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "HDFS Space Utilization",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining",
+              "metric_path": "metrics/FSDatasetState/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl/Remaining",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity",
+              "metric_path": "metrics/dfs/datanode/Capacity",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "HDFS Space Utilization",
+              "value": "${((FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity - FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining)/FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        }
+      ]
+    }
+  ]
+}

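The ${...} expressions in the "values" entries above are arithmetic over the declared metrics; for instance, the HDFS Space Utilization gauge computes (Capacity - Remaining) / Capacity against the 0.75 / 0.9 thresholds, and the NameNode Host Load graph turns the cpu_* metrics into a percentage. The sketch below (not part of this patch) simply evaluates those two expressions with made-up metric values.

    # Minimal sketch, not part of this patch: evaluate two of the widget expressions
    # with made-up numbers; the Ambari Metrics System supplies the real values.
    capacity = 10 * 1024 ** 4    # hypothetical total DataNode capacity (10 TiB)
    remaining = 2 * 1024 ** 4    # hypothetical remaining space (2 TiB)

    # "HDFS Space Utilization" gauge: warning at 0.75, error at 0.9
    space_utilization = (capacity - remaining) / capacity          # 0.8

    # "NameNode Host Load" CPU utilization, in percent
    cpu = {'system': 5.0, 'user': 20.0, 'nice': 0.0, 'idle': 70.0, 'wio': 5.0}  # hypothetical
    cpu_utilization = (cpu['system'] + cpu['user'] + cpu['nice']) / sum(cpu.values()) * 100  # 25.0
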
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/kerberos.json
new file mode 100644
index 0000000..9000e95
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/kerberos.json
@@ -0,0 +1,246 @@
+{
+  "services": [
+    {
+      "name": "HDFS",
+      "identities": [
+        {
+          "name": "/spnego",
+          "principal": {
+            "configuration": "hdfs-site/dfs.web.authentication.kerberos.principal"
+          },
+          "keytab": {
+            "configuration": "hdfs-site/dfs.web.authentication.kerberos.keytab"
+          }
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "auth_to_local_properties" : [
+        "core-site/hadoop.security.auth_to_local"
+      ],
+      "configurations": [
+        {
+          "core-site": {
+            "hadoop.security.authentication": "kerberos",
+            "hadoop.security.authorization": "true",
+            "hadoop.proxyuser.HTTP.groups": "${hadoop-env/proxyuser_group}"
+          }
+        },
+        {
+          "ranger-hdfs-audit": {
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name":  "HDFS_CLIENT",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            }
+          ]
+        },
+        {
+          "name": "NAMENODE",
+          "identities": [
+            {
+              "name": "hdfs",
+              "principal": {
+                "value": "${hadoop-env/hdfs_user}-${cluster_name|toLower()}@${realm}",
+                "type" : "user" ,
+                "configuration": "hadoop-env/hdfs_principal_name",
+                "local_username" : "${hadoop-env/hdfs_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hdfs.headless.keytab",
+                "owner": {
+                  "name": "${hadoop-env/hdfs_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hadoop-env/hdfs_user_keytab"
+              }
+            },
+            {
+              "name": "namenode_nn",
+              "principal": {
+                "value": "nn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hdfs-site/dfs.namenode.kerberos.principal",
+                "local_username" : "${hadoop-env/hdfs_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nn.service.keytab",
+                "owner": {
+                  "name": "${hadoop-env/hdfs_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hdfs-site/dfs.namenode.keytab.file"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hdfs-site/dfs.namenode.kerberos.internal.spnego.principal"
+              }
+            },
+            {
+              "name": "/HDFS/NAMENODE/namenode_nn",
+              "principal": {
+                "configuration": "ranger-hdfs-audit/xasecure.audit.jaas.Client.option.principal"                
+              },
+              "keytab": {
+                "configuration": "ranger-hdfs-audit/xasecure.audit.jaas.Client.option.keyTab"
+              }
+            }
+          ],
+          "configurations": [
+            {
+              "hdfs-site": {
+                "dfs.block.access.token.enable": "true"
+              }
+            }
+          ]
+        },
+        {
+          "name": "DATANODE",
+          "identities": [
+            {
+              "name": "datanode_dn",
+              "principal": {
+                "value": "dn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hdfs-site/dfs.datanode.kerberos.principal",
+                "local_username" : "${hadoop-env/hdfs_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/dn.service.keytab",
+                "owner": {
+                  "name": "${hadoop-env/hdfs_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hdfs-site/dfs.datanode.keytab.file"
+              }
+            }
+          ],
+          "configurations" : [
+            {
+              "hdfs-site" : {
+                "dfs.datanode.address" : "0.0.0.0:1019",
+                "dfs.datanode.http.address": "0.0.0.0:1022"
+              }
+            }
+          ]
+        },
+        {
+          "name": "SECONDARY_NAMENODE",
+          "identities": [
+            {
+              "name": "secondary_namenode_nn",
+              "principal": {
+                "value": "nn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hdfs-site/dfs.secondary.namenode.kerberos.principal",
+                "local_username" : "${hadoop-env/hdfs_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nn.service.keytab",
+                "owner": {
+                  "name": "${hadoop-env/hdfs_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hdfs-site/dfs.secondary.namenode.keytab.file"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hdfs-site/dfs.secondary.namenode.kerberos.internal.spnego.principal"
+              }
+            }
+          ]
+        },
+        {
+          "name": "NFS_GATEWAY",
+          "identities": [
+            {
+              "name": "nfsgateway",
+              "principal": {
+                "value": "nfs/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hdfs-site/nfs.kerberos.principal",
+                "local_username" : "${hadoop-env/hdfs_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nfs.service.keytab",
+                "owner": {
+                  "name": "${hadoop-env/hdfs_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hdfs-site/nfs.keytab.file"
+              }
+            }
+          ]
+        },
+        {
+          "name": "JOURNALNODE",
+          "identities": [
+            {
+              "name": "journalnode_jn",
+              "principal": {
+                "value": "jn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hdfs-site/dfs.journalnode.kerberos.principal",
+                "local_username" : "${hadoop-env/hdfs_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/jn.service.keytab",
+                "owner": {
+                  "name": "${hadoop-env/hdfs_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hdfs-site/dfs.journalnode.keytab.file"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hdfs-site/dfs.journalnode.kerberos.internal.spnego.principal"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}

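Principal values in the descriptor above are templates: _HOST stands for the component's host name and ${...} references (e.g. ${realm}, ${hadoop-env/hdfs_user}) are resolved from cluster configuration when Ambari's Kerberos wizard generates principals and keytabs (Hadoop performs a similar _HOST substitution at runtime). The sketch below (not part of this patch) shows the shape of that substitution for a service principal; the host name and realm are made-up examples.

    # Minimal sketch, not part of this patch: resolve a principal template such as
    # "nn/_HOST@${realm}". Host and realm are assumptions; Ambari performs the real
    # substitution per host when creating principals.
    def resolve_principal(template, host_fqdn, variables):
        principal = template.replace('_HOST', host_fqdn.lower())
        for key, value in variables.items():
            principal = principal.replace('${%s}' % key, value)
        return principal

    resolve_principal('nn/_HOST@${realm}', 'nn1.example.com', {'realm': 'EXAMPLE.COM'})
    # -> 'nn/nn1.example.com@EXAMPLE.COM'
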
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/metainfo.xml
new file mode 100644
index 0000000..30c49c7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/metainfo.xml
@@ -0,0 +1,190 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <displayName>HDFS</displayName>
+      <comment>Apache Hadoop Distributed File System</comment>
+      <version>2.7.1.3.0</version>
+      <extends>common-services/HDFS/2.1.0.2.0</extends>
+
+      <components>
+        <!-- NFS Gateway was added in HDP 2.3. -->
+        <component>
+          <name>NFS_GATEWAY</name>
+          <displayName>NFSGateway</displayName>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <category>SLAVE</category>
+          <commandScript>
+            <script>scripts/nfsgateway.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+        </component>
+
+        <component>
+          <name>ZKFC</name>
+          <versionAdvertised>true</versionAdvertised>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>rpcbind</name>
+              <condition>should_install_rpcbind</condition>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat7,amazon2015,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>hadoop_${stack_version}</name>
+            </package>
+            <package>
+              <name>hadoop_${stack_version}-client</name>
+            </package>
+            <package>
+              <name>snappy</name>
+            </package>
+            <package>
+              <name>snappy-devel</name>
+            </package>
+            <package>
+              <name>lzo</name>
+              <skipUpgrade>true</skipUpgrade>
+              <condition>should_install_lzo</condition>
+            </package>
+            <package>
+              <name>hadooplzo_${stack_version}</name>
+              <condition>should_install_lzo</condition>
+            </package>
+            <package>
+              <name>hadooplzo_${stack_version}-native</name>
+              <condition>should_install_lzo</condition>
+            </package>
+            <package>
+              <name>hadoop_${stack_version}-libhdfs</name>
+            </package>
+          </packages>
+        </osSpecific>
+
+        <osSpecific>
+          <osFamily>suse12</osFamily>
+          <packages>
+            <package>
+              <name>hadoop_${stack_version}</name>
+            </package>
+            <package>
+              <name>hadoop_${stack_version}-client</name>
+            </package>
+            <package>
+              <name>snappy</name>
+            </package>
+            <package>
+              <name>snappy-devel</name>
+            </package>
+            <package>
+              <name>liblzo2-2</name>
+              <skipUpgrade>true</skipUpgrade>
+              <condition>should_install_lzo</condition>
+            </package>
+            <package>
+              <name>hadooplzo_${stack_version}</name>
+              <condition>should_install_lzo</condition>
+            </package>
+            <package>
+              <name>hadooplzo_${stack_version}-native</name>
+              <condition>should_install_lzo</condition>
+            </package>
+            <package>
+              <name>hadoop_${stack_version}-libhdfs</name>
+            </package>
+          </packages>
+        </osSpecific>
+
+        <osSpecific>
+          <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
+          <packages>
+            <package>
+              <name>hadoop-${stack_version}-client</name>
+            </package>
+            <package>
+              <name>hadoop-${stack_version}-hdfs-datanode</name>
+            </package>
+            <package>
+              <name>hadoop-${stack_version}-hdfs-journalnode</name>
+            </package>
+            <package>
+              <name>hadoop-${stack_version}-hdfs-namenode</name>
+            </package>
+            <package>
+              <name>hadoop-${stack_version}-hdfs-secondarynamenode</name>
+            </package>
+            <package>
+              <name>hadoop-${stack_version}-hdfs-zkfc</name>
+            </package>
+            <package>
+              <name>libsnappy1</name>
+            </package>
+            <package>
+              <name>libsnappy-dev</name>
+            </package>
+            <package>
+              <name>hadooplzo-${stack_version}</name>
+              <condition>should_install_lzo</condition>
+            </package>
+            <package>
+              <name>libhdfs0-${stack_version}</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <quickLinksConfigurations>
+        <quickLinksConfiguration>
+          <fileName>quicklinks.json</fileName>
+          <default>true</default>
+        </quickLinksConfiguration>
+      </quickLinksConfigurations>
+
+      <themes>
+        <theme>
+          <fileName>theme.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+
+    </service>
+  </services>
+</metainfo>
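
The metainfo above extends common-services/HDFS/2.1.0.2.0 for HDP 3.0 and selects packages per osFamily, gating some entries (rpcbind, lzo, hadooplzo) behind named conditions such as should_install_rpcbind and should_install_lzo. The following is a hedged sketch of that selection in plain JavaScript; the condition flags passed in are hypothetical stand-ins for Ambari's own condition evaluation.

// Illustrative sketch, not Ambari's installer: collect package names for an OS family,
// including the "any" family, and drop entries whose condition flag is false.
var osSpecifics = [
  {
    osFamily: 'any',
    packages: [{ name: 'rpcbind', condition: 'should_install_rpcbind' }]
  },
  {
    osFamily: 'redhat7,amazon2015,redhat6,suse11',
    packages: [
      { name: 'hadoop_${stack_version}' },
      { name: 'lzo', condition: 'should_install_lzo' },
      { name: 'hadooplzo_${stack_version}', condition: 'should_install_lzo' }
    ]
  }
];

function packagesFor(osFamily, conditions) {
  return osSpecifics.filter(function (os) {
    return os.osFamily === 'any' || os.osFamily.split(',').indexOf(osFamily) !== -1;
  }).reduce(function (names, os) {
    os.packages.forEach(function (pkg) {
      if (!pkg.condition || conditions[pkg.condition]) {
        names.push(pkg.name);
      }
    });
    return names;
  }, []);
}

console.log(packagesFor('redhat7', { should_install_rpcbind: true, should_install_lzo: false }));
// -> [ 'rpcbind', 'hadoop_${stack_version}' ]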

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/quicklinks/quicklinks.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/quicklinks/quicklinks.json
new file mode 100644
index 0000000..5318ba0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/quicklinks/quicklinks.json
@@ -0,0 +1,80 @@
+{
+  "name": "default",
+  "description": "default quick links configuration",
+  "configuration": {
+    "protocol":
+    {
+      "type":"https",
+      "checks":[
+        {
+          "property":"dfs.http.policy",
+          "desired":"HTTPS_ONLY",
+          "site":"hdfs-site"
+        }
+      ]
+    },
+
+    "links": [
+      {
+        "name": "namenode_ui",
+        "label": "NameNode UI",
+        "component_name": "NAMENODE",
+        "url":"%@://%@:%@",
+        "requires_user_name": "false",
+        "port":{
+          "http_property": "dfs.namenode.http-address",
+          "http_default_port": "50070",
+          "https_property": "dfs.namenode.https-address",
+          "https_default_port": "50470",
+          "regex": "\\w*:(\\d+)",
+          "site": "hdfs-site"
+        }
+      },
+      {
+        "name": "namenode_logs",
+        "label": "NameNode Logs",
+        "component_name": "NAMENODE",
+        "url":"%@://%@:%@/logs",
+        "requires_user_name": "false",
+        "port":{
+          "http_property": "dfs.namenode.http-address",
+          "http_default_port": "50070",
+          "https_property": "dfs.namenode.https-address",
+          "https_default_port": "50470",
+          "regex": "\\w*:(\\d+)",
+          "site": "hdfs-site"
+        }
+      },
+      {
+        "name": "namenode_jmx",
+        "label": "NameNode JMX",
+        "component_name": "NAMENODE",
+        "url":"%@://%@:%@/jmx",
+        "requires_user_name": "false",
+        "port":{
+          "http_property": "dfs.namenode.http-address",
+          "http_default_port": "50070",
+          "https_property": "dfs.namenode.https-address",
+          "https_default_port": "50470",
+          "regex": "\\w*:(\\d+)",
+          "site": "hdfs-site"
+        }
+      },
+      {
+        "name": "Thread Stacks",
+        "label": "Thread Stacks",
+        "component_name": "NAMENODE",
+        "url":"%@://%@:%@/stacks",
+        "requires_user_name": "false",
+        "port":{
+          "http_property": "dfs.namenode.http-address",
+          "http_default_port": "50070",
+          "https_property": "dfs.namenode.https-address",
+          "https_default_port": "50470",
+          "regex": "\\w*:(\\d+)",
+          "site": "hdfs-site"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file
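
Each link above is a URL template ("%@://%@:%@") plus a port descriptor: the protocol block switches to https only when hdfs-site/dfs.http.policy is HTTPS_ONLY, and the port is taken from the http or https address property via the given regex, falling back to the default port. A simplified, hedged sketch of that assembly follows (plain JavaScript, not the actual ambari-web code path; the host name and property values are assumptions):

// Illustrative sketch: fill the "%@://%@:%@" template with protocol, host and port
// derived from the quick-link descriptor above.
function buildQuickLink(link, siteConfig, host, httpsEnabled) {
  var port = link.port;
  var prop = httpsEnabled ? port.https_property : port.http_property;
  var fallback = httpsEnabled ? port.https_default_port : port.http_default_port;
  var address = siteConfig[prop] || '';
  var match = address.match(new RegExp(port.regex)); // e.g. "nn1:50070" -> "50070"
  var resolvedPort = match ? match[1] : fallback;
  var protocol = httpsEnabled ? 'https' : 'http';
  // Each replace() fills the next "%@" placeholder in order: protocol, host, port.
  return link.url.replace('%@', protocol).replace('%@', host).replace('%@', resolvedPort);
}

var namenodeUi = {
  url: '%@://%@:%@',
  port: {
    http_property: 'dfs.namenode.http-address', http_default_port: '50070',
    https_property: 'dfs.namenode.https-address', https_default_port: '50470',
    regex: '\\w*:(\\d+)'
  }
};

console.log(buildQuickLink(namenodeUi, { 'dfs.namenode.http-address': 'nn1:50070' }, 'nn1', false));
// -> http://nn1:50070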

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/themes/theme.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/themes/theme.json
new file mode 100644
index 0000000..6f2b797
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/themes/theme.json
@@ -0,0 +1,179 @@
+{
+  "name": "default",
+  "description": "Default theme for HDFS service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "settings",
+            "display-name": "Settings",
+            "layout": {
+              "tab-columns": "2",
+              "tab-rows": "1",
+              "sections": [
+                {
+                  "name": "section-namenode",
+                  "display-name": "NameNode",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-namenode-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-datanode",
+                  "display-name": "DataNode",
+                  "row-index": "0",
+                  "column-index": "1",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-datanode-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "hdfs-site/dfs.namenode.name.dir",
+          "subsection-name": "subsection-namenode-col1"
+        },
+        {
+          "config": "hadoop-env/namenode_heapsize",
+          "subsection-name": "subsection-namenode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.namenode.handler.count",
+          "subsection-name": "subsection-namenode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.namenode.safemode.threshold-pct",
+          "subsection-name": "subsection-namenode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.datanode.data.dir",
+          "subsection-name": "subsection-datanode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.datanode.failed.volumes.tolerated",
+          "subsection-name": "subsection-datanode-col1"
+        },
+        {
+          "config": "hadoop-env/dtnode_heapsize",
+          "subsection-name": "subsection-datanode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.datanode.max.transfer.threads",
+          "subsection-name": "subsection-datanode-col1"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "hdfs-site/dfs.namenode.name.dir",
+        "widget": {
+          "type": "directories"
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.namenode.safemode.threshold-pct",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "percent"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.namenode.handler.count",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hadoop-env/namenode_heapsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.datanode.failed.volumes.tolerated",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.datanode.data.dir",
+        "widget": {
+          "type": "directories"
+        }
+      },
+      {
+        "config": "hadoop-env/dtnode_heapsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.datanode.max.transfer.threads",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      }
+    ]
+  }
+}
+
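
The theme above drives the enhanced HDFS config page: layouts defines the Settings tab sections, placement maps each config key to a subsection, and widgets picks the control (directories, or a slider with units) for a key. A small illustrative lookup, assuming only the widgets array shape shown above:

// Illustrative lookup only: find which control should render a given config key.
var widgets = [
  { config: 'hdfs-site/dfs.namenode.name.dir', widget: { type: 'directories' } },
  { config: 'hadoop-env/namenode_heapsize',
    widget: { type: 'slider', units: [{ 'unit-name': 'GB' }] } }
];

function widgetFor(configKey) {
  var entry = widgets.filter(function (w) { return w.config === configKey; })[0];
  // Configs without a widget entry would fall back to default rendering (assumption).
  return entry ? entry.widget : null;
}

console.log(widgetFor('hadoop-env/namenode_heapsize').type); // -> slider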


http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/resources/dashboards/README.txt
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/dashboards/README.txt b/ambari-server/src/test/resources/dashboards/README.txt
new file mode 100644
index 0000000..919d510
--- /dev/null
+++ b/ambari-server/src/test/resources/dashboards/README.txt
@@ -0,0 +1,18 @@
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+
+Test directory for syncing a component's custom folder - AMBARI-19018

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/resources/stacks/HDP/0.1/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/0.1/services/HDFS/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/0.1/services/HDFS/metainfo.xml
index f4195a8..7629552 100644
--- a/ambari-server/src/test/resources/stacks/HDP/0.1/services/HDFS/metainfo.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/0.1/services/HDFS/metainfo.xml
@@ -57,6 +57,7 @@
           <name>DATANODE</name>
           <category>SLAVE</category>
           <cardinality>1+</cardinality>
+          <customFolder>dashboards</customFolder>
           <commandScript>
             <script>scripts/datanode.py</script>
             <scriptType>PYTHON</scriptType>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/resources/stacks/HDP/2.2.0/services/GANGLIA/upgrades/HDP/rolling-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.2.0/services/GANGLIA/upgrades/HDP/rolling-upgrade.xml b/ambari-server/src/test/resources/stacks/HDP/2.2.0/services/GANGLIA/upgrades/HDP/rolling-upgrade.xml
new file mode 100644
index 0000000..1d34c59
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.2.0/services/GANGLIA/upgrades/HDP/rolling-upgrade.xml
@@ -0,0 +1,43 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<upgrade xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="upgrade-pack.xsd">
+  <target>*</target>
+  <target-stack>*</target-stack>
+  <type>ROLLING</type>
+  <order>
+    <group name="GANGLIA_UPGRADE" title="Ganglia">
+      <add-after-group>ZOOKEEPER_CONFIG_CONDITION_TEST</add-after-group>
+      <skippable>true</skippable>
+      <allow-retry>true</allow-retry>
+      <service name="GANGLIA">
+        <component>HST_SERVER</component>
+      </service>
+    </group>
+  </order>
+
+  <processing>
+    <service name="GANGLIA">
+      <component name="GANGLIA_SERVER">
+        <upgrade>
+          <task xsi:type="restart-task" />
+        </upgrade>
+      </component>
+    </service>
+  </processing>
+</upgrade>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/controllers/global/update_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/global/update_controller.js b/ambari-web/app/controllers/global/update_controller.js
index 4631f09..ce93b34 100644
--- a/ambari-web/app/controllers/global/update_controller.js
+++ b/ambari-web/app/controllers/global/update_controller.js
@@ -249,11 +249,10 @@ App.UpdateController = Em.Controller.extend({
         ]);
       }
       else {
-        // clusterController.isHostsLoaded may be changed in callback, that is why it's value is cached before calling callback
-        isHostsLoaded = App.router.get('clusterController.isHostsLoaded');
-        callback();
         // On pages except for hosts/hostDetails, making sure hostsMapper loaded only once on page load, no need to update, but at least once
+        isHostsLoaded = App.router.get('clusterController.isHostsLoaded');
         if (isHostsLoaded) {
+          callback();
           return;
         }
       }

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/controllers/main/admin/highAvailability/journalNode/step1_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/highAvailability/journalNode/step1_controller.js b/ambari-web/app/controllers/main/admin/highAvailability/journalNode/step1_controller.js
index b6ffe5b..e1252b7 100644
--- a/ambari-web/app/controllers/main/admin/highAvailability/journalNode/step1_controller.js
+++ b/ambari-web/app/controllers/main/admin/highAvailability/journalNode/step1_controller.js
@@ -28,8 +28,6 @@ App.ManageJournalNodeWizardStep1Controller = Em.Controller.extend(App.BlueprintM
 
   mastersToShow: ['JOURNALNODE'],
 
-  mastersToAdd: [],
-
   showInstalledMastersFirst: true,
 
   JOURNALNODES_COUNT_MINIMUM: 3, // TODO get this from stack
@@ -41,14 +39,9 @@ App.ManageJournalNodeWizardStep1Controller = Em.Controller.extend(App.BlueprintM
    * @param masterComponents
    */
   renderComponents: function(masterComponents) {
-    var jns = App.HostComponent.find().filterProperty('componentName', 'JOURNALNODE');
-    var count = jns.get('length');
-    this.set('mastersToAdd', []);
-    if (masterComponents.filterProperty('component_name', 'JOURNALNODE').length == 0) {
-      for (var i = 0; i < count; i++) {
-        this.get('mastersToAdd').push('JOURNALNODE');
-      }
-    }
+    // Check whether we are restoring the components assignment by checking for an existing JOURNALNODE component in the array
+    var restoringComponents = masterComponents.someProperty('component_name', 'JOURNALNODE');
+    masterComponents = restoringComponents ? masterComponents : masterComponents.concat(this.generateJournalNodeComponents());
     this._super(masterComponents);
     this.updateJournalNodeInfo();
     this.showHideJournalNodesAddRemoveControl();
@@ -56,6 +49,19 @@ App.ManageJournalNodeWizardStep1Controller = Em.Controller.extend(App.BlueprintM
   },
 
   /**
+   * Create JOURNALNODE components to add to the masters array
+   */
+  generateJournalNodeComponents: function () {
+    var journalNodes = [];
+    App.HostComponent.find().filterProperty('componentName', 'JOURNALNODE').forEach(function (jn) {
+      var jnComponent = this.createComponentInstallationObject(Em.Object.create({serviceName: jn.get('service.serviceName'), componentName: jn.get('componentName')}), jn.get('hostName'));
+      jnComponent.isInstalled = true;
+      journalNodes.push(jnComponent);
+    }, this);
+    return journalNodes;
+  },
+
+  /**
    * Enable/Disable show/hide operation for each JournalNode
    */
   showHideJournalNodesAddRemoveControl: function() {

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/controllers/main/admin/highAvailability/journalNode/step4_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/highAvailability/journalNode/step4_controller.js b/ambari-web/app/controllers/main/admin/highAvailability/journalNode/step4_controller.js
index 329b8fe..1938527 100644
--- a/ambari-web/app/controllers/main/admin/highAvailability/journalNode/step4_controller.js
+++ b/ambari-web/app/controllers/main/admin/highAvailability/journalNode/step4_controller.js
@@ -60,11 +60,7 @@ App.ManageJournalNodeWizardStep4Controller = App.ManageJournalNodeProgressPageCo
 
   reconfigureHDFS: function () {
     var data = this.get('content.serviceConfigProperties');
-    if (App.get('isKerberosEnabled')) {
-      // TODO this.reconfigureSecureHDFS();
-    } else {
-      this.updateConfigProperties(data);
-    }
+    this.updateConfigProperties(data);
   },
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/controllers/main/admin/highAvailability/journalNode/wizard_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/highAvailability/journalNode/wizard_controller.js b/ambari-web/app/controllers/main/admin/highAvailability/journalNode/wizard_controller.js
index 2305524..fe9a15b 100644
--- a/ambari-web/app/controllers/main/admin/highAvailability/journalNode/wizard_controller.js
+++ b/ambari-web/app/controllers/main/admin/highAvailability/journalNode/wizard_controller.js
@@ -120,22 +120,30 @@ App.ManageJournalNodeWizardController = App.WizardController.extend({
   },
 
   getJournalNodesToAdd: function () {
-    return this.get('content.masterComponentHosts').filterProperty('component', 'JOURNALNODE')
-      .filterProperty('isInstalled', false).mapProperty('hostName');
+    var result = [];
+    var masterComponentHosts = this.get('content.masterComponentHosts');
+    if (masterComponentHosts) {
+      result = masterComponentHosts.filterProperty('component', 'JOURNALNODE').filterProperty('isInstalled', false).mapProperty('hostName');
+    }
+    return result;
   },
 
   getJournalNodesToDelete: function () {
-    var existingHosts = App.HostComponent.find().filterProperty('componentName', 'JOURNALNODE').mapProperty('hostName');
-    var currentJNs = this.get('content.masterComponentHosts').filterProperty('component', 'JOURNALNODE');
-    var removed = existingHosts.filter(function(host) {
-      return currentJNs.filterProperty('hostName', host).length == 0;
-    });
-    return removed;
+    var result = [];
+    var masterComponentHosts = this.get('content.masterComponentHosts');
+    if (masterComponentHosts) {
+      var currentJNs = masterComponentHosts.filterProperty('component', 'JOURNALNODE');
+      var existingHosts = App.HostComponent.find().filterProperty('componentName', 'JOURNALNODE').mapProperty('hostName');
+      result = existingHosts.filter(function(host) {
+        return currentJNs.filterProperty('hostName', host).length == 0;
+      });
+    }
+    return result;
   },
 
   isDeleteOnly: function () {
-    return this.getJournalNodesToAdd().length == 0 && this.getJournalNodesToDelete().length > 0;
-  },
+    return this.get('currentStep') > 1 && this.getJournalNodesToAdd().length == 0 && this.getJournalNodesToDelete().length > 0;
+  }.property('content.masterComponentHosts', 'App.router.clusterController.isHostsLoaded', 'currentStep'),
 
   /**
    * Save config properties
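
Note that isDeleteOnly changes above from a plain method to an Ember computed property depending on content.masterComponentHosts, the hosts-loaded flag, and currentStep; that is why the manage_journalnode_routes.js change later in this commit reads it with controller.get('isDeleteOnly') instead of calling controller.isDeleteOnly(). A minimal sketch of the distinction, assuming the Em global with Ember's function-prototype extensions enabled (as in ambari-web) and hypothetical property names:

// Minimal sketch: a computed property is read with get() and re-evaluated when a
// dependent key changes, whereas a plain function would have to be invoked directly.
var controller = Em.Object.extend({
  journalNodesToAdd: [],        // hypothetical stand-ins for the wizard's derived lists
  journalNodesToDelete: ['h1'],
  currentStep: 2,

  isDeleteOnly: function () {
    return this.get('currentStep') > 1 &&
      this.get('journalNodesToAdd.length') === 0 &&
      this.get('journalNodesToDelete.length') > 0;
  }.property('journalNodesToAdd.length', 'journalNodesToDelete.length', 'currentStep')
}).create();

console.log(controller.get('isDeleteOnly')); // true
controller.set('currentStep', 1);
console.log(controller.get('isDeleteOnly')); // false, recomputed after the dependent key changed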

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/controllers/main/admin/highAvailability/nameNode/step7_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/highAvailability/nameNode/step7_controller.js b/ambari-web/app/controllers/main/admin/highAvailability/nameNode/step7_controller.js
index ae27506..9e14281 100644
--- a/ambari-web/app/controllers/main/admin/highAvailability/nameNode/step7_controller.js
+++ b/ambari-web/app/controllers/main/admin/highAvailability/nameNode/step7_controller.js
@@ -22,14 +22,24 @@ App.HighAvailabilityWizardStep7Controller = App.HighAvailabilityProgressPageCont
 
   name:"highAvailabilityWizardStep7Controller",
 
-  commands: ['startRanger', 'startZooKeeperServers', 'startNameNode'],
+  commands: ['startZooKeeperServers', 'startAmbariInfra', 'startRanger', 'startNameNode'],
 
   initializeTasks: function () {
     this._super();
+    var tasksToRemove = [];
+
+    if (!App.Service.find().someProperty('serviceName', 'AMBARI_INFRA')) {
+      tasksToRemove.push('startAmbariInfra');
+    }
 
     if (!App.Service.find().someProperty('serviceName', 'RANGER')) {
-      this.get('tasks').splice(this.get('tasks').findProperty('command', 'startRanger').get('id'), 1);
+      tasksToRemove.push('startRanger');
     }
+    this.removeTasks(tasksToRemove);
+  },
+
+  startAmbariInfra: function () {
+    this.startServices(false, ['AMBARI_INFRA'], true);
   },
 
   startRanger: function () {

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/controllers/main/admin/highAvailability/nameNode/step9_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/highAvailability/nameNode/step9_controller.js b/ambari-web/app/controllers/main/admin/highAvailability/nameNode/step9_controller.js
index 3c220c1..713e1c3 100644
--- a/ambari-web/app/controllers/main/admin/highAvailability/nameNode/step9_controller.js
+++ b/ambari-web/app/controllers/main/admin/highAvailability/nameNode/step9_controller.js
@@ -31,30 +31,27 @@ App.HighAvailabilityWizardStep9Controller = App.HighAvailabilityProgressPageCont
 
   initializeTasks: function () {
     this._super();
-    var numSpliced = 0;
+    var tasksToRemove = [];
 
     // find hostname where second namenode will be installed
     this.set('secondNameNodeHost', this.get('content.masterComponentHosts').filterProperty('component', 'NAMENODE').findProperty('isInstalled', false).hostName);
 
     if (!App.Service.find().someProperty('serviceName', 'PXF') || this.isPxfComponentInstalled()) {
-      this.get('tasks').splice(this.get('tasks').findProperty('command', 'installPXF').get('id'), 1);
-      numSpliced = 1;
+      tasksToRemove.push('installPXF');
     }
     if (!App.Service.find().someProperty('serviceName', 'RANGER')) {
-      this.get('tasks').splice(this.get('tasks').findProperty('command', 'reconfigureRanger').get('id') - numSpliced, 1);
-      numSpliced++;
+      tasksToRemove.push('reconfigureRanger');
     }
     if (!App.Service.find().someProperty('serviceName', 'HBASE')) {
-      this.get('tasks').splice(this.get('tasks').findProperty('command', 'reconfigureHBase').get('id') - numSpliced, 1);
-      numSpliced++;
+      tasksToRemove.push('reconfigureHBase');
     }
     if (!App.Service.find().someProperty('serviceName', 'ACCUMULO')) {
-      this.get('tasks').splice(this.get('tasks').findProperty('command', 'reconfigureAccumulo').get('id') - numSpliced, 1);
-      numSpliced++ ;
+      tasksToRemove.push('reconfigureAccumulo');
     }
     if (!App.Service.find().someProperty('serviceName', 'HAWQ')) {
-      this.get('tasks').splice(this.get('tasks').findProperty('command', 'reconfigureHawq').get('id') - numSpliced, 1);
+      tasksToRemove.push('reconfigureHawq');
     }
+    this.removeTasks(tasksToRemove);
   },
 
   startSecondNameNode: function () {

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/controllers/main/admin/highAvailability/resourceManager/step4_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/highAvailability/resourceManager/step4_controller.js b/ambari-web/app/controllers/main/admin/highAvailability/resourceManager/step4_controller.js
index 722ae94..0495be9 100644
--- a/ambari-web/app/controllers/main/admin/highAvailability/resourceManager/step4_controller.js
+++ b/ambari-web/app/controllers/main/admin/highAvailability/resourceManager/step4_controller.js
@@ -32,10 +32,11 @@ App.RMHighAvailabilityWizardStep4Controller = App.HighAvailabilityProgressPageCo
 
   initializeTasks: function () {
     this._super();
-    var numSpliced = 0;
+    var tasksToRemove = [];
     if (!App.Service.find().someProperty('serviceName', 'HAWQ')) {
-      this.get('tasks').splice(this.get('tasks').findProperty('command', 'reconfigureHAWQ').get('id'), 1);
+      tasksToRemove.push('reconfigureHAWQ');
     }
+    this.removeTasks(tasksToRemove);
   },
 
   stopRequiredServices: function () {

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js b/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
index ebf6c39..3058580 100644
--- a/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
+++ b/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
@@ -52,6 +52,16 @@ App.MainAdminStackAndUpgradeController = Em.Controller.extend(App.LocalStorage,
   upgradeType: null,
 
   /**
+   * @type {Em.Object}
+   */
+  upgradeTypeConfig: Em.computed.findByKey('upgradeMethods', 'type', 'upgradeType'),
+
+  /**
+   * @type {boolean}
+   */
+  cantBeStarted: Em.computed.alias('upgradeTypeConfig.cantBeStarted'),
+
+  /**
    * @type {boolean}
    * @default true
    */
@@ -160,7 +170,8 @@ App.MainAdminStackAndUpgradeController = Em.Controller.extend(App.LocalStorage,
       precheckResultsMessage: '',
       precheckResultsTitle: '',
       action: '',
-      isWizardRestricted: !App.supports.enabledWizardForHostOrderedUpgrade
+      isWizardRestricted: !App.supports.enabledWizardForHostOrderedUpgrade,
+      cantBeStarted: true
     })
   ],
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/controllers/main/admin/stack_upgrade_history_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/stack_upgrade_history_controller.js b/ambari-web/app/controllers/main/admin/stack_upgrade_history_controller.js
index 9bec825..8a60d83 100644
--- a/ambari-web/app/controllers/main/admin/stack_upgrade_history_controller.js
+++ b/ambari-web/app/controllers/main/admin/stack_upgrade_history_controller.js
@@ -213,5 +213,5 @@ App.MainAdminStackUpgradeHistoryController = Em.ArrayController.extend({
     } else {
       return ''
     }
-  }.property('upgradeData.Upgrade.request_status'),
+  }.property('upgradeData.Upgrade.request_status')
 });

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/controllers/main/service/info/summary.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service/info/summary.js b/ambari-web/app/controllers/main/service/info/summary.js
index 821b796..09f3429 100644
--- a/ambari-web/app/controllers/main/service/info/summary.js
+++ b/ambari-web/app/controllers/main/service/info/summary.js
@@ -386,25 +386,30 @@ App.MainServiceInfoSummaryController = Em.Controller.extend(App.WidgetSectionMix
           var property = context.get('componentName') ? 'componentName' : 'serviceName';
           var serviceDefinitions = this.get('controller.content').filterProperty(property, context.get(property));
           // definitions should be sorted in order: critical, warning, ok, unknown, other
-          var criticalDefinitions = [], warningDefinitions = [], okDefinitions = [], unknownDefinitions = [];
+          var definitionTypes = {
+            "isCritical": [],
+            "isWarning": [],
+            "isOK": [],
+            "isUnknown": []
+          };
+
           serviceDefinitions.forEach(function (definition) {
-            if (definition.get('isCritical')) {
-              criticalDefinitions.push(definition);
-              serviceDefinitions = serviceDefinitions.without(definition);
-            } else if (definition.get('isWarning')) {
-              warningDefinitions.push(definition);
-              serviceDefinitions = serviceDefinitions.without(definition);
-            } else if (definition.get('isOK')) {
-              okDefinitions.push(definition);
-              serviceDefinitions = serviceDefinitions.without(definition);
-            } else if (definition.get('isUnknown')) {
-              unknownDefinitions.push(definition);
-              serviceDefinitions = serviceDefinitions.without(definition);
-            }
+            Object.keys(definitionTypes).forEach(function (type) {
+              if (definition.get(type)) {
+                definition.set('isCollapsed', true);
+                definitionTypes[type].push(definition);
+                serviceDefinitions = serviceDefinitions.without(definition);
+              }
+            });
           });
-          serviceDefinitions = criticalDefinitions.concat(warningDefinitions, okDefinitions, unknownDefinitions, serviceDefinitions);
+          serviceDefinitions = definitionTypes.isCritical.concat(definitionTypes.isWarning, definitionTypes.isOK, definitionTypes.isUnknown, serviceDefinitions);
+
           return serviceDefinitions;
         }.property('controller.content'),
+        onToggleBlock: function (alert) {
+          this.$('#' + alert.context.clientId).toggle('blind', 500);
+          alert.context.set("isCollapsed", !alert.context.get("isCollapsed"));
+        },
         gotoAlertDetails: function (event) {
           if (event && event.context) {
             this.get('parentView').hide();
@@ -855,4 +860,4 @@ App.MainServiceInfoSummaryController = Em.Controller.extend(App.WidgetSectionMix
     App.router.route('main' + event.context.href);
   }
 
-});
\ No newline at end of file
+});
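
The summary controller change above orders alert definitions by severity (critical, warning, ok, unknown, then everything else) by bucketing them in a keyed map instead of four parallel arrays. A standalone sketch of the same ordering with plain objects, where the flag names mirror the model's isCritical/isWarning/isOK/isUnknown properties:

// Standalone sketch of the severity ordering, using plain objects instead of Ember records.
function sortDefinitions(definitions) {
  var buckets = { isCritical: [], isWarning: [], isOK: [], isUnknown: [] };
  var rest = definitions.filter(function (definition) {
    var key = Object.keys(buckets).filter(function (type) { return definition[type]; })[0];
    if (key) {
      buckets[key].push(definition); // claimed by a severity bucket
      return false;
    }
    return true;                     // no severity flag: keep in the trailing group
  });
  return buckets.isCritical.concat(buckets.isWarning, buckets.isOK, buckets.isUnknown, rest);
}

var defs = [{ name: 'a', isOK: true }, { name: 'b', isCritical: true }, { name: 'c' }];
console.log(sortDefinitions(defs).map(function (d) { return d.name; })); // -> [ 'b', 'a', 'c' ]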

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/controllers/main/service/reassign/step4_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service/reassign/step4_controller.js b/ambari-web/app/controllers/main/service/reassign/step4_controller.js
index 26ac68c..da7d2d9 100644
--- a/ambari-web/app/controllers/main/service/reassign/step4_controller.js
+++ b/ambari-web/app/controllers/main/service/reassign/step4_controller.js
@@ -71,6 +71,12 @@ App.ReassignMasterWizardStep4Controller = App.HighAvailabilityProgressPageContro
   hostComponents: [],
 
   /**
+   * List of components that do not need reconfiguration when moved to another host.
+   * The reconfigure command will be skipped for them.
+   */
+  componentsWithoutReconfiguration: ['METRICS_COLLECTOR'],
+
+  /**
    * Map with lists of related services.
    * Used to define list of services to stop/start.
    */
@@ -189,14 +195,6 @@ App.ReassignMasterWizardStep4Controller = App.HighAvailabilityProgressPageContro
       }
     },
     {
-      componentName: 'METRICS_COLLECTOR',
-      configs: {
-        'ams-site': {
-          'timeline.metrics.service.webapp.address': '<replace-value>:6188'
-        }
-      }
-    },
-    {
       componentName: 'HISTORYSERVER',
       configs: {
         'mapred-site': {
@@ -398,32 +396,16 @@ App.ReassignMasterWizardStep4Controller = App.HighAvailabilityProgressPageContro
       this.removeTasks(['startZooKeeperServers', 'startNameNode']);
     }
 
+    if (this.get('componentsWithoutReconfiguration').contains(componentName)) {
+      this.removeTasks(['reconfigure']);
+    }
+
     if (!this.get('content.reassignComponentsInMM.length')) {
       this.removeTasks(['stopHostComponentsInMaintenanceMode']);
     }
   },
 
   /**
-   * remove tasks by command name
-   */
-  removeTasks: function(commands) {
-    var tasks = this.get('tasks');
-
-    commands.forEach(function(command) {
-      var cmd = tasks.filterProperty('command', command);
-      var index = null;
-
-      if (cmd.length === 0) {
-        return false;
-      } else {
-        index = tasks.indexOf( cmd[0] );
-      }
-
-      tasks.splice( index, 1 );
-    });
-  },
-
-  /**
    * initialize tasks
    */
   initializeTasks: function () {
@@ -542,7 +524,6 @@ App.ReassignMasterWizardStep4Controller = App.HighAvailabilityProgressPageContro
     'HIVE_SERVER': ['hive-site', 'webhcat-site', 'hive-env', 'core-site'],
     'HIVE_METASTORE': ['hive-site', 'webhcat-site', 'hive-env', 'core-site'],
     'MYSQL_SERVER': ['hive-site'],
-    'METRICS_COLLECTOR': ['ams-site'],
     'HISTORYSERVER': ['mapred-site']
   },
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/controllers/main/service/reassign/step6_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service/reassign/step6_controller.js b/ambari-web/app/controllers/main/service/reassign/step6_controller.js
index 113686b..80f1bf6 100644
--- a/ambari-web/app/controllers/main/service/reassign/step6_controller.js
+++ b/ambari-web/app/controllers/main/service/reassign/step6_controller.js
@@ -99,25 +99,6 @@ App.ReassignMasterWizardStep6Controller = App.HighAvailabilityProgressPageContro
     }
   },
 
-  /**
-   * remove tasks by command name
-   */
-  removeTasks: function(commands) {
-    var tasks = this.get('tasks');
-
-    commands.forEach(function(command) {
-      var index;
-      tasks.forEach(function(_task, _index) {
-        if (_task.get('command') === command) {
-          index = _index;
-        }
-      });
-      if (!Em.isNone(index)) {
-        tasks.splice(index, 1);
-      }
-    }, this);
-  },
-
   hideRollbackButton: function () {
     var failedTask = this.get('tasks').findProperty('showRollback');
     if (failedTask) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/controllers/wizard/step3_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard/step3_controller.js b/ambari-web/app/controllers/wizard/step3_controller.js
index 793d33d..45180fe 100644
--- a/ambari-web/app/controllers/wizard/step3_controller.js
+++ b/ambari-web/app/controllers/wizard/step3_controller.js
@@ -1081,7 +1081,7 @@ App.WizardStep3Controller = Em.Controller.extend(App.ReloadPopupMixin, {
    */
   getHostCheckTasksSuccess: function (data) {
     if (!data) {
-      return;
+      return this.getGeneralHostCheck();
     }
     if (["FAILED", "COMPLETED", "TIMEDOUT"].contains(data.Requests.request_status)) {
       if (data.Requests.inputs.indexOf("last_agent_env_check") != -1) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/data/HDP2/site_properties.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/data/HDP2/site_properties.js b/ambari-web/app/data/HDP2/site_properties.js
index 5b66506..e2a4d4e 100644
--- a/ambari-web/app/data/HDP2/site_properties.js
+++ b/ambari-web/app/data/HDP2/site_properties.js
@@ -2066,95 +2066,81 @@ var hdp2properties = [
     "index": 5
   },
   {
-    "name": "logsearch_solr_audit_logs_zk_node",
-    "serviceName": "LOGSEARCH",
-    "filename": "logsearch-env.xml",
-    "category": "Advanced logsearch-env",
-    "index": 6
-  },
-  {
-    "name": "logsearch_solr_audit_logs_zk_quorum",
-    "serviceName": "LOGSEARCH",
-    "filename": "logsearch-env.xml",
-    "category": "Advanced logsearch-env",
-    "index": 7
-  },
-  {
     "name": "logsearch_debug_enabled",
     "serviceName": "LOGSEARCH",
     "filename": "logsearch-env.xml",
     "category": "Advanced logsearch-env",
-    "index": 8
+    "index": 6
   },
   {
     "name": "logsearch_debug_port",
     "serviceName": "LOGSEARCH",
     "filename": "logsearch-env.xml",
     "category": "Advanced logsearch-env",
-    "index": 9
+    "index": 7
   },
   {
     "name": "logsearch_truststore_location",
     "serviceName": "LOGSEARCH",
     "filename": "logsearch-env.xml",
     "category": "Advanced logsearch-env",
-    "index": 10
+    "index": 8
   },
   {
     "name": "logsearch_truststore_type",
     "serviceName": "LOGSEARCH",
     "filename": "logsearch-env.xml",
     "category": "Advanced logsearch-env",
-    "index": 11
+    "index": 9
   },
   {
     "name": "logsearch_truststore_password",
     "serviceName": "LOGSEARCH",
     "filename": "logsearch-env.xml",
     "category": "Advanced logsearch-env",
-    "index": 12
+    "index": 10
   },
   {
     "name": "logsearch_keystore_location",
     "serviceName": "LOGSEARCH",
     "filename": "logsearch-env.xml",
     "category": "Advanced logsearch-env",
-    "index": 13
+    "index": 11
   },
   {
     "name": "logsearch_keystore_type",
     "serviceName": "LOGSEARCH",
     "filename": "logsearch-env.xml",
     "category": "Advanced logsearch-env",
-    "index": 14
+    "index": 12
   },
   {
     "name": "logsearch_keystore_password",
     "serviceName": "LOGSEARCH",
     "filename": "logsearch-env.xml",
     "category": "Advanced logsearch-env",
-    "index": 15
+    "index": 13
   },
   {
     "name": "logsearch_kerberos_keytab",
     "serviceName": "LOGSEARCH",
     "filename": "logsearch-env.xml",
     "category": "Advanced logsearch-env",
-    "index": 16
+    "index": 14
   },
   {
     "name": "logsearch_kerberos_principal",
     "serviceName": "LOGSEARCH",
     "filename": "logsearch-env.xml",
     "category": "Advanced logsearch-env",
-    "index": 17
+    "index": 15
   },
   {
     "name": "content",
     "serviceName": "LOGSEARCH",
     "filename": "logsearch-env.xml",
     "category": "Advanced logsearch-env",
-    "index": 18
+    "index": 16
   },
   /*logsearch-log4j*/
   {

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/mappers/stack_upgrade_history_mapper.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mappers/stack_upgrade_history_mapper.js b/ambari-web/app/mappers/stack_upgrade_history_mapper.js
index 63088a7..823ae80 100644
--- a/ambari-web/app/mappers/stack_upgrade_history_mapper.js
+++ b/ambari-web/app/mappers/stack_upgrade_history_mapper.js
@@ -50,5 +50,5 @@ App.stackUpgradeHistoryMapper = App.QuickDataMapper.create({
     App.store.loadMany(this.get('model'), result);
     App.store.commit();
     App.set('isStackUpgradeHistoryLoaded',true);
-  },
+  }
 });

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/messages.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index 5c7e0e8..945acfb 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -344,6 +344,7 @@ Em.I18n.translations = {
   'common.end.time': 'End Time',
   'common.rolling': 'Rolling',
   'common.express': 'Express',
+  'common.hostOrdered': 'Host Ordered',
   'common.rolling.downgrade': 'Rolling Downgrade',
   'common.express.downgrade': 'Express Downgrade',
 
@@ -1403,9 +1404,10 @@ Em.I18n.translations = {
   'admin.highAvailability.wizard.step5.task4.title':'Start JournalNodes',
   'admin.highAvailability.wizard.step5.task5.title':'Disable Secondary NameNode',
 
-  'admin.highAvailability.wizard.step7.task0.title':'Start Ranger',
-  'admin.highAvailability.wizard.step7.task1.title':'Start ZooKeeper Servers',
-  'admin.highAvailability.wizard.step7.task2.title':'Start NameNode',
+  'admin.highAvailability.wizard.step7.task0.title':'Start ZooKeeper Servers',
+  'admin.highAvailability.wizard.step7.task1.title':'Start Ambari Infra',
+  'admin.highAvailability.wizard.step7.task2.title':'Start Ranger',
+  'admin.highAvailability.wizard.step7.task3.title':'Start NameNode',
 
   'admin.highAvailability.wizard.step9.task0.title':'Start Additional NameNode',
   'admin.highAvailability.wizard.step9.task1.title':'Install Failover Controllers',
@@ -3155,7 +3157,6 @@ Em.I18n.translations = {
   'config.infoMessage.wrong.value.for.combobox.widget': '"{0}" is not available in the list of valid values',
   'config.warnMessage.outOfBoundaries.greater': 'Values greater than {0} are not recommended',
   'config.warnMessage.outOfBoundaries.less': 'Values smaller than {0} are not recommended',
-  'config.warnMessage.llap_queue_capacity.max': 'LLAP will consume entire Yarn queue',
 
   'errorMessage.config.required': 'This is required',
   'errorMessage.config.number.integer': 'Must contain digits only',

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/mixins.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins.js b/ambari-web/app/mixins.js
index d3ccbbe..ad682d7 100644
--- a/ambari-web/app/mixins.js
+++ b/ambari-web/app/mixins.js
@@ -66,4 +66,3 @@ require('mixins/common/widgets/widget_mixin');
 require('mixins/common/widgets/widget_section');
 require('mixins/unit_convert/base_unit_convert_mixin');
 require('mixins/unit_convert/convert_unit_widget_view_mixin');
-require('utils/configs/mount_points_based_initializer_mixin');

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/mixins/common/widgets/export_metrics_mixin.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins/common/widgets/export_metrics_mixin.js b/ambari-web/app/mixins/common/widgets/export_metrics_mixin.js
index 7517efd..aa4f77c 100644
--- a/ambari-web/app/mixins/common/widgets/export_metrics_mixin.js
+++ b/ambari-web/app/mixins/common/widgets/export_metrics_mixin.js
@@ -85,6 +85,7 @@ App.ExportMetricsMixin = Em.Mixin.create({
       ticksNumber,
       metricsNumber,
       metricsArray;
+    this.checkGraphDataForValidity(data);
     titles = data.map(function (item) {
       return displayUnit ? item.name + ' (' + displayUnit + ')' : item.name;
     }, this);
@@ -95,12 +96,35 @@ App.ExportMetricsMixin = Em.Mixin.create({
     for (var i = 0; i < ticksNumber; i++) {
       metricsArray.push([data[0].data[i][1]]);
       for (var j = 0; j < metricsNumber; j++) {
-        metricsArray[i + 1].push(data[j].data[i][0]);
+         metricsArray[i + 1].push(data[j].data[i][0]);
       };
-    }
+    };
     return stringUtils.arrayToCSV(metricsArray);
   },
 
+  checkGraphDataForValidity: function (data) {
+    data.sort(function (a, b) {
+      return b.data.length - a.data.length
+    });
+
+    var maxLength = data[0].data.length;
+
+    for (var i = 1; i < data.length; i ++) {
+      if (data[i].data.length !== maxLength) this.fillGraphDataArrayWithMockedData(data[i], maxLength);
+    }
+  },
+
+  fillGraphDataArrayWithMockedData: function (dataArray, neededLength) {
+    var startIndex = dataArray.data.length,
+      timestampInterval = dataArray.data[2][1] - dataArray.data[1][1];
+
+    for (var i = startIndex; i < neededLength; i++) {
+      var previousTimestamp = dataArray.data[i - 1][1];
+
+      dataArray.data.push([null, previousTimestamp + timestampInterval]);
+    }
+  },
+
   jsonReplacer: function () {
     var displayUnit = this.get('targetView.displayUnit');
     return function (key, value) {
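
checkGraphDataForValidity above makes all exported series the same length before the CSV is assembled: the longest series sets the target length, and shorter series are padded with [null, timestamp] points spaced at the interval between their existing timestamps. A standalone illustration of that padding follows (not the mixin itself; the sample series is made up):

// Standalone illustration of the padding behaviour: extend a short series with
// [null, timestamp] entries at the interval observed between existing points.
function padSeries(series, neededLength) {
  // Like the mixin, this assumes the series already has at least three points.
  var interval = series.data[2][1] - series.data[1][1];
  for (var i = series.data.length; i < neededLength; i++) {
    var previousTimestamp = series.data[i - 1][1];
    series.data.push([null, previousTimestamp + interval]);
  }
  return series;
}

var shortSeries = { name: 'bytes_in', data: [[1, 100], [2, 110], [3, 120]] };
padSeries(shortSeries, 5);
console.log(shortSeries.data);
// -> [ [1,100], [2,110], [3,120], [null,130], [null,140] ]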

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/mixins/wizard/assign_master_components.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins/wizard/assign_master_components.js b/ambari-web/app/mixins/wizard/assign_master_components.js
index d8ada11..2c4245d 100644
--- a/ambari-web/app/mixins/wizard/assign_master_components.js
+++ b/ambari-web/app/mixins/wizard/assign_master_components.js
@@ -1236,6 +1236,7 @@ App.AssignMasterComponents = Em.Mixin.create({
       },
       onSecondary: function () {
         this._super();
+        App.router.set('nextBtnClickInProgress', false);
         self.set('submitButtonClicked', false);
       },
       onClose: function () {

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/mixins/wizard/wizardProgressPageController.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins/wizard/wizardProgressPageController.js b/ambari-web/app/mixins/wizard/wizardProgressPageController.js
index 3ee8a9a..d426eda 100644
--- a/ambari-web/app/mixins/wizard/wizardProgressPageController.js
+++ b/ambari-web/app/mixins/wizard/wizardProgressPageController.js
@@ -272,6 +272,26 @@ App.wizardProgressPageControllerMixin = Em.Mixin.create(App.InstallComponent, {
     }
   },
 
+  /**
+   * Remove tasks matching the given command names
+   */
+  removeTasks: function(commands) {
+    var tasks = this.get('tasks');
+
+    commands.forEach(function(command) {
+      var cmd = tasks.filterProperty('command', command);
+      var index = null;
+
+      if (cmd.length === 0) {
+        return false;
+      } else {
+        index = tasks.indexOf( cmd[0] );
+      }
+
+      tasks.splice( index, 1 );
+    });
+  },
+
   setTaskStatus: function (taskId, status) {
     this.get('tasks').findProperty('id', taskId).set('status', status);
   },
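
The removeTasks helper added to the mixin above is what the HA step7/step9 and ResourceManager step4 controllers in this commit now call with a list of optional command names, instead of splicing tasks by recomputed indexes. A standalone sketch of the same behaviour with plain objects (the real tasks are Em.Objects read via filterProperty):

// Standalone sketch: drop the first task whose "command" matches each name in the list.
function removeTasks(tasks, commands) {
  commands.forEach(function (command) {
    var index = tasks.findIndex(function (task) { return task.command === command; });
    if (index !== -1) {
      tasks.splice(index, 1);
    }
  });
  return tasks;
}

var tasks = [
  { command: 'stopRequiredServices' },
  { command: 'reconfigureHAWQ' },
  { command: 'startAllServices' }
];
removeTasks(tasks, ['reconfigureHAWQ']);
console.log(tasks.map(function (t) { return t.command; }));
// -> [ 'stopRequiredServices', 'startAllServices' ]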

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/models/configs/objects/service_config_property.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/configs/objects/service_config_property.js b/ambari-web/app/models/configs/objects/service_config_property.js
index 5d85ae0..4459946 100644
--- a/ambari-web/app/models/configs/objects/service_config_property.js
+++ b/ambari-web/app/models/configs/objects/service_config_property.js
@@ -358,13 +358,6 @@ App.ServiceConfigProperty = Em.Object.extend({
     } else if ((typeof this.get('value') != 'object') && ((this.get('value') + '').length === 0)) {
       var widgetType = this.get('widgetType');
       this.set('errorMessage', (this.get('isRequired') && (!['test-db-connection','label'].contains(widgetType))) ? Em.I18n.t('errorMessage.config.required') : '');
-    } else if (this.get('name') === 'llap_queue_capacity') {
-      if (!isNaN(parseInt(this.get('value'), 10)) && parseInt(this.get('value'), 10) === 100) {
-        this.set('warnMessage', Em.I18n.t('config.warnMessage.llap_queue_capacity.max'));
-      } else {
-        this.set('warnMessage', '');
-        this.set('errorMessage', this.validateErrors(this.get('value'), this.get('name'), this.get('retypedPassword')));
-      }
     } else {
       this.set('errorMessage', this.validateErrors(this.get('value'), this.get('name'), this.get('retypedPassword')));
     }

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/models/stack_service.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/stack_service.js b/ambari-web/app/models/stack_service.js
index 0136ec3..f824b6a 100644
--- a/ambari-web/app/models/stack_service.js
+++ b/ambari-web/app/models/stack_service.js
@@ -46,7 +46,9 @@ App.StackService = DS.Model.extend({
   configs: DS.attr('array'),
   requiredServices: DS.attr('array', {defaultValue: []}),
 
-  isDisabled: Em.computed.or('isMandatory', 'isInstalled'),
+  isDisabled: function () {
+    return this.get('isInstalled') || (this.get('isMandatory') && !App.get('router.clusterInstallCompleted'));
+  }.property('isMandatory', 'isInstalled', 'App.router.clusterInstallCompleted'),
 
   /**
    * @type {String[]}

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/models/stack_version/stack_upgrade_history.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/stack_version/stack_upgrade_history.js b/ambari-web/app/models/stack_version/stack_upgrade_history.js
index d4a89dc..9e0439c 100644
--- a/ambari-web/app/models/stack_version/stack_upgrade_history.js
+++ b/ambari-web/app/models/stack_version/stack_upgrade_history.js
@@ -31,7 +31,7 @@ App.StackUpgradeHistory = DS.Model.extend({
   skipServiceCheckFailures: DS.attr('boolean'),
   endTime: DS.attr('number'),
   startTime: DS.attr('number'),
-  createTime: DS.attr('number'),
+  createTime: DS.attr('number')
 });
 
 App.StackUpgradeHistory.FIXTURES = [];

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/routes/manage_journalnode_routes.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/routes/manage_journalnode_routes.js b/ambari-web/app/routes/manage_journalnode_routes.js
index 50e3ec9..7f004e0 100644
--- a/ambari-web/app/routes/manage_journalnode_routes.js
+++ b/ambari-web/app/routes/manage_journalnode_routes.js
@@ -117,7 +117,7 @@ module.exports = App.WizardRoute.extend({
       controller.saveConfigTag(stepController.get("hdfsSiteTag"));
       controller.saveNameServiceId(stepController.get('content.nameServiceId'));
       App.set('router.nextBtnClickInProgress', false);
-      if (controller.isDeleteOnly()) {
+      if (controller.get('isDeleteOnly')) {
         router.transitionTo('step4');
       } else {
         router.transitionTo('step3');
@@ -166,7 +166,7 @@ module.exports = App.WizardRoute.extend({
     next: function (router) {
       var controller = router.get('manageJournalNodeWizardController');
       controller.clearTasksData();
-      if (controller.isDeleteOnly()) {
+      if (controller.get('isDeleteOnly')) {
         router.transitionTo('step8');
       } else {
         router.transitionTo('step5');

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/styles/stack_versions.less
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/stack_versions.less b/ambari-web/app/styles/stack_versions.less
index 715bbe3..a721596 100644
--- a/ambari-web/app/styles/stack_versions.less
+++ b/ambari-web/app/styles/stack_versions.less
@@ -507,8 +507,13 @@
     margin: 10px;
     .method-option {
       width: 43.5%;
+      margin-bottom: 5px;
     }
     .thumbnail {
+      background-color: #e4e4e4;
+      .method-name,.method-icon,.method-description {
+        color: #333333;
+      }
 
       cursor: pointer;
       border: none;
@@ -528,12 +533,6 @@
       }
     }
 
-    .ROLLING, .NON_ROLLING {
-      background-color: #e4e4e4;
-      .method-name,.method-icon,.method-description {
-        color: #333333;
-      }
-    }
     .ROLLING {
       .method-icon {
         margin-left: 88px;

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/templates/main/admin/highAvailability/journalNode/wizard.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/admin/highAvailability/journalNode/wizard.hbs b/ambari-web/app/templates/main/admin/highAvailability/journalNode/wizard.hbs
index f7ee486..ac8ee41 100644
--- a/ambari-web/app/templates/main/admin/highAvailability/journalNode/wizard.hbs
+++ b/ambari-web/app/templates/main/admin/highAvailability/journalNode/wizard.hbs
@@ -27,11 +27,11 @@
                             <li class="nav-header">{{t admin.manageJournalNode.wizard.header}}</li>
                             <li {{bindAttr class="isStep1:active view.isStep1Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep1 target="controller"}}>{{t admin.manageJournalNode.wizard.step1.header}}</a></li>
                             <li {{bindAttr class="isStep2:active view.isStep2Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep2 target="controller"}}>{{t admin.manageJournalNode.wizard.step2.header}}</a></li>
-                            <li {{bindAttr class="isStep3:active view.isStep3Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep3 target="controller"}}>{{t admin.manageJournalNode.wizard.step3.header}}</a></li>
+                            <li {{bindAttr class="isStep3:active view.isStep3Disabled:disabled isDeleteOnly:hidden"}}><a href="javascript:void(null);"  {{action gotoStep3 target="controller"}}>{{t admin.manageJournalNode.wizard.step3.header}}</a></li>
                             <li {{bindAttr class="isStep4:active view.isStep4Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep4 target="controller"}}>{{t admin.manageJournalNode.wizard.step4.header}}</a></li>
-                            <li {{bindAttr class="isStep5:active view.isStep5Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep5 target="controller"}}>{{t admin.manageJournalNode.wizard.step5.header}}</a></li>
-                            <li {{bindAttr class="isStep6:active view.isStep6Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep6 target="controller"}}>{{t admin.manageJournalNode.wizard.step6.header}}</a></li>
-                            <li {{bindAttr class="isStep7:active view.isStep7Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep7 target="controller"}}>{{t admin.manageJournalNode.wizard.step7.header}}</a></li>
+                            <li {{bindAttr class="isStep5:active view.isStep5Disabled:disabled isDeleteOnly:hidden"}}><a href="javascript:void(null);"  {{action gotoStep5 target="controller"}}>{{t admin.manageJournalNode.wizard.step5.header}}</a></li>
+                            <li {{bindAttr class="isStep6:active view.isStep6Disabled:disabled isDeleteOnly:hidden"}}><a href="javascript:void(null);"  {{action gotoStep6 target="controller"}}>{{t admin.manageJournalNode.wizard.step6.header}}</a></li>
+                            <li {{bindAttr class="isStep7:active view.isStep7Disabled:disabled isDeleteOnly:hidden"}}><a href="javascript:void(null);"  {{action gotoStep7 target="controller"}}>{{t admin.manageJournalNode.wizard.step7.header}}</a></li>
                             <li {{bindAttr class="isStep8:active view.isStep8Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep8 target="controller"}}>{{t admin.manageJournalNode.wizard.step8.header}}</a></li>
                         </ul>
                     </div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/templates/main/admin/stack_upgrade/upgrade_options.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/admin/stack_upgrade/upgrade_options.hbs b/ambari-web/app/templates/main/admin/stack_upgrade/upgrade_options.hbs
index 5369ebd..abe164c 100644
--- a/ambari-web/app/templates/main/admin/stack_upgrade/upgrade_options.hbs
+++ b/ambari-web/app/templates/main/admin/stack_upgrade/upgrade_options.hbs
@@ -20,36 +20,39 @@
   <div class="text version-text">{{{view.versionText}}}</div>
   <div class="text method-text">{{t admin.stackVersions.version.upgrade.upgradeOptions.bodyMsg.method}}</div>
   <div {{bindAttr class=":row-fluid :method-options view.isInUpgradeWizard:disabled"}}>
-    {{#each method in view.upgradeMethods}}
-      <div class="method-option span5">
-        <div {{bindAttr class="method.allowed::not-allowed method.allowed::not-allowed-by-version
-          method.isPrecheckFailed:not-allowed method.isPrecheckFailed:check-failed
-          method.selected:selected method.type :thumbnail"}}
-          {{action selectMethod method target="view"}}>
-            <div {{bindAttr class="method.icon :method-icon"}}></div>
-            <div class="method-name">{{method.displayName}}</div>
-            <div class="method-description">{{{method.description}}}</div>
-            {{#if view.showPreUpgradeChecks}}
-              {{#if method.isCheckRequestInProgress}}
-                <div class="method-precheck-message checking">
-                  {{view App.SpinnerView message="admin.stackVersions.version.upgrade.upgradeOptions.preCheck.msg.checking"}}
-                </div>
-              {{else}}
-                <div {{bindAttr class=":method-precheck-message method.precheckResultsMessageClass"}}>
-                  <i {{bindAttr class="method.precheckResultsMessageIconClass"}}></i>
-                  <b>{{method.precheckResultsTitle}}</b>&nbsp;
-                  <a {{action runAction method target="view"}}>
-                    {{method.precheckResultsMessage}}
-                  </a>
-                  {{#if method.bypassedFailures}}
-                    <div class="alert-danger">{{t admin.stackVersions.version.upgrade.upgradeOptions.errors_bypassed}}</div>
+      {{#each method in view.upgradeMethods}}
+          {{#unless method.cantBeStarted}}
+            <div class="method-option span5">
+              <div {{bindAttr class="method.allowed::not-allowed method.allowed::not-allowed-by-version
+                    method.isPrecheckFailed:not-allowed method.isPrecheckFailed:check-failed
+                    method.selected:selected method.type :thumbnail"}}
+                  {{action selectMethod method target="view"}}>
+                <div {{bindAttr class="method.icon :method-icon"}}></div>
+                <div class="method-name">{{method.displayName}}</div>
+                <div class="method-description">{{{method.description}}}</div>
+                  {{#if view.showPreUpgradeChecks}}
+                      {{#if method.isCheckRequestInProgress}}
+                        <div class="method-precheck-message checking">
+                            {{view App.SpinnerView message="admin.stackVersions.version.upgrade.upgradeOptions.preCheck.msg.checking"}}
+                        </div>
+                      {{else}}
+                        <div {{bindAttr class=":method-precheck-message method.precheckResultsMessageClass"}}>
+                          <i {{bindAttr class="method.precheckResultsMessageIconClass"}}></i>
+                          <b>{{method.precheckResultsTitle}}</b>&nbsp;
+                          <a {{action runAction method target="view"}}>
+                              {{method.precheckResultsMessage}}
+                          </a>
+                            {{#if method.bypassedFailures}}
+                              <div
+                                class="alert-danger">{{t admin.stackVersions.version.upgrade.upgradeOptions.errors_bypassed}}</div>
+                            {{/if}}
+                        </div>
+                      {{/if}}
                   {{/if}}
-                </div>
-              {{/if}}
-            {{/if}}
-        </div>
-      </div>
-    {{/each}}
+              </div>
+            </div>
+          {{/unless}}
+      {{/each}}
   </div>
 
   <div class="text tolerance-text">{{t admin.stackVersions.version.upgrade.upgradeOptions.bodyMsg.tolerance}}

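In upgrade_options.hbs each method card is now wrapped in {{#unless method.cantBeStarted}}, so a method flagged as not startable is dropped from the list entirely instead of being rendered as a disabled thumbnail. All the template needs from the view is a boolean per method object; a sketch with assumed flag values:

// Sketch only: flag values are made up, the real ones come from the upgrade controller.
var upgradeMethods = [
  Em.Object.create({type: 'ROLLING',      displayName: 'Rolling',      cantBeStarted: false}),
  Em.Object.create({type: 'NON_ROLLING',  displayName: 'Express',      cantBeStarted: false}),
  Em.Object.create({type: 'HOST_ORDERED', displayName: 'Host Ordered', cantBeStarted: true})
];

// equivalent of what {{#unless method.cantBeStarted}} leaves in the DOM
upgradeMethods.filterProperty('cantBeStarted', false).mapProperty('type'); // ['ROLLING', 'NON_ROLLING']
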
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/templates/main/service/info/service_alert_popup.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/info/service_alert_popup.hbs b/ambari-web/app/templates/main/service/info/service_alert_popup.hbs
index cbadf06..6ab6b3f 100644
--- a/ambari-web/app/templates/main/service/info/service_alert_popup.hbs
+++ b/ambari-web/app/templates/main/service/info/service_alert_popup.hbs
@@ -22,11 +22,16 @@
       <li class="alert-list-wrap">
         <div class="row-fluid">
           <div class="span8 name-text">
-            <div>
-              <a href="#" {{action "gotoAlertDetails" alert target="view"}}>{{alert.label}}</a>
-            </div>
-            <div class="definition-latest-text">
-              {{alert.latestText}}
+            <div class="accordion-group block">
+              <div class="accordion-heading" {{action "onToggleBlock" alert target="view"}}>
+                <i {{bindAttr class=":pull-left :accordion-toggle alert.isCollapsed:icon-caret-right:icon-caret-down"}}></i>
+                <a class="accordion-toggle" href="#" {{action "gotoAlertDetails" alert target="view"}}>{{alert.label}}</a>
+              </div>
+              <div id="{{unbound alert.clientId}}" class="accordion-body collapse in" style="display: none">
+                <div class="accordion-inner definition-latest-text">
+                  {{alert.latestText}}
+                </div>
+              </div>
             </div>
           </div>
           <div class="span4 status-col" rel="alert-status-tooltip" >

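The service alert popup rows above become small accordions: the template now expects each alert object to carry an isCollapsed flag and a clientId, with the view handling onToggleBlock. A possible handler shape, assumed for illustration rather than taken from this commit:

// Assumed handler (not from this commit): the action receives the alert object
// bound in the template, flips its collapsed state and slides the body element.
onToggleBlock: function (alert) {
  alert.set('isCollapsed', !alert.get('isCollapsed'));
  this.$('#' + alert.get('clientId')).slideToggle(300);
}
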
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/utils/configs/config_initializer.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/configs/config_initializer.js b/ambari-web/app/utils/configs/config_initializer.js
index cb5b41f..de9ca7e 100644
--- a/ambari-web/app/utils/configs/config_initializer.js
+++ b/ambari-web/app/utils/configs/config_initializer.js
@@ -20,7 +20,6 @@ var App = require('app');
 var stringUtils = require('utils/string_utils');
 
 require('utils/configs/config_initializer_class');
-require('utils/configs/mount_points_based_initializer_mixin');
 require('utils/configs/hosts_based_initializer_mixin');
 
 /**
@@ -53,7 +52,7 @@ function getZKBasedConfig() {
  *
  * @instance ConfigInitializer
  */
-App.ConfigInitializer = App.ConfigInitializerClass.create(App.MountPointsBasedInitializerMixin, App.HostsBasedInitializerMixin, {
+App.ConfigInitializer = App.ConfigInitializerClass.create(App.HostsBasedInitializerMixin, {
 
   initializers: function() {
     return {
@@ -111,26 +110,7 @@ App.ConfigInitializer = App.ConfigInitializerClass.create(App.MountPointsBasedIn
       'templeton.zookeeper.hosts': getZKBasedConfig(),
       'hadoop.registry.zk.quorum': getZKBasedConfig(),
       'hive.cluster.delegation.token.store.zookeeper.connectString': getZKBasedConfig(),
-      'instance.zookeeper.host': getZKBasedConfig(),
-
-      'dfs.name.dir': this.getMultipleMountPointsConfig('NAMENODE', 'file'),
-      'dfs.namenode.name.dir': this.getMultipleMountPointsConfig('NAMENODE', 'file'),
-      'dfs.data.dir': this.getMultipleMountPointsConfig('DATANODE', 'file'),
-      'dfs.datanode.data.dir': this.getMultipleMountPointsConfig('DATANODE', 'file'),
-      'yarn.nodemanager.local-dirs': this.getMultipleMountPointsConfig('NODEMANAGER'),
-      'yarn.nodemanager.log-dirs': this.getMultipleMountPointsConfig('NODEMANAGER'),
-      'mapred.local.dir': this.getMultipleMountPointsConfig(['TASKTRACKER', 'NODEMANAGER']),
-      'log.dirs': this.getMultipleMountPointsConfig('KAFKA_BROKER'),
-
-      'fs.checkpoint.dir': this.getSingleMountPointConfig('SECONDARY_NAMENODE', 'file'),
-      'dfs.namenode.checkpoint.dir': this.getSingleMountPointConfig('SECONDARY_NAMENODE', 'file'),
-      'yarn.timeline-service.leveldb-timeline-store.path': this.getSingleMountPointConfig('APP_TIMELINE_SERVER'),
-      'yarn.timeline-service.leveldb-state-store.path': this.getSingleMountPointConfig('APP_TIMELINE_SERVER'),
-      'dataDir': this.getSingleMountPointConfig('ZOOKEEPER_SERVER'),
-      'oozie_data_dir': this.getSingleMountPointConfig('OOZIE_SERVER'),
-      'storm.local.dir': this.getSingleMountPointConfig(['NODEMANAGER', 'NIMBUS']),
-      '*.falcon.graph.storage.directory': this.getSingleMountPointConfig('FALCON_SERVER'),
-      '*.falcon.graph.serialize.path': this.getSingleMountPointConfig('FALCON_SERVER')
+      'instance.zookeeper.host': getZKBasedConfig()
     }
   }.property(''),
 
@@ -146,9 +126,7 @@ App.ConfigInitializer = App.ConfigInitializerClass.create(App.MountPointsBasedIn
   },
 
   initializerTypes: [
-    {name: 'zookeeper_based', method: '_initAsZookeeperServersList'},
-    {name: 'single_mountpoint', method: '_initAsSingleMountPoint'},
-    {name: 'multiple_mountpoints', method: '_initAsMultipleMountPoints'}
+    {name: 'zookeeper_based', method: '_initAsZookeeperServersList'}
   ],
 
   /**

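With the mount-points mixin dropped (the deleted file follows below), App.ConfigInitializer keeps only the zookeeper_based initializer type and no longer derives directory defaults from host mount points on the client side. initializerTypes is simply a name-to-method lookup table; a simplified sketch of resolving one of these entries (not the real ConfigInitializerClass code):

// Simplified sketch of how an initializer type name maps to a handler method.
var initializerTypes = [
  {name: 'zookeeper_based', method: '_initAsZookeeperServersList'}
];

function runInitializer(owner, typeName, configProperty, localDB, dependencies, initializer) {
  var type = initializerTypes.findProperty('name', typeName);
  return type ? owner[type.method](configProperty, localDB, dependencies, initializer) : configProperty;
}
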
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/utils/configs/mount_points_based_initializer_mixin.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/configs/mount_points_based_initializer_mixin.js b/ambari-web/app/utils/configs/mount_points_based_initializer_mixin.js
deleted file mode 100644
index 59a3985..0000000
--- a/ambari-web/app/utils/configs/mount_points_based_initializer_mixin.js
+++ /dev/null
@@ -1,340 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-/**
- * Regexp used to determine if mount point is windows-like
- *
- * @type {RegExp}
- */
-var winRegex = /^([a-z]):\\?$/;
-
-App.MountPointsBasedInitializerMixin = Em.Mixin.create({
-
-  /**
-   * Map for methods used as value-modifiers for configProperties with values as mount point(s)
-   * Used if mount point is win-like (@see winRegex)
-   * Key: id
-   * Value: method-name
-   *
-   * @type {{default: string, file: string, slashes: string}}
-   */
-  winReplacersMap: {
-    default: '_defaultWinReplace',
-    file: '_winReplaceWithFile',
-    slashes: '_defaultWinReplaceWithAdditionalSlashes'
-  },
-
-  /**
-   * Initializer for configs with value as one of the possible mount points
-   * Only hosts that contains on the components from <code>initializer.components</code> are processed
-   * Hosts with Windows needs additional processing (@see winReplacersMap)
-   * Value example: '/', '/some/cool/dir'
-   *
-   * @param {configProperty} configProperty
-   * @param {topologyLocalDB} localDB
-   * @param {object} dependencies
-   * @param {object} initializer
-   * @return {Object}
-   */
-  _initAsSingleMountPoint: function (configProperty, localDB, dependencies, initializer) {
-    var hostsInfo = this._updateHostInfo(localDB.hosts);
-    var setOfHostNames = this._getSetOfHostNames(localDB, initializer);
-    var winReplacersMap = this.get('winReplacersMap');
-    // In Add Host Wizard, if we did not select this slave component for any host, then we don't process any further.
-    if (!setOfHostNames.length) {
-      return configProperty;
-    }
-    var allMountPoints = this._getAllMountPoints(setOfHostNames, hostsInfo, localDB);
-
-    var mPoint = allMountPoints[0].mountpoint;
-    if (mPoint === "/") {
-      mPoint = Em.get(configProperty, 'recommendedValue');
-    }
-    else {
-      var mp = mPoint.toLowerCase();
-      if (winRegex.test(mp)) {
-        var methodName = winReplacersMap[initializer.winReplacer];
-        mPoint = this[methodName].call(this, configProperty, mp);
-      }
-      else {
-        mPoint = mPoint + Em.get(configProperty, 'recommendedValue');
-      }
-    }
-    Em.setProperties(configProperty, {
-      value: mPoint,
-      recommendedValue: mPoint
-    });
-
-    return configProperty;
-  },
-
-  /**
-   * Initializer for configs with value as all of the possible mount points
-   * Only hosts that contains on the components from <code>initializer.components</code> are processed
-   * Hosts with Windows needs additional processing (@see winReplacersMap)
-   * Value example: '/\n/some/cool/dir' (`\n` - is divider)
-   *
-   * @param {Object} configProperty
-   * @param {topologyLocalDB} localDB
-   * @param {object} dependencies
-   * @param {object} initializer
-   * @return {Object}
-   */
-  _initAsMultipleMountPoints: function (configProperty, localDB, dependencies, initializer) {
-    var hostsInfo = this._updateHostInfo(localDB.hosts);
-    var self = this;
-    var setOfHostNames = this._getSetOfHostNames(localDB, initializer);
-    var winReplacersMap = this.get('winReplacersMap');
-    // In Add Host Wizard, if we did not select this slave component for any host, then we don't process any further.
-    if (!setOfHostNames.length) {
-      return configProperty;
-    }
-
-    var allMountPoints = this._getAllMountPoints(setOfHostNames, hostsInfo, localDB);
-    var mPoint = '';
-
-    allMountPoints.forEach(function (eachDrive) {
-      if (eachDrive.mountpoint === '/') {
-        mPoint += Em.get(configProperty, 'recommendedValue') + "\n";
-      }
-      else {
-        var mp = eachDrive.mountpoint.toLowerCase();
-        if (winRegex.test(mp)) {
-          var methodName = winReplacersMap[initializer.winReplacer];
-          mPoint += self[methodName].call(this, configProperty, mp);
-        }
-        else {
-          mPoint += eachDrive.mountpoint + Em.get(configProperty, 'recommendedValue') + "\n";
-        }
-      }
-    }, this);
-
-    Em.setProperties(configProperty, {
-      value: mPoint,
-      recommendedValue: mPoint
-    });
-
-    return configProperty;
-  },
-
-  /**
-   * Replace drive-based windows-path with 'file:///'
-   *
-   * @param {configProperty} configProperty
-   * @param {string} mountPoint
-   * @returns {string}
-   * @private
-   */
-  _winReplaceWithFile: function (configProperty, mountPoint) {
-    var winDriveUrl = mountPoint.toLowerCase().replace(winRegex, 'file:///$1:');
-    return winDriveUrl + Em.get(configProperty, 'recommendedValue') + '\n';
-  },
-
-  /**
-   * Replace drive-based windows-path
-   *
-   * @param {configProperty} configProperty
-   * @param {string} mountPoint
-   * @returns {string}
-   * @private
-   */
-  _defaultWinReplace: function (configProperty, mountPoint) {
-    var winDrive = mountPoint.toLowerCase().replace(winRegex, '$1:');
-    var winDir = Em.get(configProperty, 'recommendedValue').replace(/\//g, '\\');
-    return winDrive + winDir + '\n';
-  },
-
-  /**
-   * Same to <code>_defaultWinReplace</code>, but with extra-slash in the end
-   *
-   * @param {configProperty} configProperty
-   * @param {string} mountPoint
-   * @returns {string}
-   * @private
-   */
-  _defaultWinReplaceWithAdditionalSlashes: function (configProperty, mountPoint) {
-    var winDrive = mountPoint.toLowerCase().replace(winRegex, '$1:');
-    var winDir = Em.get(configProperty, 'recommendedValue').replace(/\//g, '\\\\');
-    return winDrive + winDir + '\n';
-  },
-
-  /**
-   * Update information from localDB using <code>App.Host</code>-model
-   *
-   * @param {object} hostsInfo
-   * @returns {object}
-   * @private
-   */
-  _updateHostInfo: function (hostsInfo) {
-    App.Host.find().forEach(function (item) {
-      if (!hostsInfo[item.get('id')]) {
-        hostsInfo[item.get('id')] = {
-          name: item.get('id'),
-          cpu: item.get('cpu'),
-          memory: item.get('memory'),
-          disk_info: item.get('diskInfo'),
-          bootStatus: "REGISTERED",
-          isInstalled: true
-        };
-      }
-    });
-    return hostsInfo;
-  },
-
-  /**
-   * Determines if mount point is valid
-   * Criterias:
-   * <ul>
-   *   <li>Should has available space</li>
-   *   <li>Should not be home-dir</li>
-   *   <li>Should not be docker-dir</li>
-   *   <li>Should not be boot-dir</li>
-   *   <li>Should not be dev-dir</li>
-   *   <li>Valid mount point started from /usr/hdp/ should be /usr/hdp/current
-   *       or /usr/hdp/<STACK_VERSION_NUMBER> e.g. /usr/hdp/2.5.0.0
-   *   </li>
-   * </ul>
-   *
-   * @param {{mountpoint: string, available: number}} mPoint
-   * @returns {function} true - valid, false - invalid
-   * @private
-   */
-  _filterMountPoint: function (localDB) {
-    var stackVersionNumber = [Em.getWithDefault(localDB.selectedStack || {}, 'repository_version', null)].compact();
-    return function(mPoint) {
-      var isAvailable = mPoint.available !== 0;
-      if (!isAvailable) {
-        return false;
-      }
-
-      var stackRoot = '/usr/hdp';
-      var notHome = !['/', '/home'].contains(mPoint.mountpoint);
-      var notDocker = !['/etc/resolv.conf', '/etc/hostname', '/etc/hosts'].contains(mPoint.mountpoint);
-      var notBoot = mPoint.mountpoint && !(mPoint.mountpoint.startsWith('/boot')
-                                           || mPoint.mountpoint.startsWith('/mnt')
-                                           || mPoint.mountpoint.startsWith('/tmp'));
-      var notDev = !(['devtmpfs', 'tmpfs', 'vboxsf', 'CDFS'].contains(mPoint.type));
-      var validStackRootMount = !(mPoint.mountpoint.startsWith(stackRoot) && !['current'].concat(stackVersionNumber).filter(function(i) {
-        return mPoint.mountpoint === stackRoot + '/' + i;
-      }).length);
-
-      return notHome && notDocker && notBoot && notDev && validStackRootMount;
-    };
-  },
-
-  /**
-   * Get list of hostNames from localDB which contains needed components
-   *
-   * @param {topologyLocalDB} localDB
-   * @param {object} initializer
-   * @returns {string[]}
-   * @private
-   */
-  _getSetOfHostNames: function (localDB, initializer) {
-    var masterComponentHostsInDB = Em.getWithDefault(localDB, 'masterComponentHosts', []);
-    var slaveComponentHostsInDB = Em.getWithDefault(localDB, 'slaveComponentHosts', []);
-    var hosts = masterComponentHostsInDB.filter(function (master) {
-      return initializer.components.contains(master.component);
-    }).mapProperty('hostName');
-
-    var sHosts = slaveComponentHostsInDB.find(function (slave) {
-      return initializer.components.contains(slave.componentName);
-    });
-    if (sHosts) {
-      hosts = hosts.concat(sHosts.hosts.mapProperty('hostName'));
-    }
-    return hosts;
-  },
-
-  /**
-   * Get list of all unique valid mount points for hosts
-   *
-   * @param {string[]} setOfHostNames
-   * @param {object} hostsInfo
-   * @param {topologyLocalDB} localDB
-   * @returns {string[]}
-   * @private
-   */
-  _getAllMountPoints: function (setOfHostNames, hostsInfo, localDB) {
-    var allMountPoints = [],
-        mountPointFilter = this._filterMountPoint(localDB);
-    for (var i = 0; i < setOfHostNames.length; i++) {
-      var hostname = setOfHostNames[i];
-      var mountPointsPerHost = hostsInfo[hostname].disk_info;
-      var mountPointAsRoot = mountPointsPerHost.findProperty('mountpoint', '/');
-
-      // If Server does not send any host details information then atleast one mountpoint should be presumed as root
-      // This happens in a single container Linux Docker environment.
-      if (!mountPointAsRoot) {
-        mountPointAsRoot = {
-          mountpoint: '/'
-        };
-      }
-
-      mountPointsPerHost.filter(mountPointFilter).forEach(function (mPoint) {
-        if( !allMountPoints.findProperty("mountpoint", mPoint.mountpoint)) {
-          allMountPoints.push(mPoint);
-        }
-      }, this);
-    }
-
-    if (!allMountPoints.length) {
-      allMountPoints.push(mountPointAsRoot);
-    }
-    return allMountPoints;
-  },
-
-  /**
-   * Settings for <code>single_mountpoint</code>-initializer
-   * Used for configs with value as one of the possible mount points
-   *
-   * @see _initAsSingleMountPoint
-   * @param {string|string[]} components
-   * @param {string} winReplacer
-   * @returns {{components: string[], winReplacer: string, type: string}}
-   */
-  getSingleMountPointConfig: function (components, winReplacer) {
-    winReplacer = winReplacer || 'default';
-    return {
-      components: Em.makeArray(components),
-      winReplacer: winReplacer,
-      type: 'single_mountpoint'
-    };
-  },
-
-  /**
-   * Settings for <code>multiple_mountpoints</code>-initializer
-   * Used for configs with value as all of the possible mount points
-   *
-   * @see _initAsMultipleMountPoints
-   * @param {string|string[]} components
-   * @param {string} winReplacer
-   * @returns {{components: string[], winReplacer: string, type: string}}
-   */
-  getMultipleMountPointsConfig: function (components, winReplacer) {
-    winReplacer = winReplacer || 'default';
-    return {
-      components: Em.makeArray(components),
-      winReplacer: winReplacer,
-      type: 'multiple_mountpoints'
-    };
-  }
-
-});

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/views/common/configs/widgets/slider_config_widget_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/configs/widgets/slider_config_widget_view.js b/ambari-web/app/views/common/configs/widgets/slider_config_widget_view.js
index 71156f4..c7e810b 100644
--- a/ambari-web/app/views/common/configs/widgets/slider_config_widget_view.js
+++ b/ambari-web/app/views/common/configs/widgets/slider_config_widget_view.js
@@ -645,11 +645,8 @@ App.SliderConfigWidgetView = App.ConfigWidgetView.extend({
           return false;
         }
       }
-      if (this.get('config.name') === 'llap_queue_capacity') {
-        this.get('config').validate();
-      } else {
-        this.updateWarningsForCompatibilityWithWidget('');
-      }
+
+      this.updateWarningsForCompatibilityWithWidget('');
       return true;
     }
     return false;

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/views/main/admin/highAvailability/journalNode/step2_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/admin/highAvailability/journalNode/step2_view.js b/ambari-web/app/views/main/admin/highAvailability/journalNode/step2_view.js
index 656c487..680daa5 100644
--- a/ambari-web/app/views/main/admin/highAvailability/journalNode/step2_view.js
+++ b/ambari-web/app/views/main/admin/highAvailability/journalNode/step2_view.js
@@ -22,18 +22,17 @@ var App = require('app');
 App.ManageJournalNodeWizardStep2View = Em.View.extend({
 
   templateName: require('templates/main/admin/highAvailability/journalNode/step2'),
-  aaa: '',
   didInsertElement: function () {
     this.get('controller').loadStep();
   },
 
   journalNodesToAdd: function () {
     return App.router.get('manageJournalNodeWizardController').getJournalNodesToAdd();
-  }.property(),
+  }.property('App.router.clusterController.isHostsLoaded'),
 
   journalNodesToDelete: function () {
     return App.router.get('manageJournalNodeWizardController').getJournalNodesToDelete();
-  }.property(),
+  }.property('App.router.clusterController.isHostsLoaded'),
 
   isBackButtonVisible: false
 });
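The step2 view change is about cache invalidation: a computed property declared with an empty .property() is evaluated once on first read and never refreshed, whereas listing 'App.router.clusterController.isHostsLoaded' as a dependent key lets the journalnode lists recompute once hosts finish loading. A minimal illustration of the difference (names made up):

var view = Em.Object.extend({
  hostsLoaded: false,
  staleList: function () {                 // cached after first read, never invalidated
    return this.get('hostsLoaded') ? ['jn1', 'jn2'] : [];
  }.property(),
  freshList: function () {                 // recomputed whenever hostsLoaded changes
    return this.get('hostsLoaded') ? ['jn1', 'jn2'] : [];
  }.property('hostsLoaded')
}).create();

view.get('staleList');            // []
view.get('freshList');            // []
view.set('hostsLoaded', true);
view.get('staleList');            // still [] - no dependent keys
view.get('freshList');            // ['jn1', 'jn2']
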

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/views/main/admin/stack_upgrade/upgrade_history_details_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/admin/stack_upgrade/upgrade_history_details_view.js b/ambari-web/app/views/main/admin/stack_upgrade/upgrade_history_details_view.js
index 983c26a..dae236b 100644
--- a/ambari-web/app/views/main/admin/stack_upgrade/upgrade_history_details_view.js
+++ b/ambari-web/app/views/main/admin/stack_upgrade/upgrade_history_details_view.js
@@ -81,5 +81,5 @@ App.MainAdminStackUpgradeHistoryDetailsView = Em.View.extend({
     } else {
       return "";
     }
-  }.property('controller.upgradeData.Upgrade.request_status', 'controller.isDowngrade'),
+  }.property('controller.upgradeData.Upgrade.request_status', 'controller.isDowngrade')
 });
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/views/main/admin/stack_upgrade/upgrade_history_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/admin/stack_upgrade/upgrade_history_view.js b/ambari-web/app/views/main/admin/stack_upgrade/upgrade_history_view.js
index ef5f46b..f7b7f0e 100644
--- a/ambari-web/app/views/main/admin/stack_upgrade/upgrade_history_view.js
+++ b/ambari-web/app/views/main/admin/stack_upgrade/upgrade_history_view.js
@@ -57,6 +57,11 @@ App.MainAdminStackUpgradeHistoryView = App.TableView.extend(App.TableServerViewM
       isSelected: false
     }),
     Em.Object.create({
+      labelKey: 'admin.stackVersions.upgradeHistory.filter.failed.upgrade',
+      value: 'UPGRADE_FAILED',
+      isSelected: false
+    }),
+    Em.Object.create({
       labelKey: 'admin.stackVersions.upgradeHistory.filter.successful.downgrade',
       value: 'DOWNGRADE_COMPLETED',
       isSelected: false
@@ -66,6 +71,25 @@ App.MainAdminStackUpgradeHistoryView = App.TableView.extend(App.TableServerViewM
       value: 'DOWNGRADE_ABORTED',
       isSelected: false
     }),
+    Em.Object.create({
+      labelKey: 'admin.stackVersions.upgradeHistory.filter.failed.downgrade',
+      value: 'DOWNGRADE_FAILED',
+      isSelected: false
+    })
+  ],
+  upgradeMethods: [
+    Em.Object.create({
+      displayName: Em.I18n.t('common.rolling'),
+      type: 'ROLLING'
+    }),
+    Em.Object.create({
+      displayName: Em.I18n.t('common.express'),
+      type: 'NON_ROLLING'
+    }),
+    Em.Object.create({
+      displayName: Em.I18n.t('common.hostOrdered'),
+      type: 'HOST_ORDERED'
+    })
   ],
 
   /**
@@ -73,7 +97,7 @@ App.MainAdminStackUpgradeHistoryView = App.TableView.extend(App.TableServerViewM
    */
   selectedCategory: Em.computed.findBy('categories', 'isSelected', true),
 
-  filteredCount: function(){
+  filteredCount: function () {
     var filteredContent = this.get('filteredContent').toArray();
     return filteredContent.length;
   }.property('filteredContent'),
@@ -82,13 +106,12 @@ App.MainAdminStackUpgradeHistoryView = App.TableView.extend(App.TableServerViewM
    * displaying content filtered by upgrade type and upgrade status.
    */
   filteredContent: function () {
-    var result = [];
     var filterValue = 'ALL';
     var category = this.get('selectedCategory');
-    if (category)
+    if (category) {
       filterValue = category.get('value');
-    var result = this.filterBy(filterValue);
-    return result.reverse();
+    }
+    return this.filterBy(filterValue).reverse();
   }.property('selectedCategory'),
 
   /**
@@ -97,28 +120,25 @@ App.MainAdminStackUpgradeHistoryView = App.TableView.extend(App.TableServerViewM
   pageContent: function () {
     var content = this.get('filteredContent').toArray();
     content = this.processForDisplay(content);
-    content = content.slice(this.get('startIndex') - 1, this.get('endIndex'))
+    content = content.slice(this.get('startIndex') - 1, this.get('endIndex'));
     return content;
   }.property('filteredContent', 'startIndex', 'endIndex'),
 
-  processForDisplay: function(content){
+  processForDisplay: function (content) {
     var processedContent = [];
-    content.forEach(function(item){
-      if('UPGRADE' == item.get('direction'))
-        item.set('directionLabel', Em.I18n.t('common.upgrade'));
-      else
-        item.set('directionLabel', Em.I18n.t('common.downgrade'));
-
-      if('NON_ROLLING' == item.get('upgradeType'))
-        item.set('upgradeTypeLabel', Em.I18n.t('common.express'));
-      else
-        item.set('upgradeTypeLabel', Em.I18n.t('common.rolling'));
-
-      item.set('startTimeLabel', date.startTime(item.get('startTime')));
-      item.set('endTimeLabel', date.startTime(item.get('endTime')));
-      item.set('duration', date.durationSummary(item.get('startTime'), item.get('endTime')));
+
+    content.forEach(function (item) {
+      var direction = item.get('direction') === 'UPGRADE' ? Em.I18n.t('common.upgrade') : Em.I18n.t('common.downgrade');
+      var method = this.get('upgradeMethods').findProperty('type', item.get('upgradeType'));
+      item.setProperties({
+        directionLabel: direction,
+        upgradeTypeLabel: method ? method.get('displayName') : method,
+        startTimeLabel: date.startTime(item.get('startTime')),
+        endTimeLabel: date.startTime(item.get('endTime')),
+        duration: date.durationSummary(item.get('startTime'), item.get('endTime'))
+      });
       processedContent.push(item);
-    },this);
+    }, this);
     return processedContent;
   },
 
@@ -160,9 +180,9 @@ App.MainAdminStackUpgradeHistoryView = App.TableView.extend(App.TableServerViewM
     }
   },
 
-  willInsertElement: function(){
+  willInsertElement: function () {
     var self = this;
-    this.get('controller').loadStackUpgradeHistoryToModel().done(function(){
+    this.get('controller').loadStackUpgradeHistoryToModel().done(function () {
       self.populateUpgradeHistorySummary();
     });
   },
@@ -171,31 +191,31 @@ App.MainAdminStackUpgradeHistoryView = App.TableView.extend(App.TableServerViewM
     this.observesCategories();
   },
 
-  observesCategories: function(){
+  observesCategories: function () {
     this.get('categories').forEach(function (category) {
       var label = Em.I18n.t(category.labelKey).format(this.filterBy(category.value).length);
       category.set('label', label)
     }, this);
   }.observes('isReady'),
 
-  filterBy: function(filterValue){
+  filterBy: function (filterValue) {
     if ('ALL' == filterValue) {
       var all_records = App.StackUpgradeHistory.find();
       return all_records.toArray();
     } else {
       var tokens = filterValue.split('_');
-      var direction_token = null
-      var status_token = null
+      var direction_token = null;
+      var status_token = null;
 
       if (tokens.length == 1) {
         direction_token = tokens[0]
       } else if (tokens.length > 1) {
-        direction_token = tokens[0]
-        status_token = tokens[1]
+        direction_token = tokens[0];
+        status_token = tokens[1];
       }
 
-      var result = []
-      App.StackUpgradeHistory.find().forEach(function(item){
+      var result = [];
+      App.StackUpgradeHistory.find().forEach(function (item) {
         var direction = item.get('direction');
         if (direction == direction_token) {
           if (status_token != null) {
@@ -214,46 +234,46 @@ App.MainAdminStackUpgradeHistoryView = App.TableView.extend(App.TableServerViewM
     }
   },
 
-  selectCategory: function(event){
+  selectCategory: function (event) {
     this.get('categories').filterProperty('isSelected').setEach('isSelected', false);
     event.context.set('isSelected', true);
   },
 
-  populateUpgradeHistorySummary: function(){
+  populateUpgradeHistorySummary: function () {
     this.set('isReady', false);
     var result = [
       Em.Object.create({
         direction: 'UPGRADE',
-        label:Em.I18n.t('common.upgrade'),
+        label: Em.I18n.t('common.upgrade'),
         hasSuccess: false,
-        success:0,
+        success: 0,
         hasAbort: false,
-        abort:0,
+        abort: 0
       }),
       Em.Object.create({
         direction: 'DOWNGRADE',
-        label:Em.I18n.t('common.downgrade'),
+        label: Em.I18n.t('common.downgrade'),
         hasSuccess: false,
-        success:0,
+        success: 0,
         hasAbort: false,
-        abort:0,
+        abort: 0
       })
     ];
 
-    App.StackUpgradeHistory.find().forEach(function(item){
+    App.StackUpgradeHistory.find().forEach(function (item) {
       var direction = item.get('direction');
       var status = item.get('requestStatus');
-      if('UPGRADE' == direction){
-        if('COMPLETED' == status){
+      if ('UPGRADE' == direction) {
+        if ('COMPLETED' == status) {
           result[0].set('success', result[0].get('success') + 1);
         } else if ('ABORTED' == status) {
           result[0].set('abort', result[0].get('abort') + 1);
         }
-      } else if('DOWNGRADE' == direction){
-        if('COMPLETED' == status){
-          result[1].set('success', result[1].get('success')+1);
-        } else if ('ABORTED' == status){
-          result[1].set('abort', result[1].get('abort')+1);
+      } else if ('DOWNGRADE' == direction) {
+        if ('COMPLETED' == status) {
+          result[1].set('success', result[1].get('success') + 1);
+        } else if ('ABORTED' == status) {
+          result[1].set('abort', result[1].get('abort') + 1);
         }
       }
     }, this);
@@ -267,23 +287,18 @@ App.MainAdminStackUpgradeHistoryView = App.TableView.extend(App.TableServerViewM
     this.set('isReady', true);
   },
 
-  showUpgradeHistoryRecord: function(event) {
-    var record = event.context
-    var title = '';
+  showUpgradeHistoryRecord: function (event) {
+    var record = event.context;
     var direction = App.format.normalizeName(record.get('direction'));
-    var type = record.get('upgradeType')
-    if ('ROLLING' == type)
-      type = App.format.normalizeName(type);
-    else if ('NON_ROLLING' == type)
-      type = 'Express'
-
-    title = Em.I18n.t('admin.stackVersions.upgradeHistory.record.title').format(type, direction, record.get('fromVersion'));
+    var type = this.get('upgradeMethods').findProperty('type', record.get('upgradeType'));
+    var displayName = type ? type.get('displayName') : record.get('upgradeType');
 
-    this.get('controller').set('currentUpgradeRecord', record)
+    this.get('controller').set('currentUpgradeRecord', record);
 
     App.ModalPopup.show({
-      classNames: ['full-width-modal'],
-      header: title,
+      classNames: ['wizard-modal-wrapper'],
+      modalDialogClasses: ['modal-xlg'],
+      header: Em.I18n.t('admin.stackVersions.upgradeHistory.record.title').format(displayName, direction, record.get('fromVersion')),
       bodyClass: App.MainAdminStackUpgradeHistoryDetailsView,
       primary: Em.I18n.t('common.dismiss'),
       secondary: null,

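processForDisplay and showUpgradeHistoryRecord above now resolve the human-readable type label by looking the record's upgradeType up in the view's upgradeMethods list instead of branching on string comparisons. The lookup itself is just findProperty plus a null guard; a sketch with an assumed record value:

var upgradeMethods = [
  Em.Object.create({displayName: 'Rolling',      type: 'ROLLING'}),
  Em.Object.create({displayName: 'Express',      type: 'NON_ROLLING'}),
  Em.Object.create({displayName: 'Host Ordered', type: 'HOST_ORDERED'})
];

var method = upgradeMethods.findProperty('type', 'HOST_ORDERED');
var label = method ? method.get('displayName') : null;   // 'Host Ordered'
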
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/app/views/main/admin/stack_upgrade/upgrade_wizard_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/admin/stack_upgrade/upgrade_wizard_view.js b/ambari-web/app/views/main/admin/stack_upgrade/upgrade_wizard_view.js
index e54a0fa..bcb1ad7 100644
--- a/ambari-web/app/views/main/admin/stack_upgrade/upgrade_wizard_view.js
+++ b/ambari-web/app/views/main/admin/stack_upgrade/upgrade_wizard_view.js
@@ -68,7 +68,11 @@ App.upgradeWizardView = Em.View.extend({
    * when downgrade already started
    * @type {boolean}
    */
-  isDowngradeAvailable: Em.computed.and('!controller.isDowngrade', 'controller.downgradeAllowed'),
+  isDowngradeAvailable: Em.computed.and(
+    '!controller.isDowngrade',
+    'controller.downgradeAllowed',
+    '!controller.cantBeStarted'
+  ),
 
   /**
    * progress value is rounded to floor

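isDowngradeAvailable keeps using the '!'-prefixed keys that the Em.computed macros in this codebase understand (the '!controller.isDowngrade' key was already present before this change). Written out by hand, the macro above is roughly equivalent to:

// hand-written equivalent of the macro (sketch, inside an Em.View.extend({...}) hash)
isDowngradeAvailable: function () {
  return !this.get('controller.isDowngrade') &&
          this.get('controller.downgradeAllowed') &&
         !this.get('controller.cantBeStarted');
}.property('controller.isDowngrade', 'controller.downgradeAllowed', 'controller.cantBeStarted')
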
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/test/controllers/main/service/reassign/step4_controller_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/service/reassign/step4_controller_test.js b/ambari-web/test/controllers/main/service/reassign/step4_controller_test.js
index 5e7b0e3..9a73524 100644
--- a/ambari-web/test/controllers/main/service/reassign/step4_controller_test.js
+++ b/ambari-web/test/controllers/main/service/reassign/step4_controller_test.js
@@ -295,7 +295,7 @@ describe('App.ReassignMasterWizardStep4Controller', function () {
 
       controller.set('tasks', commandsForDB);
       controller.removeUnneededTasks();
-      expect(controller.get('tasks').mapProperty('id')).to.eql([1,2,5,6,8,9,10,12]);
+      expect(controller.get('tasks').mapProperty('id')).to.eql([1,2,5,6,8,10,12]);
     });
 
     it('reassign component is Mysql Server', function () {

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/test/models/stack_service_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/models/stack_service_test.js b/ambari-web/test/models/stack_service_test.js
index 9f12e88..bc101e0 100644
--- a/ambari-web/test/models/stack_service_test.js
+++ b/ambari-web/test/models/stack_service_test.js
@@ -283,7 +283,76 @@ describe('App.StackService', function () {
     });
   });
 
-  App.TestAliases.testAsComputedOr(ss, 'isDisabled', ['isMandatory', 'isInstalled']);
+  describe('#isDisabled', function () {
+
+    var cases = [
+      {
+        isInstalled: true,
+        isMandatory: true,
+        clusterInstallCompleted: true,
+        isDisabled: true
+      },
+      {
+        isInstalled: true,
+        isMandatory: true,
+        clusterInstallCompleted: false,
+        isDisabled: true
+      },
+      {
+        isInstalled: true,
+        isMandatory: false,
+        clusterInstallCompleted: true,
+        isDisabled: true
+      },
+      {
+        isInstalled: true,
+        isMandatory: false,
+        clusterInstallCompleted: false,
+        isDisabled: true
+      },
+      {
+        isInstalled: false,
+        isMandatory: true,
+        clusterInstallCompleted: true,
+        isDisabled: false
+      },
+      {
+        isInstalled: false,
+        isMandatory: true,
+        clusterInstallCompleted: false,
+        isDisabled: true
+      },
+      {
+        isInstalled: false,
+        isMandatory: false,
+        clusterInstallCompleted: true,
+        isDisabled: false
+      },
+      {
+        isInstalled: false,
+        isMandatory: false,
+        clusterInstallCompleted: false,
+        isDisabled: false
+      }
+    ];
+
+    cases.forEach(function (testCase) {
+
+      var title = 'isInstalled: {0}, isMandatory: {1}, clusterInstallCompleted: {2}, isDisabled: {3}'
+        .format(testCase.isInstalled, testCase.isMandatory, testCase.clusterInstallCompleted, testCase.isDisabled);
+
+      it(title, function () {
+        ss.setProperties({
+          isInstalled: testCase.isInstalled,
+          isMandatory: testCase.isMandatory
+        });
+        App.set('router.clusterInstallCompleted', testCase.clusterInstallCompleted);
+        expect(ss.get('isDisabled')).to.equal(testCase.isDisabled);
+      });
+
+    });
+
+  });
 
 
 });

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-web/test/utils/ajax/ajax_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/utils/ajax/ajax_test.js b/ambari-web/test/utils/ajax/ajax_test.js
index 747ccff..35de05f 100644
--- a/ambari-web/test/utils/ajax/ajax_test.js
+++ b/ambari-web/test/utils/ajax/ajax_test.js
@@ -23,11 +23,16 @@ describe('App.ajax', function() {
 
   beforeEach(function() {
     App.ajax.send.restore();
+    sinon.stub(App.logger, 'setTimer');
     sinon.spy(App.ajax, 'send'); // no sense to test stubbed function, so going to spy on it
     App.set('apiPrefix', '/api/v1');
     App.set('clusterName', 'tdk');
   });
 
+  afterEach(function() {
+    App.logger.setTimer.restore();
+  });
+
   describe('#send', function() {
     it('Without sender', function() {
       expect(App.ajax.send({})).to.equal(null);
@@ -162,7 +167,7 @@ describe('App.ajax', function() {
       });
     });
   });
-  
+
   describe('#abortRequests', function () {
 
     var xhr = {
@@ -192,6 +197,6 @@ describe('App.ajax', function() {
     it('should clear requests array', function () {
       expect(requests).to.have.length(0);
     });
-    
+
   });
 });
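The ajax test setup stubs App.logger.setTimer before each test and restores it afterwards, the usual sinon discipline for silencing a collaborator without leaking the stub into later suites. A generic illustration with a hypothetical logger object (not Ambari code):

var logger = {setTimer: function (name) { /* records timing info */ }};

beforeEach(function () {
  sinon.stub(logger, 'setTimer');   // replace the real method with a recording no-op
});

afterEach(function () {
  logger.setTimer.restore();        // put the original method back
});

it('does not hit the real timer', function () {
  logger.setTimer('load');
  expect(logger.setTimer.calledWith('load')).to.be.true;
});
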


[18/25] ambari git commit: Merge from branch-2.5

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/task-log4j.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/task-log4j.properties b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/task-log4j.properties
new file mode 100644
index 0000000..7e12962
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/task-log4j.properties
@@ -0,0 +1,134 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+#
+# Job Summary Appender 
+#
+# Use following logger to send summary to separate file defined by 
+# hadoop.mapreduce.jobsummary.log.file rolled daily:
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+# 
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshhold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollver at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this 
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+# Rolling File Appender
+#
+
+#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and and 30-day backups
+#log4j.appender.RFA.MaxFileSize=1MB
+#log4j.appender.RFA.MaxBackupIndex=30
+
+#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+ 
+# Removes "deprecated" messages
+log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/topology_script.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/topology_script.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/topology_script.py
new file mode 100644
index 0000000..0f7a55c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/topology_script.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import sys, os
+from string import join
+import ConfigParser
+
+
+DEFAULT_RACK = "/default-rack"
+DATA_FILE_NAME =  os.path.dirname(os.path.abspath(__file__)) + "/topology_mappings.data"
+SECTION_NAME = "network_topology"
+
+class TopologyScript():
+
+  def load_rack_map(self):
+    try:
+      #RACK_MAP contains both host name vs rack and ip vs rack mappings
+      mappings = ConfigParser.ConfigParser()
+      mappings.read(DATA_FILE_NAME)
+      return dict(mappings.items(SECTION_NAME))
+    except ConfigParser.NoSectionError:
+      return {}
+
+  def get_racks(self, rack_map, args):
+    if len(args) == 1:
+      return DEFAULT_RACK
+    else:
+      return join([self.lookup_by_hostname_or_ip(input_argument, rack_map) for input_argument in args[1:]],)
+
+  def lookup_by_hostname_or_ip(self, hostname_or_ip, rack_map):
+    #try looking up by hostname
+    rack = rack_map.get(hostname_or_ip)
+    if rack is not None:
+      return rack
+    #try looking up by ip
+    rack = rack_map.get(self.extract_ip(hostname_or_ip))
+    #try by localhost since hadoop could be passing in 127.0.0.1 which might not be mapped
+    return rack if rack is not None else rack_map.get("localhost.localdomain", DEFAULT_RACK)
+
+  #strips out port and slashes in case hadoop passes in something like 127.0.0.1/127.0.0.1:50010
+  def extract_ip(self, container_string):
+    return container_string.split("/")[0].split(":")[0]
+
+  def execute(self, args):
+    rack_map = self.load_rack_map()
+    rack = self.get_racks(rack_map, args)
+    print rack
+
+if __name__ == "__main__":
+  TopologyScript().execute(sys.argv)

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/hook.py
new file mode 100644
index 0000000..f21e4b1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/hook.py
@@ -0,0 +1,39 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from rack_awareness import create_topology_script_and_mapping
+from shared_initialization import setup_hadoop, setup_configs, create_javahome_symlink
+
+class BeforeStartHook(Hook):
+
+  def hook(self, env):
+    import params
+
+    self.run_custom_hook('before-ANY')
+    env.set_params(params)
+
+    setup_hadoop()
+    setup_configs()
+    create_javahome_symlink()
+    create_topology_script_and_mapping()
+
+if __name__ == "__main__":
+  BeforeStartHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
new file mode 100644
index 0000000..d838211
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
@@ -0,0 +1,326 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import default
+from resource_management.libraries.functions import format_jvm_option
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
+from ambari_commons.os_check import OSCheck
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+
+config = Script.get_config()
+
+# Whether to skip copying fast-hdfs-resource.jar to /var/lib/ambari-agent/lib/
+# This is required if tarballs are going to be copied to HDFS, so set to False
+sysprep_skip_copy_fast_jar_hdfs = default("/configurations/cluster-env/sysprep_skip_copy_fast_jar_hdfs", False)
+
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+dfs_type = default("/commandParams/dfs_type", "")
+stack_root = Script.get_stack_root()
+hadoop_conf_dir = "/etc/hadoop/conf"
+component_list = default("/localComponents", [])
+
+hdfs_tmp_dir = config['configurations']['hadoop-env']['hdfs_tmp_dir']
+
+hadoop_metrics2_properties_content = config['configurations']['hadoop-metrics2.properties']['content']
+
+# hadoop default params
+mapreduce_libs_path = format("{stack_root}/current/hadoop-mapreduce-client/*")
+
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
+hadoop_lib_home = stack_select.get_hadoop_dir("lib")
+hadoop_bin = stack_select.get_hadoop_dir("sbin")
+hadoop_home = stack_select.get_hadoop_dir("home")
+create_lib_snappy_symlinks = False
+
+
+current_service = config['serviceName']
+
+#security params
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+#users and groups
+has_hadoop_env = 'hadoop-env' in config['configurations']
+mapred_user = config['configurations']['mapred-env']['mapred_user']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+yarn_user = config['configurations']['yarn-env']['yarn_user']
+
+user_group = config['configurations']['cluster-env']['user_group']
+
+#hosts
+hostname = config["hostname"]
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
+
+has_namenode = not len(namenode_host) == 0
+has_resourcemanager = not len(rm_host) == 0
+has_slaves = not len(slave_hosts) == 0
+has_oozie_server = not len(oozie_servers) == 0
+has_hcat_server_host = not len(hcat_server_hosts) == 0
+has_hive_server_host = not len(hive_server_host) == 0
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_zk_host = not len(zk_hosts) == 0
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+has_metric_collector = not len(ams_collector_hosts) == 0
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+
+metric_collector_port = None
+if has_metric_collector:
+  if 'cluster-env' in config['configurations'] and \
+      'metrics_collector_vip_port' in config['configurations']['cluster-env']:
+    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
+  else:
+    metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
+    if metric_collector_web_address.find(':') != -1:
+      metric_collector_port = metric_collector_web_address.split(':')[1]
+    else:
+      metric_collector_port = '6188'
+  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
+    metric_collector_protocol = 'https'
+  else:
+    metric_collector_protocol = 'http'
+  metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
+  metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
+  metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
+
+  pass
+metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
+metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
+
+# Cluster Zookeeper quorum
+zookeeper_quorum = None
+if has_zk_host:
+  if 'zoo.cfg' in config['configurations'] and 'clientPort' in config['configurations']['zoo.cfg']:
+    zookeeper_clientPort = config['configurations']['zoo.cfg']['clientPort']
+  else:
+    zookeeper_clientPort = '2181'
+  zookeeper_quorum = (':' + zookeeper_clientPort + ',').join(config['clusterHostInfo']['zookeeper_hosts'])
+  # last port config
+  zookeeper_quorum += ':' + zookeeper_clientPort
+
+#hadoop params
+
+if has_namenode or dfs_type == 'HCFS':
+  hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
+  hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
+  task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
+
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hbase_tmp_dir = "/tmp/hbase-hbase"
+#db params
+server_db_name = config['hostLevelParams']['db_name']
+db_driver_filename = config['hostLevelParams']['db_driver_filename']
+oracle_driver_url = config['hostLevelParams']['oracle_jdbc_url']
+mysql_driver_url = config['hostLevelParams']['mysql_jdbc_url']
+ambari_server_resources = config['hostLevelParams']['jdk_location']
+oracle_driver_symlink_url = format("{ambari_server_resources}oracle-jdbc-driver.jar")
+mysql_driver_symlink_url = format("{ambari_server_resources}mysql-jdbc-driver.jar")
+
+ambari_db_rca_url = config['hostLevelParams']['ambari_db_rca_url'][0]
+ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver'][0]
+ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username'][0]
+ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password'][0]
+
+if has_namenode and 'rca_enabled' in config['configurations']['hadoop-env']:
+  rca_enabled =  config['configurations']['hadoop-env']['rca_enabled']
+else:
+  rca_enabled = False
+rca_disabled_prefix = "###"
+if rca_enabled == True:
+  rca_prefix = ""
+else:
+  rca_prefix = rca_disabled_prefix
+
+#hadoop-env.sh
+java_home = config['hostLevelParams']['java_home']
+
+jsvc_path = "/usr/lib/bigtop-utils"
+
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
+namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
+
+jtnode_opt_newsize = "200m"
+jtnode_opt_maxnewsize = "200m"
+jtnode_heapsize =  "1024m"
+ttnode_heapsize = "1024m"
+
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+
+#log4j.properties
+
+yarn_log_dir_prefix = default("/configurations/yarn-env/yarn_log_dir_prefix","/var/log/hadoop-yarn")
+
+dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
+
+#log4j.properties
+if (('hdfs-log4j' in config['configurations']) and ('content' in config['configurations']['hdfs-log4j'])):
+  log4j_props = config['configurations']['hdfs-log4j']['content']
+  if (('yarn-log4j' in config['configurations']) and ('content' in config['configurations']['yarn-log4j'])):
+    log4j_props += config['configurations']['yarn-log4j']['content']
+else:
+  log4j_props = None
+
+refresh_topology = False
+command_params = config["commandParams"] if "commandParams" in config else None
+if command_params is not None:
+  refresh_topology = bool(command_params["refresh_topology"]) if "refresh_topology" in command_params else False
+
+ambari_libs_dir = "/var/lib/ambari-agent/lib"
+is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+#host info
+all_hosts = default("/clusterHostInfo/all_hosts", [])
+all_racks = default("/clusterHostInfo/all_racks", [])
+all_ipv4_ips = default("/clusterHostInfo/all_ipv4_ips", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+
+#topology files
+net_topology_script_file_path = "/etc/hadoop/conf/topology_script.py"
+net_topology_script_dir = os.path.dirname(net_topology_script_file_path)
+net_topology_mapping_data_file_name = 'topology_mappings.data'
+net_topology_mapping_data_file_path = os.path.join(net_topology_script_dir, net_topology_mapping_data_file_name)
+
+# Logic to create the /tmp and /user directories for the HCFS stack.
+has_core_site = 'core-site' in config['configurations']
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+kinit_path_local = get_kinit_path()
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+smoke_user =  config['configurations']['cluster-env']['smokeuser']
+smoke_hdfs_user_dir = format("/user/{smoke_user}")
+smoke_hdfs_user_mode = 0770
+
+
+##### Namenode RPC ports - metrics config section start #####
+
+# Figure out the rpc ports for current namenode
+nn_rpc_client_port = None
+nn_rpc_dn_port = None
+nn_rpc_healthcheck_port = None
+
+namenode_id = None
+namenode_rpc = None
+
+dfs_ha_enabled = False
+dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
+if dfs_ha_nameservices is None:
+  dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None)
+dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+
+dfs_ha_namemodes_ids_list = []
+other_namenode_id = None
+
+if dfs_ha_namenode_ids:
+  dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
+  dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
+  if dfs_ha_namenode_ids_array_len > 1:
+    dfs_ha_enabled = True
+
+if dfs_ha_enabled:
+  for nn_id in dfs_ha_namemodes_ids_list:
+    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
+    if hostname in nn_host:
+      namenode_id = nn_id
+      namenode_rpc = nn_host
+    pass
+  pass
+else:
+  namenode_rpc = default('/configurations/hdfs-site/dfs.namenode.rpc-address', None)
+
+if namenode_rpc:
+  nn_rpc_client_port = namenode_rpc.split(':')[1].strip()
+
+if dfs_ha_enabled:
+  dfs_service_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.servicerpc-address.{dfs_ha_nameservices}.{namenode_id}'), None)
+  dfs_lifeline_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.lifeline.rpc-address.{dfs_ha_nameservices}.{namenode_id}'), None)
+else:
+  dfs_service_rpc_address = default('/configurations/hdfs-site/dfs.namenode.servicerpc-address', None)
+  dfs_lifeline_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.lifeline.rpc-address'), None)
+
+if dfs_service_rpc_address:
+  nn_rpc_dn_port = dfs_service_rpc_address.split(':')[1].strip()
+
+if dfs_lifeline_rpc_address:
+  nn_rpc_healthcheck_port = dfs_lifeline_rpc_address.split(':')[1].strip()
+
+is_nn_client_port_configured = False if nn_rpc_client_port is None else True
+is_nn_dn_port_configured = False if nn_rpc_dn_port is None else True
+is_nn_healthcheck_port_configured = False if nn_rpc_healthcheck_port is None else True
+
+##### end #####
+
+import functools
+#create partial functions with common arguments for every HdfsResource call
+#to create/delete/copyfromlocal hdfs directories/files we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
+  dfs_type = dfs_type
+)
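
Note for reviewers: the functools.partial above pre-binds the common arguments (user, keytab, hdfs-site, default FS, ...) so hook code only supplies the per-call ones and then flushes everything in one batch. A minimal sketch of that calling convention, mirroring the create_dirs() helper added in shared_initialization.py below (the path here is a hypothetical example, not taken from the patch):

import params

# Queue a directory creation; the common arguments were already bound in params.py.
params.HdfsResource("/tmp/example-dir",        # hypothetical path
                    type="directory",
                    action="create_on_execute",
                    owner=params.hdfs_user,
                    mode=0777)

# Flush all queued operations against HDFS in a single execution.
params.HdfsResource(None, action="execute")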

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/rack_awareness.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/rack_awareness.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/rack_awareness.py
new file mode 100644
index 0000000..548f051
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/rack_awareness.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management.core.resources import File
+from resource_management.core.source import StaticFile, Template
+from resource_management.libraries.functions import format
+
+
+def create_topology_mapping():
+  import params
+
+  File(params.net_topology_mapping_data_file_path,
+       content=Template("topology_mappings.data.j2"),
+       owner=params.hdfs_user,
+       group=params.user_group,
+       only_if=format("test -d {net_topology_script_dir}"))
+
+def create_topology_script():
+  import params
+
+  File(params.net_topology_script_file_path,
+       content=StaticFile('topology_script.py'),
+       mode=0755,
+       only_if=format("test -d {net_topology_script_dir}"))
+
+def create_topology_script_and_mapping():
+  import params
+  if params.has_hadoop_env:
+    create_topology_mapping()
+    create_topology_script()

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/shared_initialization.py
new file mode 100644
index 0000000..5dce8e0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/shared_initialization.py
@@ -0,0 +1,191 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
+
+from resource_management import *
+
+def setup_hadoop():
+  """
+  Setup hadoop files and directories
+  """
+  import params
+
+  Execute(("setenforce","0"),
+          only_if="test -f /selinux/enforce",
+          not_if="(! which getenforce ) || (which getenforce && getenforce | grep -q Disabled)",
+          sudo=True,
+  )
+
+  #directories
+  if params.has_namenode or params.dfs_type == 'HCFS':
+    Directory(params.hdfs_log_dir_prefix,
+              create_parents = True,
+              owner='root',
+              group=params.user_group,
+              mode=0775,
+              cd_access='a',
+    )
+    if params.has_namenode:
+      Directory(params.hadoop_pid_dir_prefix,
+              create_parents = True,
+              owner='root',
+              group='root',
+              cd_access='a',
+      )
+    Directory(params.hadoop_tmp_dir,
+              create_parents = True,
+              owner=params.hdfs_user,
+              cd_access='a',
+              )
+  #files
+    if params.security_enabled:
+      tc_owner = "root"
+    else:
+      tc_owner = params.hdfs_user
+      
+    # If WebHDFS is not enabled, this jar is needed to create hadoop folders and copy tarballs to HDFS.
+    if params.sysprep_skip_copy_fast_jar_hdfs:
+      print "Skipping copying of fast-hdfs-resource.jar as host is sys prepped"
+    elif params.dfs_type == 'HCFS' or not WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
+      # for the source code of this jar, see contrib/fast-hdfs-resource
+      File(format("{ambari_libs_dir}/fast-hdfs-resource.jar"),
+           mode=0644,
+           content=StaticFile("fast-hdfs-resource.jar")
+      )
+      
+    if os.path.exists(params.hadoop_conf_dir):
+      File(os.path.join(params.hadoop_conf_dir, 'commons-logging.properties'),
+           owner=tc_owner,
+           content=Template('commons-logging.properties.j2')
+      )
+
+      health_check_template_name = "health_check"
+      File(os.path.join(params.hadoop_conf_dir, health_check_template_name),
+           owner=tc_owner,
+           content=Template(health_check_template_name + ".j2")
+      )
+
+      log4j_filename = os.path.join(params.hadoop_conf_dir, "log4j.properties")
+      if (params.log4j_props != None):
+        File(log4j_filename,
+             mode=0644,
+             group=params.user_group,
+             owner=params.hdfs_user,
+             content=params.log4j_props
+        )
+      elif (os.path.exists(format("{params.hadoop_conf_dir}/log4j.properties"))):
+        File(log4j_filename,
+             mode=0644,
+             group=params.user_group,
+             owner=params.hdfs_user,
+        )
+
+      File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
+           owner=params.hdfs_user,
+           group=params.user_group,
+           content=InlineTemplate(params.hadoop_metrics2_properties_content)
+      )
+
+    if params.dfs_type == 'HCFS' and params.has_core_site and 'ECS_CLIENT' in params.component_list:
+      create_dirs()
+
+    create_microsoft_r_dir()
+
+
+def setup_configs():
+  """
+  Creates configs for the HDFS and MapReduce services
+  """
+  import params
+
+  if params.has_namenode or params.dfs_type == 'HCFS':
+    if os.path.exists(params.hadoop_conf_dir):
+      File(params.task_log4j_properties_location,
+           content=StaticFile("task-log4j.properties"),
+           mode=0755
+      )
+
+    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')):
+      File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'),
+           owner=params.hdfs_user,
+           group=params.user_group
+      )
+    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')):
+      File(os.path.join(params.hadoop_conf_dir, 'masters'),
+                owner=params.hdfs_user,
+                group=params.user_group
+      )
+
+  generate_include_file()
+
+
+def generate_include_file():
+  import params
+
+  if params.has_namenode and params.dfs_hosts and params.has_slaves:
+    include_hosts_list = params.slave_hosts
+    File(params.dfs_hosts,
+         content=Template("include_hosts_list.j2"),
+         owner=params.hdfs_user,
+         group=params.user_group
+    )
+
+def create_javahome_symlink():
+  if os.path.exists("/usr/jdk/jdk1.6.0_31") and not os.path.exists("/usr/jdk64/jdk1.6.0_31"):
+    Directory("/usr/jdk64/",
+         create_parents = True,
+    )
+    Link("/usr/jdk/jdk1.6.0_31",
+         to="/usr/jdk64/jdk1.6.0_31",
+    )
+
+def create_dirs():
+  import params
+  params.HdfsResource(params.hdfs_tmp_dir,
+                      type="directory",
+                      action="create_on_execute",
+                      owner=params.hdfs_user,
+                      mode=0777
+  )
+  params.HdfsResource(params.smoke_hdfs_user_dir,
+                      type="directory",
+                      action="create_on_execute",
+                      owner=params.smoke_user,
+                      mode=params.smoke_hdfs_user_mode
+  )
+  params.HdfsResource(None,
+                      action="execute"
+  )
+
+def create_microsoft_r_dir():
+  import params
+  if 'MICROSOFT_R_NODE_CLIENT' in params.component_list and params.default_fs:
+    directory = '/user/RevoShare'
+    try:
+      params.HdfsResource(directory,
+                          type="directory",
+                          action="create_on_execute",
+                          owner=params.hdfs_user,
+                          mode=0777)
+      params.HdfsResource(None, action="execute")
+    except Exception as exception:
+      Logger.warning("Could not check the existence of {0} on DFS while starting {1}, exception: {2}".format(directory, params.current_service, str(exception)))
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/commons-logging.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/commons-logging.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/commons-logging.properties.j2
new file mode 100644
index 0000000..2197ba5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/commons-logging.properties.j2
@@ -0,0 +1,43 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+#Logging Implementation
+
+#Log4J
+org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
+
+#JDK Logger
+#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/exclude_hosts_list.j2
new file mode 100644
index 0000000..1adba80
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/exclude_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in hdfs_exclude_file %}
+{{host}}
+{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
new file mode 100644
index 0000000..2f3aab6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
@@ -0,0 +1,105 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# syntax: [prefix].[source|sink|jmx].[instance].[options]
+# See package.html for org.apache.hadoop.metrics2 for details
+
+{% if has_ganglia_server %}
+*.period=60
+
+*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+*.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+# Hook up to the server
+namenode.sink.ganglia.servers={{ganglia_server_host}}:8661
+datanode.sink.ganglia.servers={{ganglia_server_host}}:8659
+jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662
+tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658
+maptask.sink.ganglia.servers={{ganglia_server_host}}:8660
+reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660
+resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664
+nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657
+historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666
+journalnode.sink.ganglia.servers={{ganglia_server_host}}:8654
+nimbus.sink.ganglia.servers={{ganglia_server_host}}:8649
+supervisor.sink.ganglia.servers={{ganglia_server_host}}:8650
+
+resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
+
+{% endif %}
+
+{% if has_metric_collector %}
+
+*.period={{metrics_collection_period}}
+*.sink.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
+*.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+*.sink.timeline.period={{metrics_collection_period}}
+*.sink.timeline.sendInterval={{metrics_report_interval}}000
+*.sink.timeline.slave.host.name={{hostname}}
+*.sink.timeline.zookeeper.quorum={{zookeeper_quorum}}
+*.sink.timeline.protocol={{metric_collector_protocol}}
+*.sink.timeline.port={{metric_collector_port}}
+
+# HTTPS properties
+*.sink.timeline.truststore.path = {{metric_truststore_path}}
+*.sink.timeline.truststore.type = {{metric_truststore_type}}
+*.sink.timeline.truststore.password = {{metric_truststore_password}}
+
+datanode.sink.timeline.collector.hosts={{ams_collector_hosts}}
+namenode.sink.timeline.collector.hosts={{ams_collector_hosts}}
+resourcemanager.sink.timeline.collector.hosts={{ams_collector_hosts}}
+nodemanager.sink.timeline.collector.hosts={{ams_collector_hosts}}
+jobhistoryserver.sink.timeline.collector.hosts={{ams_collector_hosts}}
+journalnode.sink.timeline.collector.hosts={{ams_collector_hosts}}
+applicationhistoryserver.sink.timeline.collector.hosts={{ams_collector_hosts}}
+
+resourcemanager.sink.timeline.tagsForPrefix.yarn=Queue
+
+{% if is_nn_client_port_configured %}
+# Namenode rpc ports customization
+namenode.sink.timeline.metric.rpc.client.port={{nn_rpc_client_port}}
+{% endif %}
+{% if is_nn_dn_port_configured %}
+namenode.sink.timeline.metric.rpc.datanode.port={{nn_rpc_dn_port}}
+{% endif %}
+{% if is_nn_healthcheck_port_configured %}
+namenode.sink.timeline.metric.rpc.healthcheck.port={{nn_rpc_healthcheck_port}}
+{% endif %}
+
+{% endif %}
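
Note for reviewers: the template appends "000" to the report interval, so the sink's sendInterval is in milliseconds while the Ambari-side setting is in seconds. A small standalone sketch of the values this renders under the defaults pulled by params.py (60s report interval, 10s collection period):

# Illustrative only; the real values come from ams-site via params.py.
metrics_report_interval = 60     # seconds
metrics_collection_period = 10   # seconds

rendered = {
    "*.period": metrics_collection_period,
    "*.sink.timeline.period": metrics_collection_period,
    # '{{metrics_report_interval}}000' -> seconds expressed as milliseconds
    "*.sink.timeline.sendInterval": int("%d000" % metrics_report_interval),  # 60000
}
print(rendered)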

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/health_check.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/health_check.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/health_check.j2
new file mode 100644
index 0000000..0a03d17
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/health_check.j2
@@ -0,0 +1,81 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#!/bin/bash
+#
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+err=0;
+
+function check_disks {
+
+  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
+    fsdev=""
+    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
+    if [ -z "$fsdev" -a "$m" != "/mnt" ] ; then
+      msg_="$msg_ $m(u)"
+    else
+      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
+    fi
+  done
+
+  if [ -z "$msg_" ] ; then
+    echo "disks ok" ; exit 0
+  else
+    echo "$msg_" ; exit 2
+  fi
+
+}
+
+# Run all checks
+for check in disks ; do
+  msg=`check_${check}` ;
+  if [ $? -eq 0 ] ; then
+    ok_msg="$ok_msg$msg,"
+  else
+    err_msg="$err_msg$msg,"
+  fi
+done
+
+if [ ! -z "$err_msg" ] ; then
+  echo -n "ERROR $err_msg "
+fi
+if [ ! -z "$ok_msg" ] ; then
+  echo -n "OK: $ok_msg"
+fi
+
+echo
+
+# Success!
+exit 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/include_hosts_list.j2
new file mode 100644
index 0000000..4a9e713
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in slave_hosts %}
+{{host}}
+{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/topology_mappings.data.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/topology_mappings.data.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/topology_mappings.data.j2
new file mode 100644
index 0000000..15034d6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/topology_mappings.data.j2
@@ -0,0 +1,24 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+[network_topology]
+{% for host in all_hosts %}
+{% if host in slave_hosts %}
+{{host}}={{all_racks[loop.index-1]}}
+{{all_ipv4_ips[loop.index-1]}}={{all_racks[loop.index-1]}}
+{% endif %}
+{% endfor %}
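
For illustration, with two hypothetical slave hosts h1 and h2 on rack /rack-01, the template above renders roughly as:

[network_topology]
h1=/rack-01
192.168.1.101=/rack-01
h2=/rack-01
192.168.1.102=/rack-01

Host names, IPs and rack labels here are placeholders; the real values come from clusterHostInfo (all_hosts, all_ipv4_ips, all_racks).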

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/3.0/kerberos.json
new file mode 100644
index 0000000..9579d0f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/kerberos.json
@@ -0,0 +1,78 @@
+{
+  "properties": {
+    "realm": "${kerberos-env/realm}",
+    "keytab_dir": "/etc/security/keytabs",
+    "additional_realms": ""
+  },
+  "identities": [
+    {
+      "name": "spnego",
+      "principal": {
+        "value": "HTTP/_HOST@${realm}",
+        "type": "service"
+      },
+      "keytab": {
+        "file": "${keytab_dir}/spnego.service.keytab",
+        "owner": {
+          "name": "root",
+          "access": "r"
+        },
+        "group": {
+          "name": "${cluster-env/user_group}",
+          "access": "r"
+        }
+      }
+    },
+    {
+      "name": "smokeuser",
+      "principal": {
+        "value": "${cluster-env/smokeuser}-${cluster_name|toLower()}@${realm}",
+        "type": "user",
+        "configuration": "cluster-env/smokeuser_principal_name",
+        "local_username": "${cluster-env/smokeuser}"
+      },
+      "keytab": {
+        "file": "${keytab_dir}/smokeuser.headless.keytab",
+        "owner": {
+          "name": "${cluster-env/smokeuser}",
+          "access": "r"
+        },
+        "group": {
+          "name": "${cluster-env/user_group}",
+          "access": "r"
+        },
+        "configuration": "cluster-env/smokeuser_keytab"
+      }
+    }
+  ],
+  "services": [
+    {
+      "name": "AMBARI",
+      "components": [
+        {
+          "name": "AMBARI_SERVER",
+          "identities": [
+            {
+              "name": "ambari-server",
+              "principal": {
+                "value": "ambari-server-${cluster_name|toLower()}@${realm}",
+                "type": "user",
+                "configuration": "cluster-env/ambari_principal_name"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/ambari.server.keytab",
+                "owner": {
+                  "access": "r"
+                }
+              }
+            },
+            {
+              "name" : "ambari-server_spnego",
+              "reference" : "/spnego"
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
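
The ${...} tokens in this descriptor are variable references that Ambari resolves against the "properties" block above and cluster configuration (for example ${realm}, ${keytab_dir}, ${cluster-env/smokeuser}) when Kerberos is enabled. A toy resolver, purely to show the substitution shape and not Ambari's actual implementation (it ignores pipe functions such as |toLower(), and the property values are hypothetical):

import re

def resolve(value, props):
    # Replace each ${key} with its value from a flat property map; leave unknown keys as-is.
    return re.sub(r"\$\{([^}]+)\}", lambda m: str(props.get(m.group(1), m.group(0))), value)

props = {"realm": "EXAMPLE.COM", "keytab_dir": "/etc/security/keytabs"}  # hypothetical values
print(resolve("${keytab_dir}/spnego.service.keytab", props))
# -> /etc/security/keytabs/spnego.service.keytab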

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/metainfo.xml
new file mode 100644
index 0000000..0364d41
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/metainfo.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <versions>
+    <active>true</active>
+  </versions>
+  <minJdk>1.7</minJdk>
+  <maxJdk>1.8</maxJdk>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json b/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json
new file mode 100644
index 0000000..dd87b72
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json
@@ -0,0 +1,323 @@
+{
+  "stack_features": [
+    {
+      "name": "snappy",
+      "description": "Snappy compressor/decompressor support",
+      "min_version": "2.0.0.0",
+      "max_version": "2.2.0.0"
+    },
+    {
+      "name": "lzo",
+      "description": "LZO libraries support",
+      "min_version": "2.2.1.0"
+    },
+    {
+      "name": "express_upgrade",
+      "description": "Express upgrade support",
+      "min_version": "2.1.0.0"
+    },
+    {
+      "name": "rolling_upgrade",
+      "description": "Rolling upgrade support",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "config_versioning",
+      "description": "Configurable versions support",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "datanode_non_root",
+      "description": "DataNode running as non-root support (AMBARI-7615)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "remove_ranger_hdfs_plugin_env",
+      "description": "HDFS removes Ranger env files (AMBARI-14299)",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "ranger",
+      "description": "Ranger Service support",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "ranger_tagsync_component",
+      "description": "Ranger Tagsync component support (AMBARI-14383)",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "phoenix",
+      "description": "Phoenix Service support",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "nfs",
+      "description": "NFS support",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "tez_for_spark",
+      "description": "Tez dependency for Spark",
+      "min_version": "2.2.0.0",
+      "max_version": "2.3.0.0"
+    },
+    {
+      "name": "timeline_state_store",
+      "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "copy_tarball_to_hdfs",
+      "description": "Copy tarball to HDFS support (AMBARI-12113)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "spark_16plus",
+      "description": "Spark 1.6+",
+      "min_version": "2.4.0.0"
+    },
+    {
+      "name": "spark_thriftserver",
+      "description": "Spark Thrift Server",
+      "min_version": "2.3.2.0"
+    },
+    {
+      "name": "storm_kerberos",
+      "description": "Storm Kerberos support (AMBARI-7570)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "storm_ams",
+      "description": "Storm AMS integration (AMBARI-10710)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "create_kafka_broker_id",
+      "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
+      "min_version": "2.2.0.0",
+      "max_version": "2.3.0.0"
+    },
+    {
+      "name": "kafka_listeners",
+      "description": "Kafka listeners (AMBARI-10984)",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "kafka_kerberos",
+      "description": "Kafka Kerberos support (AMBARI-10984)",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "pig_on_tez",
+      "description": "Pig on Tez support (AMBARI-7863)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "ranger_usersync_non_root",
+      "description": "Ranger Usersync as non-root user (AMBARI-10416)",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "ranger_audit_db_support",
+      "description": "Ranger Audit to DB support",
+      "min_version": "2.2.0.0",
+      "max_version": "2.5.0.0"
+    },
+    {
+      "name": "accumulo_kerberos_user_auth",
+      "description": "Accumulo Kerberos User Auth (AMBARI-10163)",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "knox_versioned_data_dir",
+      "description": "Use versioned data dir for Knox (AMBARI-13164)",
+      "min_version": "2.3.2.0"
+    },
+    {
+      "name": "knox_sso_topology",
+      "description": "Knox SSO Topology support (AMBARI-13975)",
+      "min_version": "2.3.8.0"
+    },
+    {
+      "name": "atlas_rolling_upgrade",
+      "description": "Rolling upgrade support for Atlas",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "oozie_admin_user",
+      "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "oozie_create_hive_tez_configs",
+      "description": "Oozie create configs for Ambari Hive and Tez deployments (AMBARI-8074)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "oozie_setup_shared_lib",
+      "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "oozie_host_kerberos",
+      "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
+      "min_version": "2.0.0.0",
+      "max_version": "2.2.0.0"
+    },
+    {
+      "name": "falcon_extensions",
+      "description": "Falcon Extension",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "hive_metastore_upgrade_schema",
+      "description": "Hive metastore upgrade schema support (AMBARI-11176)",
+      "min_version": "2.3.0.0"
+     },
+    {
+      "name": "hive_server_interactive",
+      "description": "Hive server interactive support (AMBARI-15573)",
+      "min_version": "2.5.0.0"
+     },
+    {
+      "name": "hive_webhcat_specific_configs",
+      "description": "Hive webhcat specific configurations support (AMBARI-12364)",
+      "min_version": "2.3.0.0"
+     },
+    {
+      "name": "hive_purge_table",
+      "description": "Hive purge table support (AMBARI-12260)",
+      "min_version": "2.3.0.0"
+     },
+    {
+      "name": "hive_server2_kerberized_env",
+      "description": "Hive server2 working on kerberized environment (AMBARI-13749)",
+      "min_version": "2.2.3.0",
+      "max_version": "2.2.5.0"
+     },
+    {
+      "name": "hive_env_heapsize",
+      "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "ranger_kms_hsm_support",
+      "description": "Ranger KMS HSM support (AMBARI-15752)",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "ranger_log4j_support",
+      "description": "Ranger supporting log-4j properties (AMBARI-15681)",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "ranger_kerberos_support",
+      "description": "Ranger Kerberos support",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "hive_metastore_site_support",
+      "description": "Hive Metastore site support",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "ranger_usersync_password_jceks",
+      "description": "Saving Ranger Usersync credentials in jceks",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "ranger_install_infra_client",
+      "description": "Ambari Infra Service support",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "falcon_atlas_support_2_3",
+      "description": "Falcon Atlas integration support for 2.3 stack",
+      "min_version": "2.3.99.0",
+      "max_version": "2.4.0.0"
+    },
+    {
+      "name": "falcon_atlas_support",
+      "description": "Falcon Atlas integration",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "hbase_home_directory",
+      "description": "Hbase home directory in HDFS needed for HBASE backup",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "spark_livy",
+      "description": "Livy as slave component of spark",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "atlas_ranger_plugin_support",
+      "description": "Atlas Ranger plugin support",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "atlas_conf_dir_in_path",
+      "description": "Prepend the Atlas conf dir (/etc/atlas/conf) to the classpath of Storm and Falcon",
+      "min_version": "2.3.0.0",
+      "max_version": "2.4.99.99"
+    },
+    {
+      "name": "atlas_upgrade_support",
+      "description": "Atlas supports express and rolling upgrades",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "atlas_hook_support",
+      "description": "Atlas support for hooks in Hive, Storm, Falcon, and Sqoop",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "ranger_pid_support",
+      "description": "Ranger Service support pid generation AMBARI-16756",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "ranger_kms_pid_support",
+      "description": "Ranger KMS Service support pid generation",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "ranger_admin_password_change",
+      "description": "Allow ranger admin credentials to be specified during cluster creation (AMBARI-17000)",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "storm_metrics_apache_classes",
+      "description": "Metrics sink for Storm that uses Apache class names",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "spark_java_opts_support",
+      "description": "Allow Spark to generate java-opts file",
+      "min_version": "2.2.0.0",
+      "max_version": "2.4.0.0"
+    },
+    {
+      "name": "atlas_hbase_setup",
+      "description": "Use script to create Atlas tables in Hbase and set permissions for Atlas user.",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "ranger_hive_plugin_jdbc_url",
+      "description": "Handle Ranger hive repo config jdbc url change for stack 2.5 (AMBARI-18386)",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "zkfc_version_advertised",
+      "description": "ZKFC advertise version",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "phoenix_core_hdfs_site_required",
+      "description": "HDFS and CORE site required for Phoenix",
+      "max_version": "2.5.9.9"
+    }
+  ]
+}
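
As a reading aid: each entry gates a feature on the formatted stack version, active from min_version upward and, where max_version is present, switched off at the upper bound (the real check lives in Ambari's stack_features helpers; the sketch below only illustrates the comparison semantics with plain tuples and treats max_version as exclusive, which is an assumption):

def _v(version):
    # "2.5.0.0" -> (2, 5, 0, 0) for ordering
    return tuple(int(p) for p in version.split("."))

def feature_active(feature, current_version):
    if "min_version" in feature and _v(current_version) < _v(feature["min_version"]):
        return False
    if "max_version" in feature and _v(current_version) >= _v(feature["max_version"]):
        return False
    return True

snappy = {"name": "snappy", "min_version": "2.0.0.0", "max_version": "2.2.0.0"}
print(feature_active(snappy, "2.1.0.0"))  # True
print(feature_active(snappy, "2.3.0.0"))  # False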

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_tools.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_tools.json b/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_tools.json
new file mode 100644
index 0000000..d1aab4b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_tools.json
@@ -0,0 +1,4 @@
+{
+  "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"],
+  "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/repos/repoinfo.xml
new file mode 100644
index 0000000..5145064
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/repos/repoinfo.xml
@@ -0,0 +1,132 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <latest>http://s3.amazonaws.com/dev.hortonworks.com/HDP/hdp_urlinfo.json</latest>
+  <os family="redhat6">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/3.x/updates/3.0.0.0</baseurl>
+      <repoid>HDP-3.0</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6</baseurl>
+      <repoid>HDP-UTILS-1.1.0.21</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+    </repo>
+  </os>
+  <os family="redhat7">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos7/3.x/updates/3.0.0.0</baseurl>
+      <repoid>HDP-3.0</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos7</baseurl>
+      <repoid>HDP-UTILS-1.1.0.21</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+    </repo>
+  </os>
+  <os family="suse11">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/suse11sp3/3.x/updates/3.0.0.0</baseurl>
+      <repoid>HDP-3.0</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/suse11sp3</baseurl>
+      <repoid>HDP-UTILS-1.1.0.21</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+    </repo>
+  </os>
+  <os family="suse12">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/sles12/3.x/updates/3.0.0.0</baseurl>
+      <repoid>HDP-3.0</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/suse11sp3</baseurl>
+      <repoid>HDP-UTILS-1.1.0.21</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+    </repo>
+  </os>
+  <os family="ubuntu12">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/ubuntu12/3.x/updates/2.3.0.0</baseurl>
+      <repoid>HDP-3.0</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/ubuntu12</baseurl>
+      <repoid>HDP-UTILS-1.1.0.21</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+    </repo>
+  </os>
+  <os family="debian7">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/debian7/3.x/updates/2.3.0.0</baseurl>
+      <repoid>HDP-3.0</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/debian6</baseurl>
+      <repoid>HDP-UTILS-1.1.0.21</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+    </repo>
+  </os>
+  <os family="ubuntu14">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/ubuntu14/3.x/updates/2.3.0.0</baseurl>
+      <repoid>HDP-3.0</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/ubuntu12</baseurl>
+      <repoid>HDP-UTILS-1.1.0.21</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+    </repo>
+  </os>
+  <os family="ubuntu16">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/ubuntu14/3.x/updates/3.0.0.0</baseurl>
+      <repoid>HDP-3.0</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/ubuntu12</baseurl>
+      <repoid>HDP-UTILS-1.1.0.21</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+    </repo>
+  </os>
+</reposinfo>

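A note on the file just added: repoinfo.xml groups one or more <repo> entries (baseurl, repoid, reponame, unique) under an <os family="..."> element for each supported OS family, plus a <latest> URL pointing at the hdp_urlinfo.json index. Below is a minimal sketch, using only the Python standard library, of how the declared repositories could be listed for inspection; it is illustrative only and is not the parser Ambari itself uses for stack metadata.

# Illustrative sketch only -- not Ambari's stack-definition loader.
# Reads a repoinfo.xml like the one above and prints each OS family's repos.
import xml.etree.ElementTree as ET

def list_repos(path):
    root = ET.parse(path).getroot()            # <reposinfo>
    for os_elem in root.findall("os"):
        family = os_elem.get("family")
        for repo in os_elem.findall("repo"):
            print(family,
                  repo.findtext("repoid"),
                  repo.findtext("baseurl"))

if __name__ == "__main__":
    # Path taken from the diff header above.
    list_repos("ambari-server/src/main/resources/stacks/HDP/3.0/repos/repoinfo.xml")
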
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/core-site.xml
new file mode 100644
index 0000000..9dcf561
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/core-site.xml
@@ -0,0 +1,56 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+ 
+        http://www.apache.org/licenses/LICENSE-2.0
+ 
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+ -->
+<!-- Put site-specific property overrides in this file. -->
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
+  <!-- These configs were inherited from HDP 2.2 -->
+  <property>
+    <name>hadoop.http.authentication.simple.anonymous.allowed</name>
+    <value>true</value>
+    <description>
+      Indicates if anonymous requests are allowed when using &apos;simple&apos; authentication.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hadoop.security.key.provider.path</name>
+    <value/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hadoop-env</type>
+        <name>keyserver_host</name>
+      </property>
+      <property>
+        <type>hadoop-env</type>
+        <name>keyserver_port</name>
+      </property>
+      <property>
+        <type>kms-env</type>
+        <name>kms_port</name>
+      </property>
+      <property>
+        <type>ranger-kms-site</type>
+        <name>ranger.service.https.attrib.ssl.enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

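For context on the property above: hadoop.security.key.provider.path ships with an empty (and explicitly valid-empty) value, and its <depends-on> list names the KMS host, port and Ranger KMS SSL flag, so the value is expected to be recalculated from those inputs rather than set by hand. The sketch below shows one plausible way such a value could be derived into Hadoop's kms:// provider URI form; the helper name, argument handling and defaults are assumptions for illustration, not Ambari's stack-advisor code.

# Hedged sketch: deriving a key-provider URI from the depends-on properties.
def kms_key_provider_uri(keyserver_host, kms_port, ssl_enabled):
    # An empty host leaves the property empty, which the value-attributes allow.
    if not keyserver_host or not keyserver_host.strip():
        return ""
    scheme = "https" if ssl_enabled else "http"
    hosts = ";".join(h.strip() for h in keyserver_host.split(",") if h.strip())
    return "kms://{0}@{1}:{2}/kms".format(scheme, hosts, kms_port)

# Example (hypothetical host/port values):
print(kms_key_provider_uri("c6401.ambari.apache.org", "9292", False))
# -> kms://http@c6401.ambari.apache.org:9292/kms
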
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hadoop-env.xml
new file mode 100644
index 0000000..8697740
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hadoop-env.xml
@@ -0,0 +1,200 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <!-- These configs were inherited from HDP 2.2 -->
+  <property>
+    <name>keyserver_host</name>
+    <value> </value>
+    <display-name>Key Server Host</display-name>
+    <description>Hostnames where Key Management Server is installed</description>
+    <value-attributes>
+      <type>string</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>keyserver_port</name>
+    <value/>
+    <display-name>Key Server Port</display-name>
+    <description>Port number where Key Management Server is available</description>
+    <value-attributes>
+      <type>int</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- These configs were inherited from HDP 2.3 -->
+  <!-- hadoop-env.sh -->
+  <property>
+    <name>content</name>
+    <display-name>hadoop-env template</display-name>
+    <description>This is the jinja template for hadoop-env.sh file</description>
+    <value>
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop home directory
+export HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+# Hadoop Configuration Directory
+
+{# this is different for HDP1 #}
+# Path to jsvc required by secure HDP 2.0 datanode
+export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+
+{% if java_version &lt; 8 %}
+SHARED_HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT"
+export HADOOP_NAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_DATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\" ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS"
+
+{% else %}
+SHARED_HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT"
+export HADOOP_NAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_DATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\" ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+{% endif %}
+
+HADOOP_NFS3_OPTS="-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Add database libraries
+JAVA_JDBC_LIBS=""
+if [ -d "/usr/share/java" ]; then
+  for jarFile in `ls /usr/share/java | grep -E "(mysql|ojdbc|postgresql|sqljdbc)" 2&gt;/dev/null`
+  do
+    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+  done
+fi
+
+# Add libraries to the hadoop classpath - some may not need a colon as they already include it
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+# Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}
+
+export HADOOP_OPTS="-Dhdp.version=$HDP_VERSION $HADOOP_OPTS"
+
+
+# Fix for a temporary bug: ulimit from conf files is not picked up without a full relogin.
+# Makes sense to fix only when running DN as root
+if [ "$command" == "datanode" ] &amp;&amp; [ "$EUID" -eq 0 ] &amp;&amp; [ -n "$HADOOP_SECURE_DN_USER" ]; then
+  {% if is_datanode_max_locked_memory_set %}
+  ulimit -l {{datanode_max_locked_memory}}
+  {% endif %}
+  ulimit -n {{hdfs_user_nofile_limit}}
+fi
+    </value>
+    <value-attributes>
+      <type>content</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>nfsgateway_heapsize</name>
+    <display-name>NFSGateway maximum Java heap size</display-name>
+    <value>1024</value>
+    <description>Maximum Java heap size for NFSGateway (Java option -Xmx)</description>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>

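One remark on the hadoop-env.xml just added: its content property is declared as <type>content</type> and, per its own description, is a Jinja template that the managing agent renders into hadoop-env.sh with cluster-specific parameters ({{java_home}}, {{hadoop_heapsize}}, the java_version conditional, and so on). The fragment below is a hedged sketch of that rendering step, assuming the third-party jinja2 package and made-up parameter values; it is not the agent's actual code path.

# Hedged sketch: rendering a few placeholders from the template above.
from jinja2 import Template

snippet = Template(
    "export JAVA_HOME={{java_home}}\n"
    "export HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n"
    "export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n"
)
print(snippet.render(java_home="/usr/jdk64/jdk1.8.0_77",      # hypothetical values
                     hadoop_heapsize="1024",
                     hdfs_log_dir_prefix="/var/log/hadoop"))
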

[06/25] ambari git commit: Merge from branch-2.5

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/python/stacks/2.4/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.4/configs/default.json b/ambari-server/src/test/python/stacks/2.4/configs/default.json
index a6e2478..1dbc611 100644
--- a/ambari-server/src/test/python/stacks/2.4/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.4/configs/default.json
@@ -274,7 +274,7 @@
         "logsearch_ui_protocol": "http",
         "logsearch_ui_port" : "61888",
         "logsearch_solr_audit_logs_use_ranger": "false",
-        "content": "# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements.  See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License.  You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#solr.url=http://{{solr_host}}:{{solr_port}}/solr\n\n#Service Logs and History colletion\nlogsearch.solr.zkhosts={{zookeeper_quorum}}{{solr_znode}}\nlogsearch.solr.collection.ser
 vice.logs={{logsearch_collection_service_logs}}\nlogsearch.solr.collection.history=history\n\nlogsearch.service.logs.split.interval.mins={{logsearch_service_logs_split_interval_mins}}\nlogsearch.collection.service.logs.numshards={{logsearch_collection_service_logs_numshards}}\nlogsearch.collection.service.logs.replication.factor={{logsearch_collection_service_logs_replication_factor}}\n\nlogsearch.service.logs.fields={{logsearch_service_logs_fields}}\n\n#Audit logs\nlogsearch.solr.audit.logs.zkhosts={{logsearch_solr_audit_logs_zk_quorum}}{{logsearch_solr_audit_logs_zk_node}}\nogsearch.solr.collection.audit.logs={{solr_collection_audit_logs}}\nlogsearch.solr.audit.logs.url={{logsearch_solr_audit_logs_url}}\n\nlogsearch.audit.logs.split.interval.mins={{logsearch_audit_logs_split_interval_mins}}\nlogsearch.collection.audit.logs.numshards={{logsearch_collection_audit_logs_numshards}}\nlogsearch.collection.audit.logs.replication.factor={{logsearch_collection_audit_logs_replication_factor
 }}\n{% if logsearch_solr_ssl_enabled %}\nexport LOGSEARCH_SSL=\"true\"\nexport LOGSEARCH_KEYSTORE_LOCATION={{logsearch_keystore_location}}\nexport LOGSEARCH_KEYSTORE_PASSWORD={{logsearch_keystore_password}}\nexport LOGSEARCH_KEYSTORE_TYPE={{logsearch_keystore_type}}\nexport LOGSEARCH_TRUSTSTORE_LOCATION={{logsearch_truststore_location}}\nexport LOGSEARCH_TRUSTSTORE_PASSWORD={{logsearch_truststore_password}}\nexport LOGSEARCH_TRUSTSTORE_TYPE={{logsearch_truststore_type}}\n{% endif %}"
+        "content": "# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements.  See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License.  You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#solr.url=http://{{solr_host}}:{{solr_port}}/solr\n\n#Service Logs and History colletion\nlogsearch.solr.zkhosts={{zookeeper_quorum}}{{solr_znode}}\nlogsearch.solr.collection.ser
 vice.logs={{logsearch_collection_service_logs}}\nlogsearch.solr.collection.history=history\n\nlogsearch.service.logs.split.interval.mins={{logsearch_service_logs_split_interval_mins}}\nlogsearch.collection.service.logs.numshards={{logsearch_collection_service_logs_numshards}}\nlogsearch.collection.service.logs.replication.factor={{logsearch_collection_service_logs_replication_factor}}\n\nlogsearch.service.logs.fields={{logsearch_service_logs_fields}}\n\n#Audit logs\nlogsearch.solr.audit.logs.zkhosts={{logsearch_solr_zk_quorum}}{{logsearch_solr_zk_znode}}\nogsearch.solr.collection.audit.logs={{solr_collection_audit_logs}}\nlogsearch.solr.audit.logs.url={{logsearch_solr_audit_logs_url}}\n\nlogsearch.audit.logs.split.interval.mins={{logsearch_audit_logs_split_interval_mins}}\nlogsearch.collection.audit.logs.numshards={{logsearch_collection_audit_logs_numshards}}\nlogsearch.collection.audit.logs.replication.factor={{logsearch_collection_audit_logs_replication_factor}}\n{% if logsearch_s
 olr_ssl_enabled %}\nexport LOGSEARCH_SSL=\"true\"\nexport LOGSEARCH_KEYSTORE_LOCATION={{logsearch_keystore_location}}\nexport LOGSEARCH_KEYSTORE_PASSWORD={{logsearch_keystore_password}}\nexport LOGSEARCH_KEYSTORE_TYPE={{logsearch_keystore_type}}\nexport LOGSEARCH_TRUSTSTORE_LOCATION={{logsearch_truststore_location}}\nexport LOGSEARCH_TRUSTSTORE_PASSWORD={{logsearch_truststore_password}}\nexport LOGSEARCH_TRUSTSTORE_TYPE={{logsearch_truststore_type}}\n{% endif %}"
       },
       "logsearch-service_logs-solrconfig": {
         "content": "&lt;?xml version=\"1.0\" encoding=\"UTF-8\" ?&gt;\n&lt;!--\n Licensed to the Apache Software Foundation (ASF) under one or more\n contributor license agreements.  See the NOTICE file distributed with\n this work for additional information regarding copyright ownership.\n The ASF licenses this file to You under the Apache License, Version 2.0\n (the \"License\"); you may not use this file except in compliance with\n the License.  You may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n--&gt;\n\n&lt;!-- \n     For more details about configurations options that may appear in\n     this file, see http://wiki.apache.or
 g/solr/SolrConfigXml. \n--&gt;\n&lt;config&gt;\n  &lt;!-- In all configuration below, a prefix of \"solr.\" for class names\n       is an alias that causes solr to search appropriate packages,\n       including org.apache.solr.(search|update|request|core|analysis)\n\n       You may also specify a fully qualified Java classname if you\n       have your own custom plugins.\n    --&gt;\n\n  &lt;!-- Controls what version of Lucene various components of Solr\n       adhere to.  Generally, you want to use the latest version to\n       get all bug fixes and improvements. It is highly recommended\n       that you fully re-index after changing this setting as it can\n       affect both how text is indexed and queried.\n  --&gt;\n  &lt;luceneMatchVersion&gt;5.0.0&lt;/luceneMatchVersion&gt;\n\n  &lt;!-- &lt;lib/&gt; directives can be used to instruct Solr to load any Jars\n       identified and use them to resolve any \"plugins\" specified in\n       your solrconfig.xml or schema.xml (ie: Anal
 yzers, Request\n       Handlers, etc...).\n\n       All directories and paths are resolved relative to the\n       instanceDir.\n\n       Please note that &lt;lib/&gt; directives are processed in the order\n       that they appear in your solrconfig.xml file, and are \"stacked\" \n       on top of each other when building a ClassLoader - so if you have \n       plugin jars with dependencies on other jars, the \"lower level\" \n       dependency jars should be loaded first.\n\n       If a \"./lib\" directory exists in your instanceDir, all files\n       found in it are included as if you had used the following\n       syntax...\n       \n              &lt;lib dir=\"./lib\" /&gt;\n    --&gt;\n\n  &lt;!-- A 'dir' option by itself adds any files found in the directory \n       to the classpath, this is useful for including all jars in a\n       directory.\n\n       When a 'regex' is specified in addition to a 'dir', only the\n       files in that directory which completely match the reg
 ex\n       (anchored on both ends) will be included.\n\n       If a 'dir' option (with or without a regex) is used and nothing\n       is found that matches, a warning will be logged.\n\n       The examples below can be used to load some solr-contribs along \n       with their external dependencies.\n    --&gt;\n  &lt;lib dir=\"${solr.install.dir:../../../..}/dist/\" regex=\"solr-dataimporthandler-.*\\.jar\" /&gt;\n\n  &lt;lib dir=\"${solr.install.dir:../../../..}/contrib/extraction/lib\" regex=\".*\\.jar\" /&gt;\n  &lt;lib dir=\"${solr.install.dir:../../../..}/dist/\" regex=\"solr-cell-\\d.*\\.jar\" /&gt;\n\n  &lt;lib dir=\"${solr.install.dir:../../../..}/contrib/clustering/lib/\" regex=\".*\\.jar\" /&gt;\n  &lt;lib dir=\"${solr.install.dir:../../../..}/dist/\" regex=\"solr-clustering-\\d.*\\.jar\" /&gt;\n\n  &lt;lib dir=\"${solr.install.dir:../../../..}/contrib/langid/lib/\" regex=\".*\\.jar\" /&gt;\n  &lt;lib dir=\"${solr.install.dir:../../../..}/dist/\" regex=\"solr-langid-\\d.*
 \\.jar\" /&gt;\n\n  &lt;lib dir=\"${solr.install.dir:../../../..}/contrib/velocity/lib\" regex=\".*\\.jar\" /&gt;\n  &lt;lib dir=\"${solr.install.dir:../../../..}/dist/\" regex=\"solr-velocity-\\d.*\\.jar\" /&gt;\n\n  &lt;!-- an exact 'path' can be used instead of a 'dir' to specify a \n       specific jar file.  This will cause a serious error to be logged \n       if it can't be loaded.\n    --&gt;\n  &lt;!--\n     &lt;lib path=\"../a-jar-that-does-not-exist.jar\" /&gt; \n  --&gt;\n  \n  &lt;!-- Data Directory\n\n       Used to specify an alternate directory to hold all index data\n       other than the default ./data under the Solr home.  If\n       replication is in use, this should match the replication\n       configuration.\n    --&gt;\n  &lt;dataDir&gt;${solr.data.dir:}&lt;/dataDir&gt;\n\n\n  &lt;!-- The DirectoryFactory to use for indexes.\n       \n       solr.StandardDirectoryFactory is filesystem\n       based and tries to pick the best implementation for the current\n  
      JVM and platform.  solr.NRTCachingDirectoryFactory, the default,\n       wraps solr.StandardDirectoryFactory and caches small files in memory\n       for better NRT performance.\n\n       One can force a particular implementation via solr.MMapDirectoryFactory,\n       solr.NIOFSDirectoryFactory, or solr.SimpleFSDirectoryFactory.\n\n       solr.RAMDirectoryFactory is memory based, not\n       persistent, and doesn't work with replication.\n    --&gt;\n  &lt;directoryFactory name=\"DirectoryFactory\" \n                    class=\"${solr.directoryFactory:solr.NRTCachingDirectoryFactory}\"&gt;\n    \n         \n    &lt;!-- These will be used if you are using the solr.HdfsDirectoryFactory,\n         otherwise they will be ignored. If you don't plan on using hdfs,\n         you can safely remove this section. --&gt;      \n    &lt;!-- The root directory that collection data should be written to. --&gt;     \n    &lt;str name=\"solr.hdfs.home\"&gt;${solr.hdfs.home:}&lt;/str&gt;\n    &
 lt;!-- The hadoop configuration files to use for the hdfs client. --&gt;    \n    &lt;str name=\"solr.hdfs.confdir\"&gt;${solr.hdfs.confdir:}&lt;/str&gt;\n    &lt;!-- Enable/Disable the hdfs cache. --&gt;    \n    &lt;str name=\"solr.hdfs.blockcache.enabled\"&gt;${solr.hdfs.blockcache.enabled:true}&lt;/str&gt;\n    &lt;!-- Enable/Disable using one global cache for all SolrCores. \n         The settings used will be from the first HdfsDirectoryFactory created. --&gt;    \n    &lt;str name=\"solr.hdfs.blockcache.global\"&gt;${solr.hdfs.blockcache.global:true}&lt;/str&gt;\n    \n  &lt;/directoryFactory&gt; \n\n  &lt;!-- The CodecFactory for defining the format of the inverted index.\n       The default implementation is SchemaCodecFactory, which is the official Lucene\n       index format, but hooks into the schema to provide per-field customization of\n       the postings lists and per-document values in the fieldType element\n       (postingsFormat/docValuesFormat). Note that most of
  the alternative implementations\n       are experimental, so if you choose to customize the index format, it's a good\n       idea to convert back to the official format e.g. via IndexWriter.addIndexes(IndexReader)\n       before upgrading to a newer version to avoid unnecessary reindexing.\n  --&gt;\n  &lt;codecFactory class=\"solr.SchemaCodecFactory\"/&gt;\n\n  &lt;!-- To enable dynamic schema REST APIs, use the following for &lt;schemaFactory&gt;: --&gt;\n  \n       &lt;schemaFactory class=\"ManagedIndexSchemaFactory\"&gt;\n         &lt;bool name=\"mutable\"&gt;true&lt;/bool&gt;\n         &lt;str name=\"managedSchemaResourceName\"&gt;managed-schema&lt;/str&gt;\n       &lt;/schemaFactory&gt;\n&lt;!--       \n       When ManagedIndexSchemaFactory is specified, Solr will load the schema from\n       the resource named in 'managedSchemaResourceName', rather than from schema.xml.\n       Note that the managed schema resource CANNOT be named schema.xml.  If the managed\n       schema 
 does not exist, Solr will create it after reading schema.xml, then rename\n       'schema.xml' to 'schema.xml.bak'. \n       \n       Do NOT hand edit the managed schema - external modifications will be ignored and\n       overwritten as a result of schema modification REST API calls.\n\n       When ManagedIndexSchemaFactory is specified with mutable = true, schema\n       modification REST API calls will be allowed; otherwise, error responses will be\n       sent back for these requests. \n\n  &lt;schemaFactory class=\"ClassicIndexSchemaFactory\"/&gt;\n  --&gt;\n\n  &lt;!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n       Index Config - These settings control low-level behavior of indexing\n       Most example settings here show the default value, but are commented\n       out, to more easily see where customizations have been made.\n       \n       Note: This replaces &lt;indexDefaults&gt; and &lt;mainIndex&gt; from older versions\n       ~~~~~~~~~~~~~
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --&gt;\n  &lt;indexConfig&gt;\n    &lt;!-- maxFieldLength was removed in 4.0. To get similar behavior, include a \n         LimitTokenCountFilterFactory in your fieldType definition. E.g. \n     &lt;filter class=\"solr.LimitTokenCountFilterFactory\" maxTokenCount=\"10000\"/&gt;\n    --&gt;\n    &lt;!-- Maximum time to wait for a write lock (ms) for an IndexWriter. Default: 1000 --&gt;\n    &lt;!-- &lt;writeLockTimeout&gt;1000&lt;/writeLockTimeout&gt;  --&gt;\n    &lt;!-- LogSearch customization to avoid timeouts --&gt;\n    &lt;writeLockTimeout&gt;10000&lt;/writeLockTimeout&gt;\n\n    &lt;!-- The maximum number of simultaneous threads that may be\n         indexing documents at once in IndexWriter; if more than this\n         many threads arrive they will wait for others to finish.\n         Default in Solr/Lucene is 8. --&gt;\n    &lt;!-- &lt;maxIndexingThreads&gt;8&lt;/maxIndexingThreads&gt;  --&gt;\n    &lt;!-- LogSearch cu
 stomization of increase performance --&gt;\n    &lt;maxIndexingThreads&gt;50&lt;/maxIndexingThreads&gt;\n\n    &lt;!-- Expert: Enabling compound file will use less files for the index, \n         using fewer file descriptors on the expense of performance decrease. \n         Default in Lucene is \"true\". Default in Solr is \"false\" (since 3.6) --&gt;\n    &lt;!-- &lt;useCompoundFile&gt;false&lt;/useCompoundFile&gt; --&gt;\n\n    &lt;!-- ramBufferSizeMB sets the amount of RAM that may be used by Lucene\n         indexing for buffering added documents and deletions before they are\n         flushed to the Directory.\n         maxBufferedDocs sets a limit on the number of documents buffered\n         before flushing.\n         If both ramBufferSizeMB and maxBufferedDocs is set, then\n         Lucene will flush based on whichever limit is hit first.\n         The default is 100 MB.  --&gt;\n    &lt;!-- &lt;ramBufferSizeMB&gt;100&lt;/ramBufferSizeMB&gt; --&gt;\n    &lt;!-- &lt;maxBuffe
 redDocs&gt;1000&lt;/maxBufferedDocs&gt; --&gt;\n\n    &lt;!-- Expert: Merge Policy \n         The Merge Policy in Lucene controls how merging of segments is done.\n         The default since Solr/Lucene 3.3 is TieredMergePolicy.\n         The default since Lucene 2.3 was the LogByteSizeMergePolicy,\n         Even older versions of Lucene used LogDocMergePolicy.\n      --&gt;\n    &lt;!--\n        &lt;mergePolicy class=\"org.apache.lucene.index.TieredMergePolicy\"&gt;\n          &lt;int name=\"maxMergeAtOnce\"&gt;10&lt;/int&gt;\n          &lt;int name=\"segmentsPerTier\"&gt;10&lt;/int&gt;\n        &lt;/mergePolicy&gt;\n      --&gt;\n       \n    &lt;!-- Merge Factor\n         The merge factor controls how many segments will get merged at a time.\n         For TieredMergePolicy, mergeFactor is a convenience parameter which\n         will set both MaxMergeAtOnce and SegmentsPerTier at once.\n         For LogByteSizeMergePolicy, mergeFactor decides how many new segments\n         will b
 e allowed before they are merged into one.\n         Default is 10 for both merge policies.\n      --&gt;\n    &lt;!-- \n    &lt;mergeFactor&gt;10&lt;/mergeFactor&gt;\n      --&gt;\n    &lt;!-- LogSearch customization. Increased to 25 to maximize indexing speed --&gt;\n    &lt;mergeFactor&gt;25&lt;/mergeFactor&gt;\n\n    &lt;!-- Expert: Merge Scheduler\n         The Merge Scheduler in Lucene controls how merges are\n         performed.  The ConcurrentMergeScheduler (Lucene 2.3 default)\n         can perform merges in the background using separate threads.\n         The SerialMergeScheduler (Lucene 2.2 default) does not.\n     --&gt;\n    &lt;!-- \n       &lt;mergeScheduler class=\"org.apache.lucene.index.ConcurrentMergeScheduler\"/&gt;\n       --&gt;\n\n    &lt;!-- LockFactory \n\n         This option specifies which Lucene LockFactory implementation\n         to use.\n      \n         single = SingleInstanceLockFactory - suggested for a\n                  read-only index or when th
 ere is no possibility of\n                  another process trying to modify the index.\n         native = NativeFSLockFactory - uses OS native file locking.\n                  Do not use when multiple solr webapps in the same\n                  JVM are attempting to share a single index.\n         simple = SimpleFSLockFactory  - uses a plain file for locking\n\n         Defaults: 'native' is default for Solr3.6 and later, otherwise\n                   'simple' is the default\n\n         More details on the nuances of each LockFactory...\n         http://wiki.apache.org/lucene-java/AvailableLockFactories\n    --&gt;\n    &lt;lockType&gt;${solr.lock.type:native}&lt;/lockType&gt;\n\n    &lt;!-- Unlock On Startup\n\n         If true, unlock any held write or commit locks on startup.\n         This defeats the locking mechanism that allows multiple\n         processes to safely access a lucene index, and should be used\n         with care. Default is \"false\".\n\n         This is not n
 eeded if lock type is 'single'\n     --&gt;\n    &lt;!--\n    &lt;unlockOnStartup&gt;false&lt;/unlockOnStartup&gt;\n      --&gt;\n\n    &lt;!-- Commit Deletion Policy\n         Custom deletion policies can be specified here. The class must\n         implement org.apache.lucene.index.IndexDeletionPolicy.\n\n         The default Solr IndexDeletionPolicy implementation supports\n         deleting index commit points on number of commits, age of\n         commit point and optimized status.\n         \n         The latest commit point should always be preserved regardless\n         of the criteria.\n    --&gt;\n    &lt;!-- \n    &lt;deletionPolicy class=\"solr.SolrDeletionPolicy\"&gt;\n    --&gt;\n      &lt;!-- The number of commit points to be kept --&gt;\n      &lt;!-- &lt;str name=\"maxCommitsToKeep\"&gt;1&lt;/str&gt; --&gt;\n      &lt;!-- The number of optimized commit points to be kept --&gt;\n      &lt;!-- &lt;str name=\"maxOptimizedCommitsToKeep\"&gt;0&lt;/str&gt; --&gt;\n      &l
 t;!--\n          Delete all commit points once they have reached the given age.\n          Supports DateMathParser syntax e.g.\n        --&gt;\n      &lt;!--\n         &lt;str name=\"maxCommitAge\"&gt;30MINUTES&lt;/str&gt;\n         &lt;str name=\"maxCommitAge\"&gt;1DAY&lt;/str&gt;\n      --&gt;\n    &lt;!-- \n    &lt;/deletionPolicy&gt;\n    --&gt;\n\n    &lt;!-- Lucene Infostream\n       \n         To aid in advanced debugging, Lucene provides an \"InfoStream\"\n         of detailed information when indexing.\n\n         Setting the value to true will instruct the underlying Lucene\n         IndexWriter to write its info stream to solr's log. By default,\n         this is enabled here, and controlled through log4j.properties.\n      --&gt;\n     &lt;infoStream&gt;true&lt;/infoStream&gt;\n  &lt;/indexConfig&gt;\n\n\n  &lt;!-- JMX\n       \n       This example enables JMX if and only if an existing MBeanServer\n       is found, use this if you want to configure JMX through JVM\n    
    parameters. Remove this to disable exposing Solr configuration\n       and statistics to JMX.\n\n       For more details see http://wiki.apache.org/solr/SolrJmx\n    --&gt;\n  &lt;jmx /&gt;\n  &lt;!-- If you want to connect to a particular server, specify the\n       agentId \n    --&gt;\n  &lt;!-- &lt;jmx agentId=\"myAgent\" /&gt; --&gt;\n  &lt;!-- If you want to start a new MBeanServer, specify the serviceUrl --&gt;\n  &lt;!-- &lt;jmx serviceUrl=\"service:jmx:rmi:///jndi/rmi://localhost:9999/solr\"/&gt;\n    --&gt;\n\n  &lt;!-- The default high-performance update handler --&gt;\n  &lt;updateHandler class=\"solr.DirectUpdateHandler2\"&gt;\n\n    &lt;!-- Enables a transaction log, used for real-time get, durability, and\n         and solr cloud replica recovery.  The log can grow as big as\n         uncommitted changes to the index, so use of a hard autoCommit\n         is recommended (see below).\n         \"dir\" - the target directory for transaction logs, defaults to the\n   
              solr data directory.  --&gt; \n    &lt;updateLog&gt;\n      &lt;str name=\"dir\"&gt;${solr.ulog.dir:}&lt;/str&gt;\n    &lt;/updateLog&gt;\n \n    &lt;!-- AutoCommit\n\n         Perform a hard commit automatically under certain conditions.\n         Instead of enabling autoCommit, consider using \"commitWithin\"\n         when adding documents. \n\n         http://wiki.apache.org/solr/UpdateXmlMessages\n\n         maxDocs - Maximum number of documents to add since the last\n                   commit before automatically triggering a new commit.\n\n         maxTime - Maximum amount of time in ms that is allowed to pass\n                   since a document was added before automatically\n                   triggering a new commit. \n         openSearcher - if false, the commit causes recent index changes\n           to be flushed to stable storage, but does not cause a new\n           searcher to be opened to make those changes visible.\n\n         If the updateLog is enab
 led, then it's highly recommended to\n         have some sort of hard autoCommit to limit the log size.\n      --&gt;\n     &lt;autoCommit&gt; \n       &lt;maxTime&gt;${solr.autoCommit.maxTime:15000}&lt;/maxTime&gt; \n       &lt;openSearcher&gt;false&lt;/openSearcher&gt; \n     &lt;/autoCommit&gt;\n\n    &lt;!-- softAutoCommit is like autoCommit except it causes a\n         'soft' commit which only ensures that changes are visible\n         but does not ensure that data is synced to disk.  This is\n         faster and more near-realtime friendly than a hard commit.\n      --&gt;\n\n     &lt;autoSoftCommit&gt; \n       &lt;maxTime&gt;${solr.autoSoftCommit.maxTime:5000}&lt;/maxTime&gt; \n     &lt;/autoSoftCommit&gt;\n\n    &lt;!-- Update Related Event Listeners\n         \n         Various IndexWriter related events can trigger Listeners to\n         take actions.\n\n         postCommit - fired after every commit or optimize command\n         postOptimize - fired after every optimize 
 command\n      --&gt;\n    &lt;!-- The RunExecutableListener executes an external command from a\n         hook such as postCommit or postOptimize.\n         \n         exe - the name of the executable to run\n         dir - dir to use as the current working directory. (default=\".\")\n         wait - the calling thread waits until the executable returns. \n                (default=\"true\")\n         args - the arguments to pass to the program.  (default is none)\n         env - environment variables to set.  (default is none)\n      --&gt;\n    &lt;!-- This example shows how RunExecutableListener could be used\n         with the script based replication...\n         http://wiki.apache.org/solr/CollectionDistribution\n      --&gt;\n    &lt;!--\n       &lt;listener event=\"postCommit\" class=\"solr.RunExecutableListener\"&gt;\n         &lt;str name=\"exe\"&gt;solr/bin/snapshooter&lt;/str&gt;\n         &lt;str name=\"dir\"&gt;.&lt;/str&gt;\n         &lt;bool name=\"wait\"&gt;true&lt;
 /bool&gt;\n         &lt;arr name=\"args\"&gt; &lt;str&gt;arg1&lt;/str&gt; &lt;str&gt;arg2&lt;/str&gt; &lt;/arr&gt;\n         &lt;arr name=\"env\"&gt; &lt;str&gt;MYVAR=val1&lt;/str&gt; &lt;/arr&gt;\n       &lt;/listener&gt;\n      --&gt;\n\n  &lt;/updateHandler&gt;\n  \n  &lt;!-- IndexReaderFactory\n\n       Use the following format to specify a custom IndexReaderFactory,\n       which allows for alternate IndexReader implementations.\n\n       ** Experimental Feature **\n\n       Please note - Using a custom IndexReaderFactory may prevent\n       certain other features from working. The API to\n       IndexReaderFactory may change without warning or may even be\n       removed from future releases if the problems cannot be\n       resolved.\n\n\n       ** Features that may not work with custom IndexReaderFactory **\n\n       The ReplicationHandler assumes a disk-resident index. Using a\n       custom IndexReader implementation may cause incompatibility\n       with ReplicationHandle
 r and may cause replication to not work\n       correctly. See SOLR-1366 for details.\n\n    --&gt;\n  &lt;!--\n  &lt;indexReaderFactory name=\"IndexReaderFactory\" class=\"package.class\"&gt;\n    &lt;str name=\"someArg\"&gt;Some Value&lt;/str&gt;\n  &lt;/indexReaderFactory &gt;\n  --&gt;\n\n  &lt;!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n       Query section - these settings control query time things like caches\n       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --&gt;\n  &lt;query&gt;\n    &lt;!-- Max Boolean Clauses\n\n         Maximum number of clauses in each BooleanQuery,  an exception\n         is thrown if exceeded.\n\n         ** WARNING **\n         \n         This option actually modifies a global Lucene property that\n         will affect all SolrCores.  If multiple solrconfig.xml files\n         disagree on this property, the value at any given moment will\n         be based on the last SolrCore to be initiali
 zed.\n         \n      --&gt;\n    &lt;maxBooleanClauses&gt;1024&lt;/maxBooleanClauses&gt;\n\n\n    &lt;!-- Solr Internal Query Caches\n\n         There are two implementations of cache available for Solr,\n         LRUCache, based on a synchronized LinkedHashMap, and\n         FastLRUCache, based on a ConcurrentHashMap.  \n\n         FastLRUCache has faster gets and slower puts in single\n         threaded operation and thus is generally faster than LRUCache\n         when the hit ratio of the cache is high (&gt; 75%), and may be\n         faster under other scenarios on multi-cpu systems.\n    --&gt;\n\n    &lt;!-- Filter Cache\n\n         Cache used by SolrIndexSearcher for filters (DocSets),\n         unordered sets of *all* documents that match a query.  When a\n         new searcher is opened, its caches may be prepopulated or\n         \"autowarmed\" using data from caches in the old searcher.\n         autowarmCount is the number of items to prepopulate.  For\n         LRUCa
 che, the autowarmed items will be the most recently\n         accessed items.\n\n         Parameters:\n           class - the SolrCache implementation LRUCache or\n               (LRUCache or FastLRUCache)\n           size - the maximum number of entries in the cache\n           initialSize - the initial capacity (number of entries) of\n               the cache.  (see java.util.HashMap)\n           autowarmCount - the number of entries to prepopulate from\n               and old cache.  \n      --&gt;\n    &lt;filterCache class=\"solr.FastLRUCache\"\n                 size=\"512\"\n                 initialSize=\"512\"\n                 autowarmCount=\"0\"/&gt;\n\n    &lt;!-- Query Result Cache\n         \n         Caches results of searches - ordered lists of document ids\n         (DocList) based on a query, a sort, and the range of documents requested.  \n      --&gt;\n    &lt;queryResultCache class=\"solr.LRUCache\"\n                     size=\"512\"\n                     initialS
 ize=\"512\"\n                     autowarmCount=\"0\"/&gt;\n   \n    &lt;!-- Document Cache\n\n         Caches Lucene Document objects (the stored fields for each\n         document).  Since Lucene internal document ids are transient,\n         this cache will not be autowarmed.  \n      --&gt;\n    &lt;documentCache class=\"solr.LRUCache\"\n                   size=\"512\"\n                   initialSize=\"512\"\n                   autowarmCount=\"0\"/&gt;\n    \n    &lt;!-- custom cache currently used by block join --&gt; \n    &lt;cache name=\"perSegFilter\"\n      class=\"solr.search.LRUCache\"\n      size=\"10\"\n      initialSize=\"0\"\n      autowarmCount=\"10\"\n      regenerator=\"solr.NoOpRegenerator\" /&gt;\n\n    &lt;!-- Field Value Cache\n         \n         Cache used to hold field values that are quickly accessible\n         by document id.  The fieldValueCache is created by default\n         even if not configured here.\n      --&gt;\n    &lt;!--\n       &lt;fieldValu
 eCache class=\"solr.FastLRUCache\"\n                        size=\"512\"\n                        autowarmCount=\"128\"\n                        showItems=\"32\" /&gt;\n      --&gt;\n\n    &lt;!-- Custom Cache\n\n         Example of a generic cache.  These caches may be accessed by\n         name through SolrIndexSearcher.getCache(),cacheLookup(), and\n         cacheInsert().  The purpose is to enable easy caching of\n         user/application level data.  The regenerator argument should\n         be specified as an implementation of solr.CacheRegenerator \n         if autowarming is desired.  \n      --&gt;\n    &lt;!--\n       &lt;cache name=\"myUserCache\"\n              class=\"solr.LRUCache\"\n              size=\"4096\"\n              initialSize=\"1024\"\n              autowarmCount=\"1024\"\n              regenerator=\"com.mycompany.MyRegenerator\"\n              /&gt;\n      --&gt;\n\n\n    &lt;!-- Lazy Field Loading\n\n         If true, stored fields that are not requested
  will be loaded\n         lazily.  This can result in a significant speed improvement\n         if the usual case is to not load all stored fields,\n         especially if the skipped fields are large compressed text\n         fields.\n    --&gt;\n    &lt;enableLazyFieldLoading&gt;true&lt;/enableLazyFieldLoading&gt;\n\n   &lt;!-- Use Filter For Sorted Query\n\n        A possible optimization that attempts to use a filter to\n        satisfy a search.  If the requested sort does not include\n        score, then the filterCache will be checked for a filter\n        matching the query. If found, the filter will be used as the\n        source of document ids, and then the sort will be applied to\n        that.\n\n        For most situations, this will not be useful unless you\n        frequently get the same search repeatedly with different sort\n        options, and none of them ever use \"score\"\n     --&gt;\n   &lt;!--\n      &lt;useFilterForSortedQuery&gt;true&lt;/useFilterForSorte
 dQuery&gt;\n     --&gt;\n\n   &lt;!-- Result Window Size\n\n        An optimization for use with the queryResultCache.  When a search\n        is requested, a superset of the requested number of document ids\n        are collected.  For example, if a search for a particular query\n        requests matching documents 10 through 19, and queryWindowSize is 50,\n        then documents 0 through 49 will be collected and cached.  Any further\n        requests in that range can be satisfied via the cache.  \n     --&gt;\n   &lt;queryResultWindowSize&gt;20&lt;/queryResultWindowSize&gt;\n\n   &lt;!-- Maximum number of documents to cache for any entry in the\n        queryResultCache. \n     --&gt;\n   &lt;queryResultMaxDocsCached&gt;200&lt;/queryResultMaxDocsCached&gt;\n\n   &lt;!-- Query Related Event Listeners\n\n        Various IndexSearcher related events can trigger Listeners to\n        take actions.\n\n        newSearcher - fired whenever a new searcher is being prepared\n        and 
 there is a current searcher handling requests (aka\n        registered).  It can be used to prime certain caches to\n        prevent long request times for certain requests.\n\n        firstSearcher - fired whenever a new searcher is being\n        prepared but there is no current registered searcher to handle\n        requests or to gain autowarming data from.\n\n        \n     --&gt;\n    &lt;!-- QuerySenderListener takes an array of NamedList and executes a\n         local query request for each NamedList in sequence. \n      --&gt;\n    &lt;listener event=\"newSearcher\" class=\"solr.QuerySenderListener\"&gt;\n      &lt;arr name=\"queries\"&gt;\n        &lt;!--\n           &lt;lst&gt;&lt;str name=\"q\"&gt;solr&lt;/str&gt;&lt;str name=\"sort\"&gt;price asc&lt;/str&gt;&lt;/lst&gt;\n           &lt;lst&gt;&lt;str name=\"q\"&gt;rocks&lt;/str&gt;&lt;str name=\"sort\"&gt;weight asc&lt;/str&gt;&lt;/lst&gt;\n          --&gt;\n      &lt;/arr&gt;\n    &lt;/listener&gt;\n    &lt;listener ev
 ent=\"firstSearcher\" class=\"solr.QuerySenderListener\"&gt;\n      &lt;arr name=\"queries\"&gt;\n        &lt;lst&gt;\n          &lt;str name=\"q\"&gt;static firstSearcher warming in solrconfig.xml&lt;/str&gt;\n        &lt;/lst&gt;\n      &lt;/arr&gt;\n    &lt;/listener&gt;\n\n    &lt;!-- Use Cold Searcher\n\n         If a search request comes in and there is no current\n         registered searcher, then immediately register the still\n         warming searcher and use it.  If \"false\" then all requests\n         will block until the first searcher is done warming.\n      --&gt;\n    &lt;useColdSearcher&gt;false&lt;/useColdSearcher&gt;\n\n    &lt;!-- Max Warming Searchers\n         \n         Maximum number of searchers that may be warming in the\n         background concurrently.  An error is returned if this limit\n         is exceeded.\n\n         Recommend values of 1-2 for read-only slaves, higher for\n         masters w/o cache warming.\n      --&gt;\n    &lt;maxWarmingSearc
 hers&gt;2&lt;/maxWarmingSearchers&gt;\n\n  &lt;/query&gt;\n\n\n  &lt;!-- Request Dispatcher\n\n       This section contains instructions for how the SolrDispatchFilter\n       should behave when processing requests for this SolrCore.\n\n       handleSelect is a legacy option that affects the behavior of requests\n       such as /select?qt=XXX\n\n       handleSelect=\"true\" will cause the SolrDispatchFilter to process\n       the request and dispatch the query to a handler specified by the \n       \"qt\" param, assuming \"/select\" isn't already registered.\n\n       handleSelect=\"false\" will cause the SolrDispatchFilter to\n       ignore \"/select\" requests, resulting in a 404 unless a handler\n       is explicitly registered with the name \"/select\"\n\n       handleSelect=\"true\" is not recommended for new users, but is the default\n       for backwards compatibility\n    --&gt;\n  &lt;requestDispatcher handleSelect=\"false\" &gt;\n    &lt;!-- Request Parsing\n\n         The
 se settings indicate how Solr Requests may be parsed, and\n         what restrictions may be placed on the ContentStreams from\n         those requests\n\n         enableRemoteStreaming - enables use of the stream.file\n         and stream.url parameters for specifying remote streams.\n\n         multipartUploadLimitInKB - specifies the max size (in KiB) of\n         Multipart File Uploads that Solr will allow in a Request.\n         \n         formdataUploadLimitInKB - specifies the max size (in KiB) of\n         form data (application/x-www-form-urlencoded) sent via\n         POST. You can use POST to pass request parameters not\n         fitting into the URL.\n         \n         addHttpRequestToContext - if set to true, it will instruct\n         the requestParsers to include the original HttpServletRequest\n         object in the context map of the SolrQueryRequest under the \n         key \"httpRequest\". It will not be used by any of the existing\n         Solr components, bu
 t may be useful when developing custom \n         plugins.\n         \n         *** WARNING ***\n         The settings below authorize Solr to fetch remote files, You\n         should make sure your system has some authentication before\n         using enableRemoteStreaming=\"true\"\n\n      --&gt; \n    &lt;requestParsers enableRemoteStreaming=\"true\" \n                    multipartUploadLimitInKB=\"2048000\"\n                    formdataUploadLimitInKB=\"2048\"\n                    addHttpRequestToContext=\"false\"/&gt;\n\n    &lt;!-- HTTP Caching\n\n         Set HTTP caching related parameters (for proxy caches and clients).\n\n         The options below instruct Solr not to output any HTTP Caching\n         related headers\n      --&gt;\n    &lt;httpCaching never304=\"true\" /&gt;\n    &lt;!-- If you include a &lt;cacheControl&gt; directive, it will be used to\n         generate a Cache-Control header (as well as an Expires header\n         if the value contains \"max-age=\")\n
          \n         By default, no Cache-Control header is generated.\n         \n         You can use the &lt;cacheControl&gt; option even if you have set\n         never304=\"true\"\n      --&gt;\n    &lt;!--\n       &lt;httpCaching never304=\"true\" &gt;\n         &lt;cacheControl&gt;max-age=30, public&lt;/cacheControl&gt; \n       &lt;/httpCaching&gt;\n      --&gt;\n    &lt;!-- To enable Solr to respond with automatically generated HTTP\n         Caching headers, and to response to Cache Validation requests\n         correctly, set the value of never304=\"false\"\n         \n         This will cause Solr to generate Last-Modified and ETag\n         headers based on the properties of the Index.\n\n         The following options can also be specified to affect the\n         values of these headers...\n\n         lastModFrom - the default value is \"openTime\" which means the\n         Last-Modified value (and validation against If-Modified-Since\n         requests) will all be rel
 ative to when the current Searcher\n         was opened.  You can change it to lastModFrom=\"dirLastMod\" if\n         you want the value to exactly correspond to when the physical\n         index was last modified.\n\n         etagSeed=\"...\" is an option you can change to force the ETag\n         header (and validation against If-None-Match requests) to be\n         different even if the index has not changed (ie: when making\n         significant changes to your config file)\n\n         (lastModifiedFrom and etagSeed are both ignored if you use\n         the never304=\"true\" option)\n      --&gt;\n    &lt;!--\n       &lt;httpCaching lastModifiedFrom=\"openTime\"\n                    etagSeed=\"Solr\"&gt;\n         &lt;cacheControl&gt;max-age=30, public&lt;/cacheControl&gt; \n       &lt;/httpCaching&gt;\n      --&gt;\n  &lt;/requestDispatcher&gt;\n\n  &lt;!-- Request Handlers \n\n       http://wiki.apache.org/solr/SolrRequestHandler\n\n       Incoming queries will be dispatched 
 to a specific handler by name\n       based on the path specified in the request.\n\n       Legacy behavior: If the request path uses \"/select\" but no Request\n       Handler has that name, and if handleSelect=\"true\" has been specified in\n       the requestDispatcher, then the Request Handler is dispatched based on\n       the qt parameter.  Handlers without a leading '/' are accessed this way\n       like so: http://host/app/[core/]select?qt=name  If no qt is\n       given, then the requestHandler that declares default=\"true\" will be\n       used or the one named \"standard\".\n\n       If a Request Handler is declared with startup=\"lazy\", then it will\n       not be initialized until the first request that uses it.\n\n    --&gt;\n\n  &lt;requestHandler name=\"/dataimport\" class=\"solr.DataImportHandler\"&gt;\n    &lt;lst name=\"defaults\"&gt;\n      &lt;str name=\"config\"&gt;solr-data-config.xml&lt;/str&gt;\n    &lt;/lst&gt;\n  &lt;/requestHandler&gt;\n\n  &lt;!-- Searc
 hHandler\n\n       http://wiki.apache.org/solr/SearchHandler\n\n       For processing Search Queries, the primary Request Handler\n       provided with Solr is \"SearchHandler\" It delegates to a sequent\n       of SearchComponents (see below) and supports distributed\n       queries across multiple shards\n    --&gt;\n  &lt;requestHandler name=\"/select\" class=\"solr.SearchHandler\"&gt;\n    &lt;!-- default values for query parameters can be specified, these\n         will be overridden by parameters in the request\n      --&gt;\n     &lt;lst name=\"defaults\"&gt;\n       &lt;str name=\"echoParams\"&gt;explicit&lt;/str&gt;\n       &lt;int name=\"rows\"&gt;10&lt;/int&gt;\n       &lt;str name=\"df\"&gt;text&lt;/str&gt;\n     &lt;/lst&gt;\n    &lt;!-- In addition to defaults, \"appends\" params can be specified\n         to identify values which should be appended to the list of\n         multi-val params from the query (or the existing \"defaults\").\n      --&gt;\n    &lt;!-- In th
 is example, the param \"fq=instock:true\" would be appended to\n         any query time fq params the user may specify, as a mechanism for\n         partitioning the index, independent of any user selected filtering\n         that may also be desired (perhaps as a result of faceted searching).\n\n         NOTE: there is *absolutely* nothing a client can do to prevent these\n         \"appends\" values from being used, so don't use this mechanism\n         unless you are sure you always want it.\n      --&gt;\n    &lt;!--\n       &lt;lst name=\"appends\"&gt;\n         &lt;str name=\"fq\"&gt;inStock:true&lt;/str&gt;\n       &lt;/lst&gt;\n      --&gt;\n    &lt;!-- \"invariants\" are a way of letting the Solr maintainer lock down\n         the options available to Solr clients.  Any params values\n         specified here are used regardless of what values may be specified\n         in either the query, the \"defaults\", or the \"appends\" params.\n\n         In this example, the facet.f
 ield and facet.query params would\n         be fixed, limiting the facets clients can use.  Faceting is\n         not turned on by default - but if the client does specify\n         facet=true in the request, these are the only facets they\n         will be able to see counts for; regardless of what other\n         facet.field or facet.query params they may specify.\n\n         NOTE: there is *absolutely* nothing a client can do to prevent these\n         \"invariants\" values from being used, so don't use this mechanism\n         unless you are sure you always want it.\n      --&gt;\n    &lt;!--\n       &lt;lst name=\"invariants\"&gt;\n         &lt;str name=\"facet.field\"&gt;cat&lt;/str&gt;\n         &lt;str name=\"facet.field\"&gt;manu_exact&lt;/str&gt;\n         &lt;str name=\"facet.query\"&gt;price:[* TO 500]&lt;/str&gt;\n         &lt;str name=\"facet.query\"&gt;price:[500 TO *]&lt;/str&gt;\n       &lt;/lst&gt;\n      --&gt;\n    &lt;!-- If the default list of SearchComponents 
 is not desired, that\n         list can either be overridden completely, or components can be\n         prepended or appended to the default list.  (see below)\n      --&gt;\n    &lt;!--\n       &lt;arr name=\"components\"&gt;\n         &lt;str&gt;nameOfCustomComponent1&lt;/str&gt;\n         &lt;str&gt;nameOfCustomComponent2&lt;/str&gt;\n       &lt;/arr&gt;\n      --&gt;\n    &lt;/requestHandler&gt;\n\n  &lt;!-- A request handler that returns indented JSON by default --&gt;\n  &lt;requestHandler name=\"/query\" class=\"solr.SearchHandler\"&gt;\n     &lt;lst name=\"defaults\"&gt;\n       &lt;str name=\"echoParams\"&gt;explicit&lt;/str&gt;\n       &lt;str name=\"wt\"&gt;json&lt;/str&gt;\n       &lt;str name=\"indent\"&gt;true&lt;/str&gt;\n       &lt;str name=\"df\"&gt;text&lt;/str&gt;\n     &lt;/lst&gt;\n  &lt;/requestHandler&gt;\n\n\n  &lt;!-- realtime get handler, guaranteed to return the latest stored fields of\n       any document, without the need to commit or open a new searcher
 .  The\n       current implementation relies on the updateLog feature being enabled.\n\n       ** WARNING **\n       Do NOT disable the realtime get handler at /get if you are using\n       SolrCloud otherwise any leader election will cause a full sync in ALL\n       replicas for the shard in question. Similarly, a replica recovery will\n       also always fetch the complete index from the leader because a partial\n       sync will not be possible in the absence of this handler.\n  --&gt;\n  &lt;requestHandler name=\"/get\" class=\"solr.RealTimeGetHandler\"&gt;\n     &lt;lst name=\"defaults\"&gt;\n       &lt;str name=\"omitHeader\"&gt;true&lt;/str&gt;\n       &lt;str name=\"wt\"&gt;json&lt;/str&gt;\n       &lt;str name=\"indent\"&gt;true&lt;/str&gt;\n     &lt;/lst&gt;\n  &lt;/requestHandler&gt;\n\n\n  &lt;!-- A Robust Example\n\n       This example SearchHandler declaration shows off usage of the\n       SearchHandler with many defaults declared\n\n       Note that multiple instance
 s of the same Request Handler\n       (SearchHandler) can be registered multiple times with different\n       names (and different init parameters)\n    --&gt;\n  &lt;requestHandler name=\"/browse\" class=\"solr.SearchHandler\"&gt;\n    &lt;lst name=\"defaults\"&gt;\n      &lt;str name=\"echoParams\"&gt;explicit&lt;/str&gt;\n\n      &lt;!-- VelocityResponseWriter settings --&gt;\n      &lt;str name=\"wt\"&gt;velocity&lt;/str&gt;\n      &lt;str name=\"v.template\"&gt;browse&lt;/str&gt;\n      &lt;str name=\"v.layout\"&gt;layout&lt;/str&gt;\n\n      &lt;!-- Query settings --&gt;\n      &lt;str name=\"defType\"&gt;edismax&lt;/str&gt;\n      &lt;str name=\"q.alt\"&gt;*:*&lt;/str&gt;\n      &lt;str name=\"rows\"&gt;10&lt;/str&gt;\n      &lt;str name=\"fl\"&gt;*,score&lt;/str&gt;\n\n      &lt;!-- Faceting defaults --&gt;\n      &lt;str name=\"facet\"&gt;on&lt;/str&gt;\n      &lt;str name=\"facet.mincount\"&gt;1&lt;/str&gt;\n    &lt;/lst&gt;\n  &lt;/requestHandler&gt;\n\n\n  &lt;initParams
  path=\"/update/**,/query,/select,/tvrh,/elevate,/spell,/browse\"&gt;\n    &lt;lst name=\"defaults\"&gt;\n      &lt;str name=\"df\"&gt;text&lt;/str&gt;\n      &lt;str name=\"update.chain\"&gt;add-unknown-fields-to-the-schema&lt;/str&gt;\n    &lt;/lst&gt;\n  &lt;/initParams&gt;\n\n  &lt;!-- Update Request Handler.\n       \n       http://wiki.apache.org/solr/UpdateXmlMessages\n\n       The canonical Request Handler for Modifying the Index through\n       commands specified using XML, JSON, CSV, or JAVABIN\n\n       Note: Since solr1.1 requestHandlers requires a valid content\n       type header if posted in the body. For example, curl now\n       requires: -H 'Content-type:text/xml; charset=utf-8'\n       \n       To override the request content type and force a specific \n       Content-type, use the request parameter: \n         ?update.contentType=text/csv\n       \n       This handler will pick a response format to match the input\n       if the 'wt' parameter is not explicit\n  
   --&gt;\n  &lt;requestHandler name=\"/update\" class=\"solr.UpdateRequestHandler\"&gt;\n    &lt;!-- See below for information on defining \n         updateRequestProcessorChains that can be used by name \n         on each Update Request\n      --&gt;\n    &lt;!--\n       &lt;lst name=\"defaults\"&gt;\n         &lt;str name=\"update.chain\"&gt;dedupe&lt;/str&gt;\n       &lt;/lst&gt;\n       --&gt;\n  &lt;/requestHandler&gt;\n\n  &lt;!-- Solr Cell Update Request Handler\n\n       http://wiki.apache.org/solr/ExtractingRequestHandler \n\n    --&gt;\n  &lt;requestHandler name=\"/update/extract\" \n                  startup=\"lazy\"\n                  class=\"solr.extraction.ExtractingRequestHandler\" &gt;\n    &lt;lst name=\"defaults\"&gt;\n      &lt;str name=\"lowernames\"&gt;true&lt;/str&gt;\n      &lt;str name=\"uprefix\"&gt;ignored_&lt;/str&gt;\n\n      &lt;!-- capture link hrefs but ignore div attributes --&gt;\n      &lt;str name=\"captureAttr\"&gt;true&lt;/str&gt;\n      &lt;str 
 name=\"fmap.a\"&gt;links&lt;/str&gt;\n      &lt;str name=\"fmap.div\"&gt;ignored_&lt;/str&gt;\n    &lt;/lst&gt;\n  &lt;/requestHandler&gt;\n\n\n  &lt;!-- Field Analysis Request Handler\n\n       RequestHandler that provides much the same functionality as\n       analysis.jsp. Provides the ability to specify multiple field\n       types and field names in the same request and outputs\n       index-time and query-time analysis for each of them.\n\n       Request parameters are:\n       analysis.fieldname - field name whose analyzers are to be used\n\n       analysis.fieldtype - field type whose analyzers are to be used\n       analysis.fieldvalue - text for index-time analysis\n       q (or analysis.q) - text for query time analysis\n       analysis.showmatch (true|false) - When set to true and when\n           query analysis is performed, the produced tokens of the\n           field value analysis will be marked as \"matched\" for every\n           token that is produces by the query
  analysis\n   --&gt;\n  &lt;requestHandler name=\"/analysis/field\" \n                  startup=\"lazy\"\n                  class=\"solr.FieldAnalysisRequestHandler\" /&gt;\n\n\n  &lt;!-- Document Analysis Handler\n\n       http://wiki.apache.org/solr/AnalysisRequestHandler\n\n       An analysis handler that provides a breakdown of the analysis\n       process of provided documents. This handler expects a (single)\n       content stream with the following format:\n\n       &lt;docs&gt;\n         &lt;doc&gt;\n           &lt;field name=\"id\"&gt;1&lt;/field&gt;\n           &lt;field name=\"name\"&gt;The Name&lt;/field&gt;\n           &lt;field name=\"text\"&gt;The Text Value&lt;/field&gt;\n         &lt;/doc&gt;\n         &lt;doc&gt;...&lt;/doc&gt;\n         &lt;doc&gt;...&lt;/doc&gt;\n         ...\n       &lt;/docs&gt;\n\n    Note: Each document must contain a field which serves as the\n    unique key. This key is used in the returned response to associate\n    an analysis breakdown t
 o the analyzed document.\n\n    Like the FieldAnalysisRequestHandler, this handler also supports\n    query analysis by sending either an \"analysis.query\" or \"q\"\n    request parameter that holds the query text to be analyzed. It\n    also supports the \"analysis.showmatch\" parameter which when set to\n    true, all field tokens that match the query tokens will be marked\n    as a \"match\". \n  --&gt;\n  &lt;requestHandler name=\"/analysis/document\" \n                  class=\"solr.DocumentAnalysisRequestHandler\" \n                  startup=\"lazy\" /&gt;\n\n  &lt;!-- Admin Handlers\n\n       Admin Handlers - This will register all the standard admin\n       RequestHandlers.  \n    --&gt;\n  &lt;requestHandler name=\"/admin/\" \n                  class=\"solr.admin.AdminHandlers\" /&gt;\n  &lt;!-- This single handler is equivalent to the following... --&gt;\n  &lt;!--\n     &lt;requestHandler name=\"/admin/luke\"       class=\"solr.admin.LukeRequestHandler\" /&gt;\n     &lt;
 requestHandler name=\"/admin/system\"     class=\"solr.admin.SystemInfoHandler\" /&gt;\n     &lt;requestHandler name=\"/admin/plugins\"    class=\"solr.admin.PluginInfoHandler\" /&gt;\n     &lt;requestHandler name=\"/admin/threads\"    class=\"solr.admin.ThreadDumpHandler\" /&gt;\n     &lt;requestHandler name=\"/admin/properties\" class=\"solr.admin.PropertiesRequestHandler\" /&gt;\n     &lt;requestHandler name=\"/admin/file\"       class=\"solr.admin.ShowFileRequestHandler\" &gt;\n    --&gt;\n  &lt;!-- If you wish to hide files under ${solr.home}/conf, explicitly\n       register the ShowFileRequestHandler using the definition below. \n       NOTE: The glob pattern ('*') is the only pattern supported at present, *.xml will\n             not exclude all files ending in '.xml'. Use it to exclude _all_ updates\n    --&gt;\n  &lt;!--\n     &lt;requestHandler name=\"/admin/file\" \n                     class=\"solr.admin.ShowFileRequestHandler\" &gt;\n       &lt;lst name=\"invariants\"&
 gt;\n         &lt;str name=\"hidden\"&gt;synonyms.txt&lt;/str&gt; \n         &lt;str name=\"hidden\"&gt;anotherfile.txt&lt;/str&gt; \n         &lt;str name=\"hidden\"&gt;*&lt;/str&gt; \n       &lt;/lst&gt;\n     &lt;/requestHandler&gt;\n    --&gt;\n\n  &lt;!--\n    Enabling this request handler (which is NOT a default part of the admin handler) will allow the Solr UI to edit\n    all the config files. This is intended for secure/development use ONLY! Leaving available and publically\n    accessible is a security vulnerability and should be done with extreme caution!\n  --&gt;\n  &lt;!--\n  &lt;requestHandler name=\"/admin/fileedit\" class=\"solr.admin.EditFileRequestHandler\" &gt;\n    &lt;lst name=\"invariants\"&gt;\n         &lt;str name=\"hidden\"&gt;synonyms.txt&lt;/str&gt;\n         &lt;str name=\"hidden\"&gt;anotherfile.txt&lt;/str&gt;\n    &lt;/lst&gt;\n  &lt;/requestHandler&gt;\n  --&gt;\n  &lt;!-- ping/healthcheck --&gt;\n  &lt;requestHandler name=\"/admin/ping\" class=\"so
 lr.PingRequestHandler\"&gt;\n    &lt;lst name=\"invariants\"&gt;\n      &lt;str name=\"q\"&gt;solrpingquery&lt;/str&gt;\n    &lt;/lst&gt;\n    &lt;lst name=\"defaults\"&gt;\n      &lt;str name=\"echoParams\"&gt;all&lt;/str&gt;\n    &lt;/lst&gt;\n    &lt;!-- An optional feature of the PingRequestHandler is to configure the \n         handler with a \"healthcheckFile\" which can be used to enable/disable \n         the PingRequestHandler.\n         relative paths are resolved against the data dir \n      --&gt;\n    &lt;!-- &lt;str name=\"healthcheckFile\"&gt;server-enabled.txt&lt;/str&gt; --&gt;\n  &lt;/requestHandler&gt;\n\n  &lt;!-- Echo the request contents back to the client --&gt;\n  &lt;requestHandler name=\"/debug/dump\" class=\"solr.DumpRequestHandler\" &gt;\n    &lt;lst name=\"defaults\"&gt;\n     &lt;str name=\"echoParams\"&gt;explicit&lt;/str&gt; \n     &lt;str name=\"echoHandler\"&gt;true&lt;/str&gt;\n    &lt;/lst&gt;\n  &lt;/requestHandler&gt;\n  \n  &lt;!-- Solr Replica
 tion\n\n       The SolrReplicationHandler supports replicating indexes from a\n       \"master\" used for indexing and \"slaves\" used for queries.\n\n       http://wiki.apache.org/solr/SolrReplication \n\n       It is also necessary for SolrCloud to function (in Cloud mode, the\n       replication handler is used to bulk transfer segments when nodes \n       are added or need to recover).\n\n       https://wiki.apache.org/solr/SolrCloud/\n    --&gt;\n  &lt;requestHandler name=\"/replication\" class=\"solr.ReplicationHandler\" &gt; \n    &lt;!--\n       To enable simple master/slave replication, uncomment one of the \n       sections below, depending on whether this solr instance should be\n       the \"master\" or a \"slave\".  If this instance is a \"slave\" you will \n       also need to fill in the masterUrl to point to a real machine.\n    --&gt;\n    &lt;!--\n       &lt;lst name=\"master\"&gt;\n         &lt;str name=\"replicateAfter\"&gt;commit&lt;/str&gt;\n         &lt;str na
 me=\"replicateAfter\"&gt;startup&lt;/str&gt;\n         &lt;str name=\"confFiles\"&gt;schema.xml,stopwords.txt&lt;/str&gt;\n       &lt;/lst&gt;\n    --&gt;\n    &lt;!--\n       &lt;lst name=\"slave\"&gt;\n         &lt;str name=\"masterUrl\"&gt;http://your-master-hostname:8983/solr&lt;/str&gt;\n         &lt;str name=\"pollInterval\"&gt;00:00:60&lt;/str&gt;\n       &lt;/lst&gt;\n    --&gt;\n  &lt;/requestHandler&gt;\n\n  &lt;!-- Search Components\n\n       Search components are registered to SolrCore and used by \n       instances of SearchHandler (which can access them by name)\n       \n       By default, the following components are available:\n       \n       &lt;searchComponent name=\"query\"     class=\"solr.QueryComponent\" /&gt;\n       &lt;searchComponent name=\"facet\"     class=\"solr.FacetComponent\" /&gt;\n       &lt;searchComponent name=\"mlt\"       class=\"solr.MoreLikeThisComponent\" /&gt;\n       &lt;searchComponent name=\"highlight\" class=\"solr.HighlightComponent\"
  /&gt;\n       &lt;searchComponent name=\"stats\"     class=\"solr.StatsComponent\" /&gt;\n       &lt;searchComponent name=\"debug\"     class=\"solr.DebugComponent\" /&gt;\n   \n       Default configuration in a requestHandler would look like:\n\n       &lt;arr name=\"components\"&gt;\n         &lt;str&gt;query&lt;/str&gt;\n         &lt;str&gt;facet&lt;/str&gt;\n         &lt;str&gt;mlt&lt;/str&gt;\n         &lt;str&gt;highlight&lt;/str&gt;\n         &lt;str&gt;stats&lt;/str&gt;\n         &lt;str&gt;debug&lt;/str&gt;\n       &lt;/arr&gt;\n\n       If you register a searchComponent to one of the standard names, \n       that will be used instead of the default.\n\n       To insert components before or after the 'standard' components, use:\n    \n       &lt;arr name=\"first-components\"&gt;\n         &lt;str&gt;myFirstComponentName&lt;/str&gt;\n       &lt;/arr&gt;\n    \n       &lt;arr name=\"last-components\"&gt;\n         &lt;str&gt;myLastComponentName&lt;/str&gt;\n       &lt;/arr&g
 t;\n\n       NOTE: The component registered with the name \"debug\" will\n       always be executed after the \"last-components\" \n       \n     --&gt;\n  \n   &lt;!-- Spell Check\n\n        The spell check component can return a list of alternative spelling\n        suggestions.  \n\n        http://wiki.apache.org/solr/SpellCheckComponent\n     --&gt;\n  &lt;searchComponent name=\"spellcheck\" class=\"solr.SpellCheckComponent\"&gt;\n\n    &lt;str name=\"queryAnalyzerFieldType\"&gt;key_lower_case&lt;/str&gt;\n\n    &lt;!-- Multiple \"Spell Checkers\" can be declared and used by this\n         component\n      --&gt;\n\n    &lt;!-- a spellchecker built from a field of the main index --&gt;\n    &lt;lst name=\"spellchecker\"&gt;\n      &lt;str name=\"name\"&gt;default&lt;/str&gt;\n      &lt;str name=\"field\"&gt;text&lt;/str&gt;\n      &lt;str name=\"classname\"&gt;solr.DirectSolrSpellChecker&lt;/str&gt;\n      &lt;!-- the spellcheck distance measure used, the default is the internal
  levenshtein --&gt;\n      &lt;str name=\"distanceMeasure\"&gt;internal&lt;/str&gt;\n      &lt;!-- minimum accuracy needed to be considered a valid spellcheck suggestion --&gt;\n      &lt;float name=\"accuracy\"&gt;0.5&lt;/float&gt;\n      &lt;!-- the maximum #edits we consider when enumerating terms: can be 1 or 2 --&gt;\n      &lt;int name=\"maxEdits\"&gt;2&lt;/int&gt;\n      &lt;!-- the minimum shared prefix when enumerating terms --&gt;\n      &lt;int name=\"minPrefix\"&gt;1&lt;/int&gt;\n      &lt;!-- maximum number of inspections per result. --&gt;\n      &lt;int name=\"maxInspections\"&gt;5&lt;/int&gt;\n      &lt;!-- minimum length of a query term to be considered for correction --&gt;\n      &lt;int name=\"minQueryLength\"&gt;4&lt;/int&gt;\n      &lt;!-- maximum threshold of documents a query term can appear to be considered for correction --&gt;\n      &lt;float name=\"maxQueryFrequency\"&gt;0.01&lt;/float&gt;\n      &lt;!-- uncomment this to require suggestions to occur in 
 1% of the documents\n        &lt;float name=\"thresholdTokenFrequency\"&gt;.01&lt;/float&gt;\n      --&gt;\n    &lt;/lst&gt;\n    \n    &lt;!-- a spellchecker that can break or combine words.  See \"/spell\" handler below for usage --&gt;\n    &lt;lst name=\"spellchecker\"&gt;\n      &lt;str name=\"name\"&gt;wordbreak&lt;/str&gt;\n      &lt;str name=\"classname\"&gt;solr.WordBreakSolrSpellChecker&lt;/str&gt;      \n      &lt;str name=\"field\"&gt;name&lt;/str&gt;\n      &lt;str name=\"combineWords\"&gt;true&lt;/str&gt;\n      &lt;str name=\"breakWords\"&gt;true&lt;/str&gt;\n      &lt;int name=\"maxChanges\"&gt;10&lt;/int&gt;\n    &lt;/lst&gt;\n\n    &lt;!-- a spellchecker that uses a different distance measure --&gt;\n    &lt;!--\n       &lt;lst name=\"spellchecker\"&gt;\n         &lt;str name=\"name\"&gt;jarowinkler&lt;/str&gt;\n         &lt;str name=\"field\"&gt;spell&lt;/str&gt;\n         &lt;str name=\"classname\"&gt;solr.DirectSolrSpellChecker&lt;/str&gt;\n         &lt;str name
 =\"distanceMeasure\"&gt;\n           org.apache.lucene.search.spell.JaroWinklerDistance\n         &lt;/str&gt;\n       &lt;/lst&gt;\n     --&gt;\n\n    &lt;!-- a spellchecker that use an alternate comparator \n\n         comparatorClass be one of:\n          1. score (default)\n          2. freq (Frequency first, then score)\n          3. A fully qualified class name\n      --&gt;\n    &lt;!--\n       &lt;lst name=\"spellchecker\"&gt;\n         &lt;str name=\"name\"&gt;freq&lt;/str&gt;\n         &lt;str name=\"field\"&gt;lowerfilt&lt;/str&gt;\n         &lt;str name=\"classname\"&gt;solr.DirectSolrSpellChecker&lt;/str&gt;\n         &lt;str name=\"comparatorClass\"&gt;freq&lt;/str&gt;\n      --&gt;\n\n    &lt;!-- A spellchecker that reads the list of words from a file --&gt;\n    &lt;!--\n       &lt;lst name=\"spellchecker\"&gt;\n         &lt;str name=\"classname\"&gt;solr.FileBasedSpellChecker&lt;/str&gt;\n         &lt;str name=\"name\"&gt;file&lt;/str&gt;\n         &lt;str name=\"so
 urceLocation\"&gt;spellings.txt&lt;/str&gt;\n         &lt;str name=\"characterEncoding\"&gt;UTF-8&lt;/str&gt;\n         &lt;str name=\"spellcheckIndexDir\"&gt;spellcheckerFile&lt;/str&gt;\n       &lt;/lst&gt;\n      --&gt;\n  &lt;/searchComponent&gt;\n  \n  &lt;!-- A request handler for demonstrating the spellcheck component.  \n\n       NOTE: This is purely as an example.  The whole purpose of the\n       SpellCheckComponent is to hook it into the request handler that\n       handles your normal user queries so that a separate request is\n       not needed to get suggestions.\n\n       IN OTHER WORDS, THERE IS REALLY GOOD CHANCE THE SETUP BELOW IS\n       NOT WHAT YOU WANT FOR YOUR PRODUCTION SYSTEM!\n       \n       See http://wiki.apache.org/solr/SpellCheckComponent for details\n       on the request parameters.\n    --&gt;\n  &lt;requestHandler name=\"/spell\" class=\"solr.SearchHandler\" startup=\"lazy\"&gt;\n    &lt;lst name=\"defaults\"&gt;\n      &lt;str name=\"df\"&gt;text&
 lt;/str&gt;\n      &lt;!-- Solr will use suggestions from both the 'default' spellchecker\n           and from the 'wordbreak' spellchecker and combine them.\n           collations (re-written queries) can include a combination of\n           corrections from both spellcheckers --&gt;\n      &lt;str name=\"spellcheck.dictionary\"&gt;default&lt;/str&gt;\n      &lt;str name=\"spellcheck.dictionary\"&gt;wordbreak&lt;/str&gt;\n      &lt;str name=\"spellcheck\"&gt;on&lt;/str&gt;\n      &lt;str name=\"spellcheck.extendedResults\"&gt;true&lt;/str&gt;       \n      &lt;str name=\"spellcheck.count\"&gt;10&lt;/str&gt;\n      &lt;str name=\"spellcheck.alternativeTermCount\"&gt;5&lt;/str&gt;\n      &lt;str name=\"spellcheck.maxResultsForSuggest\"&gt;5&lt;/str&gt;       \n      &lt;str name=\"spellcheck.collate\"&gt;true&lt;/str&gt;\n      &lt;str name=\"spellcheck.collateExtendedResults\"&gt;true&lt;/str&gt;  \n      &lt;str name=\"spellcheck.maxCollationTries\"&gt;10&lt;/str&gt;\n      &lt;str
  name=\"spellcheck.maxCollations\"&gt;5&lt;/str&gt;         \n    &lt;/lst&gt;\n    &lt;arr name=\"last-components\"&gt;\n      &lt;str&gt;spellcheck&lt;/str&gt;\n    &lt;/arr&gt;\n  &lt;/requestHandler&gt;\n\n  &lt;searchComponent name=\"suggest\" class=\"solr.SuggestComponent\"&gt;\n   &lt;lst name=\"suggester\"&gt;\n      &lt;str name=\"name\"&gt;mySuggester&lt;/str&gt;\n      &lt;str name=\"lookupImpl\"&gt;FuzzyLookupFactory&lt;/str&gt;      &lt;!-- org.apache.solr.spelling.suggest.fst --&gt;\n      &lt;str name=\"dictionaryImpl\"&gt;DocumentDictionaryFactory&lt;/str&gt;     &lt;!-- org.apache.solr.spelling.suggest.HighFrequencyDictionaryFactory --&gt; \n      &lt;str name=\"field\"&gt;cat&lt;/str&gt;\n      &lt;str name=\"weightField\"&gt;price&lt;/str&gt;\n      &lt;str name=\"suggestAnalyzerFieldType\"&gt;string&lt;/str&gt;\n    &lt;/lst&gt;\n  &lt;/searchComponent&gt;\n\n  &lt;requestHandler name=\"/suggest\" class=\"solr.SearchHandler\" startup=\"lazy\"&gt;\n    &lt;lst nam
 e=\"defaults\"&gt;\n      &lt;str name=\"suggest\"&gt;true&lt;/str&gt;\n      &lt;str name=\"suggest.count\"&gt;10&lt;/str&gt;\n    &lt;/lst&gt;\n    &lt;arr name=\"components\"&gt;\n      &lt;str&gt;suggest&lt;/str&gt;\n    &lt;/arr&gt;\n  &lt;/requestHandler&gt;\n  &lt;!-- Term Vector Component\n\n       http://wiki.apache.org/solr/TermVectorComponent\n    --&gt;\n  &lt;searchComponent name=\"tvComponent\" class=\"solr.TermVectorComponent\"/&gt;\n\n  &lt;!-- A request handler for demonstrating the term vector component\n\n       This is purely as an example.\n\n       In reality you will likely want to add the component to your \n       already specified request handlers. \n    --&gt;\n  &lt;requestHandler name=\"/tvrh\" class=\"solr.SearchHandler\" startup=\"lazy\"&gt;\n    &lt;lst name=\"defaults\"&gt;\n      &lt;str name=\"df\"&gt;text&lt;/str&gt;\n      &lt;bool name=\"tv\"&gt;true&lt;/bool&gt;\n    &lt;/lst&gt;\n    &lt;arr name=\"last-components\"&gt;\n      &lt;str&gt;tvCom
 ponent&lt;/str&gt;\n    &lt;/arr&gt;\n  &lt;/requestHandler&gt;\n\n  &lt;!-- Clustering Component\n\n       You'll need to set the solr.clustering.enabled system property\n       when running solr to run with clustering enabled:\n\n            java -Dsolr.clustering.enabled=true -jar start.jar\n\n       http://wiki.apache.org/solr/ClusteringComponent\n       http://carrot2.github.io/solr-integration-strategies/\n    --&gt;\n  &lt;searchComponent name=\"clustering\"\n                   enable=\"${solr.clustering.enabled:false}\"\n                   class=\"solr.clustering.ClusteringComponent\" &gt;\n    &lt;lst name=\"engine\"&gt;\n      &lt;str name=\"name\"&gt;lingo&lt;/str&gt;\n\n      &lt;!-- Class name of a clustering algorithm compatible with the Carrot2 framework.\n\n           Currently available open source algorithms are:\n           * org.carrot2.clustering.lingo.LingoClusteringAlgorithm\n           * org.carrot2.clustering.stc.STCClusteringAlgorithm\n           * org.carr
 ot2.clustering.kmeans.BisectingKMeansClusteringAlgorithm\n\n           See http://project.carrot2.org/algorithms.html for more information.\n\n           A commercial algorithm Lingo3G (needs to be installed separately) is defined as:\n           * com.carrotsearch.lingo3g.Lingo3GClusteringAlgorithm\n        --&gt;\n      &lt;str name=\"carrot.algorithm\"&gt;org.carrot2.clustering.lingo.LingoClusteringAlgorithm&lt;/str&gt;\n\n      &lt;!-- Override location of the clustering algorithm's resources \n           (attribute definitions and lexical resources).\n\n           A directory from which to load algorithm-specific stop words,\n           stop labels and attribute definition XMLs. \n\n           For an overview of Carrot2 lexical resources, see:\n           http://download.carrot2.org/head/manual/#chapter.lexical-resources\n\n           For an overview of Lingo3G lexical resources, see:\n           http://download.carrotsearch.com/lingo3g/manual/#chapter.lexical-resources\n      
  --&gt;\n      &lt;str name=\"carrot.resourcesDir\"&gt;clustering/carrot2&lt;/str&gt;\n    &lt;/lst&gt;\n\n    &lt;!-- An example definition for the STC clustering algorithm. --&gt;\n    &lt;lst name=\"engine\"&gt;\n      &lt;str name=\"name\"&gt;stc&lt;/str&gt;\n      &lt;str name=\"carrot.algorithm\"&gt;org.carrot2.clustering.stc.STCClusteringAlgorithm&lt;/str&gt;\n    &lt;/lst&gt;\n\n    &lt;!-- An example definition for the bisecting kmeans clustering algorithm. --&gt;\n    &lt;lst name=\"engine\"&gt;\n      &lt;str name=\"name\"&gt;kmeans&lt;/str&gt;\n      &lt;str name=\"carrot.algorithm\"&gt;org.carrot2.clustering.kmeans.BisectingKMeansClusteringAlgorithm&lt;/str&gt;\n    &lt;/lst&gt;\n  &lt;/searchComponent&gt;\n\n  &lt;!-- A request handler for demonstrating the clustering component\n\n       This is purely as an example.\n\n       In reality you will likely want to add the component to your \n       already specified request handlers. \n    --&gt;\n  &lt;requestHandler nam
 e=\"/clustering\"\n                  startup=\"lazy\"\n                  enable=\"${solr.clustering.enabled:false}\"\n                  class=\"solr.SearchHandler\"&gt;\n    &lt;lst name=\"defaults\"&gt;\n      &lt;bool name=\"clustering\"&gt;true&lt;/bool&gt;\n      &lt;bool name=\"clustering.results\"&gt;true&lt;/bool&gt;\n      &lt;!-- Field name with the logical \"title\" of a each document (optional) --&gt;\n      &lt;str name=\"carrot.title\"&gt;name&lt;/str&gt;\n      &lt;!-- Field name with the logical \"URL\" of a each document (optional) --&gt;\n      &lt;str name=\"carrot.url\"&gt;id&lt;/str&gt;\n      &lt;!-- Field name with the logical \"content\" of a each document (optional) --&gt;\n      &lt;str name=\"carrot.snippet\"&gt;features&lt;/str&gt;\n      &lt;!-- Apply highlighter to the title/ content and use this for clustering. --&gt;\n      &lt;bool name=\"carrot.produceSummary\"&gt;true&lt;/bool&gt;\n      &lt;!-- the maximum number of labels per cluster --&gt;\n     
  &lt;!--&lt;int name=\"carrot.numDescriptions\"&gt;5&lt;/int&gt;--&gt;\n      &lt;!-- produce sub clusters --&gt;\n      &lt;bool name=\"carrot.outputSubClusters\"&gt;false&lt;/bool&gt;\n\n      &lt;!-- Configure the remaining request handler parameters. --&gt;\n      &lt;str name=\"defType\"&gt;edismax&lt;/str&gt;\n      &lt;str name=\"qf\"&gt;\n        text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4\n      &lt;/str&gt;\n      &lt;str name=\"q.alt\"&gt;*:*&lt;/str&gt;\n      &lt;str name=\"rows\"&gt;10&lt;/str&gt;\n      &lt;str name=\"fl\"&gt;*,score&lt;/str&gt;\n    &lt;/lst&gt;\n    &lt;arr name=\"last-components\"&gt;\n      &lt;str&gt;clustering&lt;/str&gt;\n    &lt;/arr&gt;\n  &lt;/requestHandler&gt;\n  \n  &lt;!-- Terms Component\n\n       http://wiki.apache.org/solr/TermsComponent\n\n       A component to return terms and document frequency of those\n       terms\n    --&gt;\n  &lt;searchComponent name=\"terms\" class=\"solr.TermsComponent\"/&gt;\n\n  &lt;!-
 - A request handler for demonstrating the terms component --&gt;\n  &lt;requestHandler name=\"/terms\" class=\"solr.SearchHandler\" startup=\"lazy\"&gt;\n     &lt;lst name=\"defaults\"&gt;\n      &lt;bool name=\"terms\"&gt;true&lt;/bool&gt;\n      &lt;bool name=\"distrib\"&gt;false&lt;/bool&gt;\n    &lt;/lst&gt;     \n    &lt;arr name=\"components\"&gt;\n      &lt;str&gt;terms&lt;/str&gt;\n    &lt;/arr&gt;\n  &lt;/requestHandler&gt;\n\n\n  &lt;!-- Query Elevation Component\n\n       http://wiki.apache.org/solr/QueryElevationComponent\n\n       a search component that enables you to configure the top\n       results for a given query regardless of the normal lucene\n       scoring.\n    --&gt;\n  &lt;searchComponent name=\"elevator\" class=\"solr.QueryElevationComponent\" &gt;\n    &lt;!-- pick a fieldType to analyze queries --&gt;\n    &lt;str name=\"queryFieldType\"&gt;string&lt;/str&gt;\n    &lt;str name=\"config-file\"&gt;elevate.xml&lt;/str&gt;\n  &lt;/searchComponent&gt;\n\n  &
 lt;!-- A request handler for demonstrating the elevator component --&gt;\n  &lt;requestHandler name=\"/elevate\" class=\"solr.SearchHandler\" startup=\"lazy\"&gt;\n    &lt;lst name=\"defaults\"&gt;\n      &lt;str name=\"echoParams\"&gt;explicit&lt;/str&gt;\n      &lt;str name=\"df\"&gt;text&lt;/str&gt;\n    &lt;/lst&gt;\n    &lt;arr name=\"last-components\"&gt;\n      &lt;str&gt;elevator&lt;/str&gt;\n    &lt;/arr&gt;\n  &lt;/requestHandler&gt;\n\n  &lt;!-- Highlighting Component\n\n       http://wiki.apache.org/solr/HighlightingParameters\n    --&gt;\n  &lt;searchComponent class=\"solr.HighlightComponent\" name=\"highlight\"&gt;\n    &lt;highlighting&gt;\n      &lt;!-- Configure the standard fragmenter --&gt;\n      &lt;!-- This could most likely be commented out in the \"default\" case --&gt;\n      &lt;fragmenter name=\"gap\" \n                  default=\"true\"\n                  class=\"solr.highlight.GapFragmenter\"&gt;\n        &lt;lst name=\"defaults\"&gt;\n          &lt;int 
 name=\"hl.fragsize\"&gt;100&lt;/int&gt;\n        &lt;/lst&gt;\n      &lt;/fragmenter&gt;\n\n      &lt;!-- A regular-expression-based fragmenter \n           (for sentence extraction) \n        --&gt;\n      &lt;fragmenter name=\"regex\" \n                  class=\"solr.highlight.RegexFragmenter\"&gt;\n        &lt;lst name=\"defaults\"&gt;\n          &lt;!-- slightly smaller fragsizes work better because of slop --&gt;\n          &lt;int name=\"hl.fragsize\"&gt;70&lt;/int&gt;\n          &lt;!-- allow 50% slop on fragment sizes --&gt;\n          &lt;float name=\"hl.regex.slop\"&gt;0.5&lt;/float&gt;\n          &lt;!-- a basic sentence pattern --&gt;\n          &lt;str name=\"hl.regex.pattern\"&gt;[-\\w ,/\\n\\&amp;quot;&amp;apos;]{20,200}&lt;/str&gt;\n        &lt;/lst&gt;\n      &lt;/fragmenter&gt;\n\n      &lt;!-- Configure the standard formatter --&gt;\n      &lt;formatter name=\"html\" \n                 default=\"true\"\n                 class=\"solr.highlight.HtmlFormatter\"&gt;\n
         &lt;lst name=\"defaults\"&gt;\n          &lt;str name=\"hl.simple.pre\"&gt;&lt;![CDATA[&lt;em&gt;]]&gt;&lt;/str&gt;\n          &lt;str name=\"hl.simple.post\"&gt;&lt;![CDATA[&lt;/em&gt;]]&gt;&lt;/str&gt;\n        &lt;/lst&gt;\n      &lt;/formatter&gt;\n\n      &lt;!-- Configure the standard encoder --&gt;\n      &lt;encoder name=\"html\" \n               class=\"solr.highlight.HtmlEncoder\" /&gt;\n\n      &lt;!-- Configure the standard fragListBuilder --&gt;\n      &lt;fragListBuilder name=\"simple\" \n                       class=\"solr.highlight.SimpleFragListBuilder\"/&gt;\n      \n      &lt;!-- Configure the single fragListBuilder --&gt;\n      &lt;fragListBuilder name=\"single\" \n                       class=\"solr.highlight.SingleFragListBuilder\"/&gt;\n      \n      &lt;!-- Configure the weighted fragListBuilder --&gt;\n      &lt;fragListBuilder name=\"weighted\" \n                       default=\"true\"\n                       class=\"solr.highlight.WeightedFragList
 Builder\"/&gt;\n      \n      &lt;!-- default tag FragmentsBuilder --&gt;\n      &lt;fragmentsBuilder name=\"default\" \n                        default=\"true\"\n                        class=\"solr.highlight.ScoreOrderFragmentsBuilder\"&gt;\n        &lt;!-- \n        &lt;lst name=\"defaults\"&gt;\n          &lt;str name=\"hl.multiValuedSeparatorChar\"&gt;/&lt;/str&gt;\n        &lt;/lst&gt;\n        --&gt;\n      &lt;/fragmentsBuilder&gt;\n\n      &lt;!-- multi-colored tag FragmentsBuilder --&gt;\n      &lt;fragmentsBuilder name=\"colored\" \n                        class=\"solr.highlight.ScoreOrderFragmentsBuilder\"&gt;\n        &lt;lst name=\"defaults\"&gt;\n          &lt;str name=\"hl.tag.pre\"&gt;&lt;![CDATA[\n               &lt;b style=\"background:yellow\"&gt;,&lt;b style=\"background:lawgreen\"&gt;,\n               &lt;b style=\"background:aquamarine\"&gt;,&lt;b style=\"background:magenta\"&gt;,\n               &lt;b style=\"background:palegreen\"&gt;,&lt;b style=\"backgroun
 d:coral\"&gt;,\n               &lt;b style=\"background:wheat\"&gt;,&lt;b style=\"background:khaki\"&gt;,\n               &lt;b style=\"background:lime\"&gt;,&lt;b style=\"background:deepskyblue\"&gt;]]&gt;&lt;/str&gt;\n          &lt;str name=\"hl.tag.post\"&gt;&lt;![CDATA[&lt;/b&gt;]]&gt;&lt;/str&gt;\n        &lt;/lst&gt;\n      &lt;/fragmentsBuilder&gt;\n      \n      &lt;boundaryScanner name=\"default\" \n                       default=\"true\"\n                       class=\"solr.highlight.SimpleBoundaryScanner\"&gt;\n        &lt;lst name=\"defaults\"&gt;\n          &lt;str name=\"hl.bs.maxScan\"&gt;10&lt;/str&gt;\n          &lt;str name=\"hl.bs.chars\"&gt;.,!? &amp;#9;&amp;#10;&amp;#13;&lt;/str&gt;\n        &lt;/lst&gt;\n      &lt;/boundaryScanner&gt;\n      \n      &lt;boundaryScanner name=\"breakIterator\" \n                       class=\"solr.highlight.BreakIteratorBoundaryScanner\"&gt;\n        &lt;lst name=\"defaults\"&gt;\n          &lt;!-- type should be one of CHARACTER
 , WORD(default), LINE and SENTENCE --&gt;\n          &lt;str name=\"hl.bs.type\"&gt;WORD&lt;/str&gt;\n          &lt;!-- language and country are used when constructing Locale object.  --&gt;\n          &lt;!-- And the Locale object will be used when getting instance of BreakIterator --&gt;\n          &lt;str name=\"hl.bs.language\"&gt;en&lt;/str&gt;\n          &lt;str name=\"hl.bs.country\"&gt;US&lt;/str&gt;\n        &lt;/lst&gt;\n      &lt;/boundaryScanner&gt;\n    &lt;/highlighting&gt;\n  &lt;/searchComponent&gt;\n\n  &lt;!-- Update Processors\n\n       Chains of Update Processor Factories for dealing with Update\n       Requests can be declared, and then used by name in Update\n       Request Processors\n\n       http://wiki.apache.org/solr/UpdateRequestProcessor\n\n    --&gt; \n\n  &lt;!-- Add unknown fields to the schema \n  \n       An example field type guessing update processor that will\n       attempt to parse string-typed field values as Booleans, Longs,\n       Doubles, 
 or Dates, and then add schema fields with the guessed\n       field types.  \n       \n       This requires that the schema is both managed and mutable, by\n       declaring schemaFactory as ManagedIndexSchemaFactory, with\n       mutable specified as true. \n       \n       See http://wiki.apache.org/solr/GuessingFieldTypes\n    --&gt;\n  &lt;updateRequestProcessorChain name=\"add-unknown-fields-to-the-schema\"&gt;\n\n    &lt;processor class=\"solr.DefaultValueUpdateProcessorFactory\"&gt;\n        &lt;str name=\"fieldName\"&gt;_ttl_&lt;/str&gt;\n        &lt;str name=\"value\"&gt;+{{logsearch_service_logs_max_retention}}DAYS&lt;/str&gt;\n    &lt;/processor&gt;\n    &lt;processor class=\"solr.processor.DocExpirationUpdateProcessorFactory\"&gt;\n        &lt;int name=\"autoDeletePeriodSeconds\"&gt;30&lt;/int&gt;\n        &lt;str name=\"ttlFieldName\"&gt;_ttl_&lt;/str&gt;\n        &lt;str name=\"expirationFieldName\"&gt;_expire_at_&lt;/str&gt;\n    &lt;/processor&gt;\n    &lt;processor 
 class=\"solr.FirstFieldValueUpdateProcessorFactory\"&gt;\n      &lt;str name=\"fieldName\"&gt;_expire_at_&lt;/str&gt;\n    &lt;/processor&gt;\n\n\n    &lt;processor class=\"solr.RemoveBlankFieldUpdateProcessorFactory\"/&gt;\n    &lt;processor class=\"solr.ParseBooleanFieldUpdateProcessorFactory\"/&gt;\n    &lt;processor class=\"solr.ParseLongFieldUpdateProcessorFactory\"/&gt;\n    &lt;processor class=\"solr.ParseDoubleFieldUpdateProcessorFactory\"/&gt;\n    &lt;processor class=\"solr.ParseDateFieldUpdateProcessorFactory\"&gt;\n      &lt;arr name=\"format\"&gt;\n        &lt;str&gt;yyyy-MM-dd'T'HH:mm:ss.SSSZ&lt;/str&gt;\n        &lt;str&gt;yyyy-MM-dd'T'HH:mm:ss,SSSZ&lt;/str&gt;\n        &lt;str&gt;yyyy-MM-dd'T'HH:mm:ss.SSS&lt;/str&gt;\n        &lt;str&gt;yyyy-MM-dd'T'HH:mm:ss,SSS&lt;/str&gt;\n        &lt;str&gt;yyyy-MM-dd'T'HH:mm:ssZ&lt;/str&gt;\n        &lt;str&gt;yyyy-MM-dd'T'HH:mm:ss&lt;/str&gt;\n        &lt;str&gt;yyyy-MM-dd'T'HH:mmZ&lt;/str&gt;\n        &lt;str&gt;yyyy-MM-dd'T'HH
 :mm&lt;/str&gt;\n        &lt;str&gt;yyyy-MM-dd HH:mm:ss.SSSZ&lt;/str&gt;\n        &lt;str&gt;yyyy-MM-dd HH:mm:ss,SSSZ&lt;/str&gt;\n        &lt;str&gt;yyyy-MM-dd HH:mm:ss.SSS&lt;/str&gt;\n        &lt;str&gt;yyyy-MM-dd HH:mm:ss,SSS&lt;/str&gt;\n        &lt;str&gt;yyyy-MM-dd HH:mm:ssZ&lt;/str&gt;\n        &lt;str&gt;yyyy-MM-dd HH:mm:ss&lt;/str&gt;\n        &lt;str&gt;yyyy-MM-dd HH:mmZ&lt;/str&gt;\n        &lt;str&gt;yyyy-MM-dd HH:mm&lt;/str&gt;\n        &lt;str&gt;yyyy-MM-dd&lt;/str&gt;\n      &lt;/arr&gt;\n    &lt;/processor&gt;\n    &lt;processor class=\"solr.AddSchemaFieldsUpdateProcessorFactory\"&gt;\n      &lt;str name=\"defaultFieldType\"&gt;key_lower_case&lt;/str&gt;\n      &lt;lst name=\"typeMapping\"&gt;\n        &lt;str name=\"valueClass\"&gt;java.lang.Boolean&lt;/str&gt;\n        &lt;str name=\"fieldType\"&gt;booleans&lt;/str&gt;\n      &lt;/lst&gt;\n      &lt;lst name=\"typeMapping\"&gt;\n        &lt;str name=\"valueClass\"&gt;java.util.Date&lt;/str&gt;\n        &lt;str nam
 e=\"fieldType\"&gt;tdates&lt;/str&gt;\n      &lt;/lst&gt;\n      &lt;lst name=\"typeMapping\"&gt;\n        &lt;str name=\"valueClass\"&gt;java.lang.Long&lt;/str&gt;\n        &lt;str name=\"valueClass\"&gt;java.lang.Integer&lt;/str&gt;\n        &lt;str name=\"fieldType\"&gt;tlongs&lt;/str&gt;\n      &lt;/lst&gt;\n      &lt;lst name=\"typeMapping\"&gt;\n        &lt;str name=\"valueClass\"&gt;java.lang.Number&lt;/str&gt;\n        &lt;str name=\"fieldType\"&gt;tdoubles&lt;/str&gt;\n      &lt;/lst&gt;\n    &lt;/processor&gt;\n\n    &lt;processor class=\"solr.LogUpdateProcessorFactory\"/&gt;\n    &lt;processor class=\"solr.RunUpdateProcessorFactory\"/&gt;\n  &lt;/updateRequestProcessorChain&gt;\n\n\n  &lt;!-- Deduplication\n\n       An example dedup update processor that creates the \"id\" field\n       on the fly based on the hash code of some other fields.  This\n       example has overwriteDupes set to false since we are using the\n       id field as the signatureField and Solr will ma
 intain\n       uniqueness based on that anyway.  \n       \n    --&gt;\n  &lt;!--\n     &lt;updateRequestProcessorChain name=\"dedupe\"&gt;\n       &lt;processor class=\"solr.processor.SignatureUpdateProcessorFactory\"&gt;\n         &lt;bool name=\"enabled\"&gt;true&lt;/bool&gt;\n         &lt;str name=\"signatureField\"&gt;id&lt;/str&gt;\n         &lt;bool name=\"overwriteDupes\"&gt;false&lt;/bool&gt;\n         &lt;str name=\"fields\"&gt;name,features,cat&lt;/str&gt;\n         &lt;str name=\"signatureClass\"&gt;solr.processor.Lookup3Signature&lt;/str&gt;\n       &lt;/processor&gt;\n       &lt;processor class=\"solr.LogUpdateProcessorFactory\" /&gt;\n       &lt;processor class=\"solr.RunUpdateProcessorFactory\" /&gt;\n     &lt;/updateRequestProcessorChain&gt;\n    --&gt;\n  \n  &lt;!-- Language identification\n\n       This example update chain identifies the language of the incoming\n       documents using the langid contrib. The detected language is\n       written to field languag
 e_s. No field name mapping is done.\n       The fields used for detection are text, title, subject and description,\n       making this example suitable for detecting languages form full-text\n       rich documents injected via ExtractingRequestHandler.\n       See more about langId at http://wiki.apache.org/solr/LanguageDetection\n    --&gt;\n    &lt;!--\n     &lt;updateRequestProcessorChain name=\"langid\"&gt;\n       &lt;processor class=\"org.apache.solr.update.processor.TikaLanguageIdentifierUpdateProcessorFactory\"&gt;\n         &lt;str name=\"langid.fl\"&gt;text,title,subject,description&lt;/str&gt;\n         &lt;str name=\"langid.langField\"&gt;language_s&lt;/str&gt;\n         &lt;str name=\"langid.fallback\"&gt;en&lt;/str&gt;\n       &lt;/processor&gt;\n       &lt;processor class=\"solr.LogUpdateProcessorFactory\" /&gt;\n       &lt;processor class=\"solr.RunUpdateProcessorFactory\" /&gt;\n     &lt;/updateRequestProcessorChain&gt;\n    --&gt;\n\n  &lt;!-- Script update proces
 sor\n\n    This example hooks in an update processor implemented using JavaScript.\n\n    See more about the script update processor at http://wiki.apache.org/solr/ScriptUpdateProcessor\n  --&gt;\n  &lt;!--\n    &lt;updateRequestProcessorChain name=\"script\"&gt;\n      &lt;processor class=\"solr.StatelessScriptUpdateProcessorFactory\"&gt;\n        &lt;str name=\"script\"&gt;update-script.js&lt;/str&gt;\n        &lt;lst name=\"params\"&gt;\n          &lt;str name=\"config_param\"&gt;example config parameter&lt;/str&gt;\n        &lt;/lst&gt;\n      &lt;/processor&gt;\n      &lt;processor class=\"solr.RunUpdateProcessorFactory\" /&gt;\n    &lt;/updateRequestProcessorChain&gt;\n  --&gt;\n \n  &lt;!-- Response Writers\n\n       http://wiki.apache.org/solr/QueryResponseWriter\n\n       Request responses will be written using the writer specified by\n       the 'wt' request parameter matching the name of a registered\n       writer.\n\n       The \"default\" writer is the default and will
  be used if 'wt' is\n       not specified in the request.\n    --&gt;\n  &lt;!-- The following response writers are implicitly configured unless\n       overridden...\n    --&gt;\n  &lt;!--\n     &lt;queryResponseWriter name=\"xml\" \n                          default=\"true\"\n                          class=\"solr.XMLResponseWriter\" /&gt;\n     &lt;queryResponseWriter name=\"json\" class=\"solr.JSONResponseWriter\"/&gt;\n     &lt;queryResponseWriter name=\"python\" class=\"solr.PythonResponseWriter\"/&gt;\n     &lt;queryResponseWriter name=\"ruby\" class=\"solr.RubyResponseWriter\"/&gt;\n     &lt;queryResponseWriter name=\"php\" class=\"solr.PHPResponseWriter\"/&gt;\n     &lt;queryResponseWriter name=\"phps\" class=\"solr.PHPSerializedResponseWriter\"/&gt;\n     &lt;queryResponseWriter name=\"csv\" class=\"solr.CSVResponseWriter\"/&gt;\n     &lt;queryResponseWriter name=\"schema.xml\" class=\"solr.SchemaXmlResponseWriter\"/&gt;\n    --&gt;\n\n  &lt;queryResponseWriter name=\"json
 \" class=\"solr.JSONResponseWriter\"&gt;\n     &lt;!-- For the purposes of the tutorial, JSON responses are written as\n      plain text so that they are easy to read in *any* browser.\n      If you expect a MIME type of \"application/json\" just remove this override.\n     --&gt;\n    &lt;str name=\"content-type\"&gt;text/plain; charset=UTF-8&lt;/str&gt;\n  &lt;/queryResponseWriter&gt;\n  \n  &lt;!--\n     Custom response writers can be declared as needed...\n    --&gt;\n  &lt;queryResponseWriter name=\"velocity\" class=\"solr.VelocityResponseWriter\" startup=\"lazy\"&gt;\n    &lt;str name=\"template.base.dir\"&gt;${velocity.template.base.dir:}&lt;/str&gt;\n  &lt;/queryResponseWriter&gt;\n\n  &lt;!-- XSLT response writer transforms the XML output by any xslt file found\n       in Solr's conf/xslt directory.  Changes to xslt files are checked for\n       every xsltCacheLifetimeSeconds.  \n    --&gt;\n  &lt;queryResponseWriter name=\"xslt\" class=\"solr.XSLTResponseWriter\"&gt;\n    
 &lt;int name=\"xsltCacheLifetimeSeconds\"&gt;5&lt;/int&gt;\n  &lt;/queryResponseWriter&gt;\n\n  &lt;!-- Query Parsers\n\n       http://wiki.apache.org/solr/SolrQuerySyntax\n\n       Multiple QParserPlugins can be registered by name, and then\n       used in either the \"defType\" param for the QueryComponent (used\n       by SearchHandler) or in LocalParams\n    --&gt;\n  &lt;!-- example of registering a query parser --&gt;\n  &lt;!--\n     &lt;queryParser name=\"myparser\" class=\"com.mycompany.MyQParserPlugin\"/&gt;\n    --&gt;\n\n  &lt;!-- Function Parsers\n\n       http://wiki.apache.org/solr/FunctionQuery\n\n       Multiple ValueSourceParsers can be registered by name, and then\n       used as function names when using the \"func\" QParser.\n    --&gt;\n  &lt;!-- example of registering a custom function parser  --&gt;\n  &lt;!--\n     &lt;valueSourceParser name=\"myfunc\" \n                        class=\"com.mycompany.MyValueSourceParser\" /&gt;\n    --&gt;\n    \n  \n  &lt;!-
 - Document Transformers\n       http://wiki.apache.org/solr/DocTransformers\n    --&gt;\n  &lt;!--\n     Could be something like:\n     &lt;transformer name=\"db\" class=\"com.mycompany.LoadFromDatabaseTransformer\" &gt;\n       &lt;int name=\"connection\"&gt;jdbc://....&lt;/int&gt;\n     &lt;/transformer&gt;\n     \n     To add a constant value to all docs, use:\n     &lt;transformer name=\"mytrans2\" class=\"org.apache.solr.response.transform.ValueAugmenterFactory\" &gt;\n       &lt;int name=\"value\"&gt;5&lt;/int&gt;\n     &lt;/transformer&gt;\n     \n     If you want the user to still be able to change it with _value:something_ use this:\n     &lt;transformer name=\"mytrans3\" class=\"org.apache.solr.response.transform.ValueAugmenterFactory\" &gt;\n       &lt;double name=\"defaultValue\"&gt;5&lt;/double&gt;\n     &lt;/transformer&gt;\n\n      If you are using the QueryElevationComponent, you may wish to mark documents that get boosted.  The\n      EditorialMarkerFactory will do 
 exactly that:\n     &lt;transformer name=\"qecBooster\" class=\"org.apache.sol

<TRUNCATED>
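
The truncated payload above is the LogSearch solrconfig.xml carried in the diff. Its "add-unknown-fields-to-the-schema" update chain guesses field types for unknown fields, parses string timestamps against the listed date formats, and stamps each document with a default _ttl_ value so DocExpirationUpdateProcessorFactory can purge old log events. Below is a minimal, hypothetical Python sketch of a client indexing against a collection configured that way; the URL, collection name, and field names are placeholders and are not taken from this commit.

import json
import urllib.request

# Placeholder endpoint: a service-logs collection on a local Solr instance.
SOLR_UPDATE_URL = "http://localhost:8983/solr/hadoop_logs/update?commit=true"

# A bare log event with a plain string timestamp. Per the chain quoted above,
# ParseDateFieldUpdateProcessorFactory should recognize the format, the field
# type guessing should add the unknown fields, and _ttl_ gets its default.
doc = {
    "id": "example-log-1",
    "logtime": "2016-12-08 23:29:40,123",
    "level": "INFO",
    "log_message": "sample log line",
}

request = urllib.request.Request(
    SOLR_UPDATE_URL,
    data=json.dumps([doc]).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(request) as response:
    print(response.read().decode("utf-8"))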

[20/25] ambari git commit: Merge from branch-2.5

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/storm-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/storm-env.xml b/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/storm-env.xml
index 9547335..3b814a9 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/storm-env.xml
+++ b/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/storm-env.xml
@@ -21,6 +21,23 @@
 -->
 <configuration supports_adding_forbidden="true">
   <property>
+    <name>storm.atlas.hook</name>
+    <value>false</value>
+    <display-name>Enable Atlas Hook</display-name>
+    <description>Enable Atlas Hook</description>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+    <depends-on>
+      <property>
+        <type>application-properties</type>
+        <name>atlas.rest.address</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
     <name>nimbus_seeds_supported</name>
     <value>true</value>
     <description/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py
index e753e98..4859534 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py
@@ -170,7 +170,7 @@ if stack_supports_storm_kerberos:
   else:
     storm_thrift_transport = config['configurations']['storm-site']['_storm.thrift.nonsecure.transport']
 
-ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
+ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
 has_metric_collector = not len(ams_collector_hosts) == 0
 metric_collector_port = None
 if has_metric_collector:
@@ -214,10 +214,10 @@ jar_jvm_opts = ''
 ########################################################
 #region Atlas Hooks
 storm_atlas_application_properties = default('/configurations/storm-atlas-application.properties', {})
+enable_atlas_hook = default('/configurations/storm-env/storm.atlas.hook', False)
+atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
 
-if has_atlas_in_cluster():
-  atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
-
+if enable_atlas_hook:
   # Only append /etc/atlas/conf to classpath if on HDP 2.4.*
   if check_stack_feature(StackFeature.ATLAS_CONF_DIR_IN_PATH, stack_version_formatted):
     atlas_conf_dir = os.environ['METADATA_CONF'] if 'METADATA_CONF' in os.environ else '/etc/atlas/conf'
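
The ams_collector_hosts change in this hunk flattens the collector host list into a comma-separated string, and the emptiness check right after it still behaves the same because joining an empty list yields an empty string. A small plain-Python illustration (host names are made up):

def join_hosts(hosts):
    # Mirrors the params_linux.py change: a list of collector hosts becomes
    # one comma-separated string for downstream configuration values.
    return ",".join(hosts)

print(join_hosts(["c6401.ambari.apache.org", "c6402.ambari.apache.org"]))
# -> "c6401.ambari.apache.org,c6402.ambari.apache.org"

empty = join_hosts([])
print(empty)                      # -> "" (empty string)
print(not len(empty) == 0)        # -> False, so has_metric_collector stays False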

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/storm.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/storm.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/storm.py
index bda4fe2..f02ced4 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/storm.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/storm.py
@@ -99,7 +99,7 @@ def storm(name=None):
   )
 
   # Generate atlas-application.properties.xml file and symlink the hook jars
-  if has_atlas_in_cluster():
+  if params.enable_atlas_hook:
     atlas_hook_filepath = os.path.join(params.conf_dir, params.atlas_hook_filename)
     setup_atlas_hook(SERVICE.STORM, params.storm_atlas_application_properties, atlas_hook_filepath, params.storm_user, params.user_group)
     storm_extlib_dir = os.path.join(params.storm_component_home_dir, "extlib")

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service_check.py
index 1d8963d..a36e8cc 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service_check.py
@@ -126,34 +126,56 @@ class ServiceCheckDefault(ServiceCheck):
       if "application" in item:
         application_name = item
 
-    for rm_webapp_address in params.rm_webapp_addresses_list:
-      info_app_url = params.scheme + "://" + rm_webapp_address + "/ws/v1/cluster/apps/" + application_name
+    # Find out the active RM from RM list
+    # Raise an exception if the active rm cannot be determined
+    active_rm_webapp_address = self.get_active_rm_webapp_address()
+    Logger.info("Active Resource Manager web app address is : " + active_rm_webapp_address);
 
-      get_app_info_cmd = "curl --negotiate -u : -ks --location-trusted --connect-timeout " + CURL_CONNECTION_TIMEOUT + " " + info_app_url
+    # Verify job state from active resource manager via rest api
+    info_app_url = params.scheme + "://" + active_rm_webapp_address + "/ws/v1/cluster/apps/" + application_name
+    get_app_info_cmd = "curl --negotiate -u : -ks --location-trusted --connect-timeout " + CURL_CONNECTION_TIMEOUT + " " + info_app_url
 
-      return_code, stdout, _ = get_user_call_output(get_app_info_cmd,
-                                            user=params.smokeuser,
-                                            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-                                            )
+    return_code, stdout, _ = get_user_call_output(get_app_info_cmd,
+                                                  user=params.smokeuser,
+                                                  path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+                                                  )
 
-      # Handle HDP<2.2.8.1 where RM doesn't do automatic redirection from standby to active
-      if stdout.startswith("This is standby RM. Redirecting to the current active RM:"):
-        Logger.info(format("Skipped checking of {rm_webapp_address} since returned '{stdout}'"))
-        continue
+    try:
+      json_response = json.loads(stdout)
+    except Exception as e:
+      raise Fail(format("Response from YARN API was not a valid JSON. Response: {stdout}"))
 
-      try:
-        json_response = json.loads(stdout)
-      except Exception as e:
-        raise Fail(format("Response from YARN API was not a valid JSON. Response: {stdout}"))
-      
-      if json_response is None or 'app' not in json_response or \
-              'state' not in json_response['app'] or 'finalStatus' not in json_response['app']:
-        raise Fail("Application " + app_url + " returns invalid data.")
-
-      if json_response['app']['state'] != "FINISHED" or json_response['app']['finalStatus'] != "SUCCEEDED":
-        raise Fail("Application " + app_url + " state/status is not valid. Should be FINISHED/SUCCEEDED.")
+    if json_response is None or 'app' not in json_response or \
+            'state' not in json_response['app'] or 'finalStatus' not in json_response['app']:
+      raise Fail("Application " + app_url + " returns invalid data.")
 
+    if json_response['app']['state'] != "FINISHED" or json_response['app']['finalStatus'] != "SUCCEEDED":
+      raise Fail("Application " + app_url + " state/status is not valid. Should be FINISHED/SUCCEEDED.")
 
+  def get_active_rm_webapp_address(self):
+    import params
+    active_rm_webapp_address = None
+    rm_webapp_addresses = params.rm_webapp_addresses_list
+    if rm_webapp_addresses is not None and len(rm_webapp_addresses) > 0:
+      for rm_webapp_address in rm_webapp_addresses:
+        rm_state_url = params.scheme + "://" + rm_webapp_address + "/ws/v1/cluster/info"
+        get_cluster_info_cmd = "curl --negotiate -u : -ks --location-trusted --connect-timeout " + CURL_CONNECTION_TIMEOUT + " " + rm_state_url
+        try:
+          return_code, stdout, _ = get_user_call_output(get_cluster_info_cmd,
+                                                        user=params.smokeuser,
+                                                        path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+                                                        )
+          json_response = json.loads(stdout)
+          if json_response is not None and 'clusterInfo' in json_response \
+            and json_response['clusterInfo']['haState'] == "ACTIVE":
+              active_rm_webapp_address = rm_webapp_address
+              break
+        except Exception as e:
+          Logger.warning(format("Cluster info is not available from calling {get_cluster_info_cmd}"))
+
+    if active_rm_webapp_address is None:
+      raise Fail('Resource Manager state is not available. Failed to determine the active Resource Manager web application address from {0}'.format(','.join(rm_webapp_addresses)))
+    return active_rm_webapp_address
 
 if __name__ == "__main__":
   ServiceCheck().execute()
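
Note on the new helper: get_active_rm_webapp_address() probes each configured
ResourceManager's /ws/v1/cluster/info endpoint and returns the first address whose
clusterInfo.haState is ACTIVE, failing the service check if none responds as active.
A minimal standalone sketch of the same idea, using plain urllib instead of the
curl/get_user_call_output wrapper and a hypothetical host list:

    import json
    try:
        from urllib.request import urlopen   # Python 3
    except ImportError:
        from urllib2 import urlopen          # Python 2

    def find_active_rm(webapp_addresses, scheme="http"):
        # Return the first RM whose cluster info reports haState == ACTIVE.
        for address in webapp_addresses:
            url = "%s://%s/ws/v1/cluster/info" % (scheme, address)
            try:
                info = json.loads(urlopen(url, timeout=5).read().decode("utf-8"))
                if info.get("clusterInfo", {}).get("haState") == "ACTIVE":
                    return address
            except Exception:
                continue  # unreachable or erroring RM; try the next one
        raise RuntimeError("no active ResourceManager among: " + ",".join(webapp_addresses))

    # Hypothetical HA pair; the service check takes this list from params.rm_webapp_addresses_list.
    # find_active_rm(["rm1.example.com:8088", "rm2.example.com:8088"])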

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.6/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.6/metainfo.xml b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.6/metainfo.xml
index 525078e..cbb5ba2 100644
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.6/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.6/metainfo.xml
@@ -35,7 +35,7 @@
           </packages>
         </osSpecific>
         <osSpecific>
-          <osFamily>ubuntu12</osFamily>
+          <osFamily>ubuntu12,ubuntu14,ubuntu16</osFamily>
           <packages>
             <package>
               <name>zookeeper-${stack_version}</name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/scripts/post-user-creation-hook.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/scripts/post-user-creation-hook.sh b/ambari-server/src/main/resources/scripts/post-user-creation-hook.sh
index 34169c1..ee8d2d1 100755
--- a/ambari-server/src/main/resources/scripts/post-user-creation-hook.sh
+++ b/ambari-server/src/main/resources/scripts/post-user-creation-hook.sh
@@ -84,6 +84,10 @@ echo "[" | cat > "$JSON_INPUT"
 while read -r LINE
 do
   USR_NAME=$(echo "$LINE" | awk -F, '{print $1}')
+  echo "Processing user name: $USR_NAME"
+
+  # shell-escape the username (printf "%q") before embedding it in the payload
+  USR_NAME=$(printf "%q" "$USR_NAME")
 
   cat <<EOF >> "$JSON_INPUT"
     {
@@ -97,7 +101,10 @@ do
 EOF
 done <"$CSV_FILE"
 
+# deleting the last line
 sed -i '$ d' "$JSON_INPUT"
+
+# appending json closing elements to the end of the file
 echo $'}\n]' | cat >> "$JSON_INPUT"
 echo "Generating file $JSON_INPUT ... DONE."
 echo "Processing post user creation hook payload ... DONE."

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/scripts/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/scripts/stack_advisor.py b/ambari-server/src/main/resources/scripts/stack_advisor.py
index 5926c39..abfab87 100755
--- a/ambari-server/src/main/resources/scripts/stack_advisor.py
+++ b/ambari-server/src/main/resources/scripts/stack_advisor.py
@@ -70,13 +70,11 @@ def main(argv=None):
   if len(args) < 3:
     sys.stderr.write(USAGE)
     sys.exit(2)
-    pass
 
   action = args[0]
   if action not in ALL_ACTIONS:
     sys.stderr.write(USAGE)
     sys.exit(2)
-    pass
 
   hostsFile = args[1]
   servicesFile = args[2]
@@ -89,6 +87,7 @@ def main(argv=None):
   stackName = services["Versions"]["stack_name"]
   stackVersion = services["Versions"]["stack_version"]
   parentVersions = []
+
   if "stack_hierarchy" in services["Versions"]:
     parentVersions = services["Versions"]["stack_hierarchy"]["stack_versions"]
 
@@ -96,8 +95,9 @@ def main(argv=None):
 
   # Perform action
   actionDir = os.path.realpath(os.path.dirname(args[1]))
-  result = {}
-  result_file = "non_valid_result_file.json"
+
+  # filter
+  hosts = stackAdvisor.filterHostMounts(hosts, services)
 
   if action == RECOMMEND_COMPONENT_LAYOUT_ACTION:
     result = stackAdvisor.recommendComponentLayout(services, hosts)
@@ -111,12 +111,11 @@ def main(argv=None):
   elif action == RECOMMEND_CONFIGURATION_DEPENDENCIES:
     result = stackAdvisor.recommendConfigurationDependencies(services, hosts)
     result_file = os.path.join(actionDir, "configurations.json")
-  else: # action == VALIDATE_CONFIGURATIONS
+  else:  # action == VALIDATE_CONFIGURATIONS
     result = stackAdvisor.validateConfigurations(services, hosts)
     result_file = os.path.join(actionDir, "configurations-validation.json")
 
   dumpJson(result, result_file)
-  pass
 
 
 def instantiateStackAdvisor(stackName, stackVersion, parentVersions):
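
Note on the new filtering step: main() now calls stackAdvisor.filterHostMounts(hosts, services)
before any recommendation is computed, so ignored mount points never reach the per-service
recommenders. The implementation is not part of this hunk; a rough sketch of what such a filter
could look like, assuming the agent_mounts_ignore_list property added to cluster-env below and
the usual hosts['items'][i]['Hosts']['disk_info'] layout:

    def filter_host_mounts(hosts, services):
        # Drop mount points named in cluster-env/agent_mounts_ignore_list from each
        # host's disk_info so they are never suggested for data/log directories.
        cluster_env = services.get("configurations", {}).get("cluster-env", {}).get("properties", {})
        ignore_list = cluster_env.get("agent_mounts_ignore_list", "")
        ignored = set(m.strip() for m in ignore_list.split(",") if m.strip())
        if not ignored:
            return hosts
        for host in hosts.get("items", []):
            host_info = host.get("Hosts", {})
            host_info["disk_info"] = [d for d in host_info.get("disk_info", [])
                                      if d.get("mountpoint") not in ignored]
        return hosts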

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
index cc6c8a3..93680bf 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
@@ -280,4 +280,14 @@ gpgcheck=0</value>
     <description>YARN Memory widget should be hidden by default on the dashboard.</description>
     <on-ambari-upgrade add="true"/>
   </property>
+  <property>
+    <name>agent_mounts_ignore_list</name>
+    <value/>
+    <description>Comma-separated list of mount points that Ambari's Stack Advisor will ignore when suggesting property values</description>
+    <on-ambari-upgrade add="false"/>
+    <value-attributes>
+      <visible>true</visible>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
 </configuration>
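
With the default empty value the new property is a no-op; setting it to something like
/mnt/resource,/home (a hypothetical value) tells the Stack Advisor to skip those mount
points when suggesting data, log and checkpoint directories, which is what the
filterHostMounts step added to scripts/stack_advisor.py is presumably meant to consume.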

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
index 320872e..f97789b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
@@ -105,13 +105,17 @@ def create_users_and_groups(user_and_groups):
 
   import params
 
-  parts = re.split('\s', user_and_groups)
+  parts = re.split('\s+', user_and_groups)
   if len(parts) == 1:
     parts.append("")
 
   users_list = parts[0].split(",") if parts[0] else []
   groups_list = parts[1].split(",") if parts[1] else []
 
+  # skip creating groups and users if * is provided as value.
+  users_list = filter(lambda x: x != '*', users_list)
+  groups_list = filter(lambda x: x != '*', groups_list)
+
   if users_list:
     User(users_list,
           fetch_nonlocal_groups = params.fetch_nonlocal_groups
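
Note on the wildcard handling: the new filter() calls make a value of '*' mean "manage
nothing" instead of attempting to create a literal user or group named *. A quick
illustration under Python 2, where filter() returns a list as the rest of this script expects:

    users_list = ["hdfs", "*", "yarn"]
    groups_list = ["*"]

    users_list = filter(lambda x: x != '*', users_list)    # ["hdfs", "yarn"]
    groups_list = filter(lambda x: x != '*', groups_list)  # [] -> no Group() resource is created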

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
index 4603c8b..4a5ee25 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
@@ -90,7 +90,7 @@ jtnode_host = default("/clusterHostInfo/jtnode_host", [])
 namenode_host = default("/clusterHostInfo/namenode_host", [])
 zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
 ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
+ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
 
 has_namenode = not len(namenode_host) == 0
 has_resourcemanager = not len(rm_host) == 0
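
Note: ams_collector_hosts is now the comma-separated string form of the host list,
presumably because downstream consumers of this param treat it as a single value.
For example (hypothetical hostnames):

    metrics_collector_hosts = ["c6401.ambari.apache.org", "c6402.ambari.apache.org"]
    ams_collector_hosts = ",".join(metrics_collector_hosts)
    # "c6401.ambari.apache.org,c6402.ambari.apache.org"; an empty host list yields ""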

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
index dd87b72..27a755c 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
@@ -289,6 +289,11 @@
       "min_version": "2.5.0.0"
     },
     {
+      "name": "ranger_setup_db_on_start",
+      "description": "Allows setup of ranger db and java patches to be called multiple times on each START",
+      "min_version": "2.6.0.0"
+    },
+    {
       "name": "storm_metrics_apache_classes",
       "description": "Metrics sink for Storm that uses Apache class names",
       "min_version": "2.5.0.0"

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
index 0c74c02..e47743e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
@@ -100,9 +100,23 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
       "HBASE": self.recommendHbaseConfigurations,
       "STORM": self.recommendStormConfigurations,
       "AMBARI_METRICS": self.recommendAmsConfigurations,
-      "RANGER": self.recommendRangerConfigurations
+      "RANGER": self.recommendRangerConfigurations,
+      "ZOOKEEPER": self.recommendZookeeperConfigurations,
+      "OOZIE": self.recommendOozieConfigurations
     }
 
+  def recommendOozieConfigurations(self, configurations, clusterData, services, hosts):
+    oozie_mount_properties = [
+      ("oozie_data_dir", "OOZIE_SERVER", "/hadoop/oozie/data", "single"),
+    ]
+    self.updateMountProperties("oozie-env", oozie_mount_properties, configurations, services, hosts)
+
+  def recommendZookeeperConfigurations(self, configurations, clusterData, services, hosts):
+    zk_mount_properties = [
+      ("dataDir", "ZOOKEEPER_SERVER", "/hadoop/zookeeper", "single"),
+    ]
+    self.updateMountProperties("zoo.cfg", zk_mount_properties, configurations, services, hosts)
+
   def recommendYARNConfigurations(self, configurations, clusterData, services, hosts):
     putYarnProperty = self.putProperty(configurations, "yarn-site", services)
     putYarnPropertyAttribute = self.putPropertyAttribute(configurations, "yarn-site")
@@ -115,6 +129,15 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
     putYarnProperty('yarn.scheduler.maximum-allocation-mb', int(configurations["yarn-site"]["properties"]["yarn.nodemanager.resource.memory-mb"]))
     putYarnEnvProperty('min_user_id', self.get_system_min_uid())
 
+    yarn_mount_properties = [
+      ("yarn.nodemanager.local-dirs", "NODEMANAGER", "/hadoop/yarn/local", "multi"),
+      ("yarn.nodemanager.log-dirs", "NODEMANAGER", "/hadoop/yarn/log", "multi"),
+      ("yarn.timeline-service.leveldb-timeline-store.path", "APP_TIMELINE_SERVER", "/hadoop/yarn/timeline", "single"),
+      ("yarn.timeline-service.leveldb-state-store.path", "APP_TIMELINE_SERVER", "/hadoop/yarn/timeline", "single")
+    ]
+
+    self.updateMountProperties("yarn-site", yarn_mount_properties, configurations, services, hosts)
+
     sc_queue_name = self.recommendYarnQueue(services, "yarn-env", "service_check.queue.name")
     if sc_queue_name is not None:
       putYarnEnvProperty("service_check.queue.name", sc_queue_name)
@@ -145,6 +168,13 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
     putMapredProperty('mapreduce.map.java.opts', "-Xmx" + str(int(round(0.8 * clusterData['mapMemory']))) + "m")
     putMapredProperty('mapreduce.reduce.java.opts', "-Xmx" + str(int(round(0.8 * clusterData['reduceMemory']))) + "m")
     putMapredProperty('mapreduce.task.io.sort.mb', min(int(round(0.4 * clusterData['mapMemory'])), 1024))
+
+    mapred_mounts = [
+      ("mapred.local.dir", ["TASKTRACKER", "NODEMANAGER"], "/hadoop/mapred", "multi")
+    ]
+
+    self.updateMountProperties("mapred-site", mapred_mounts, configurations, services, hosts)
+
     mr_queue = self.recommendYarnQueue(services, "mapred-site", "mapreduce.job.queuename")
     if mr_queue is not None:
       putMapredProperty("mapreduce.job.queuename", mr_queue)
@@ -341,12 +371,15 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
       if len(namenodes.split(',')) > 1:
         putHDFSSitePropertyAttributes("dfs.namenode.rpc-address", "delete", "true")
 
-    #Initialize default 'dfs.datanode.data.dir' if needed
-    if (not hdfsSiteProperties) or ('dfs.datanode.data.dir' not in hdfsSiteProperties):
-      dataDirs = '/hadoop/hdfs/data'
-      putHDFSSiteProperty('dfs.datanode.data.dir', dataDirs)
-    else:
-      dataDirs = hdfsSiteProperties['dfs.datanode.data.dir'].split(",")
+    hdfs_mount_properties = [
+      ("dfs.datanode.data.dir", "DATANODE", "/hadoop/hdfs/data", "multi"),
+      ("dfs.namenode.name.dir", "DATANODE", "/hadoop/hdfs/namenode", "multi"),
+      ("dfs.namenode.checkpoint.dir", "SECONDARY_NAMENODE", "/hadoop/hdfs/namesecondary", "single")
+    ]
+
+    self.updateMountProperties("hdfs-site", hdfs_mount_properties, configurations, services, hosts)
+
+    dataDirs = hdfsSiteProperties['dfs.datanode.data.dir'].split(",")
 
     # dfs.datanode.du.reserved should be set to 10-15% of volume size
     # For each host selects maximum size of the volume. Then gets minimum for all hosts.
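
Note on the mount-property tuples used above (and in the HDP 2.1/2.2 advisors below): each
entry is (property name, component whose hosts' mounts are inspected, default path,
"single" or "multi"). updateMountProperties itself is defined elsewhere; a simplified
sketch of how such tuples could be expanded, assuming a hypothetical
get_mounts(component, hosts) lookup:

    def recommend_mount_values(mount_properties, get_mounts, hosts):
        # "multi" properties get one directory per mount point,
        # "single" properties get a directory on one mount point only.
        recommendations = {}
        for prop_name, component, default_path, mode in mount_properties:
            mounts = get_mounts(component, hosts) or ["/"]
            if mode == "multi":
                value = ",".join(m.rstrip("/") + default_path for m in mounts)
            else:
                value = mounts[0].rstrip("/") + default_path
            recommendations[prop_name] = value
        return recommendations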

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
index 9678dc1..17225d0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
@@ -24,12 +24,30 @@ class HDP21StackAdvisor(HDP206StackAdvisor):
     childRecommendConfDict = {
       "OOZIE": self.recommendOozieConfigurations,
       "HIVE": self.recommendHiveConfigurations,
-      "TEZ": self.recommendTezConfigurations
+      "TEZ": self.recommendTezConfigurations,
+      "STORM": self.recommendStormConfigurations,
+      "FALCON": self.recommendFalconConfigurations
     }
     parentRecommendConfDict.update(childRecommendConfDict)
     return parentRecommendConfDict
 
+  def recommendStormConfigurations(self, configurations, clusterData, services, hosts):
+    storm_mounts = [
+      ("storm.local.dir", ["NODEMANAGER", "NIMBUS"], "/hadoop/storm", "single")
+    ]
+
+    self.updateMountProperties("storm-site", storm_mounts, configurations, services, hosts)
+
+  def recommendFalconConfigurations(self, configurations, clusterData, services, hosts):
+    falcon_mounts = [
+      ("*.falcon.graph.storage.directory", "FALCON_SERVER", "/hadoop/falcon/data/lineage/graphdb", "single")
+    ]
+
+    self.updateMountProperties("falcon-startup.properties", falcon_mounts, configurations, services, hosts)
+
   def recommendOozieConfigurations(self, configurations, clusterData, services, hosts):
+    super(HDP21StackAdvisor, self).recommendOozieConfigurations(configurations, clusterData, services, hosts)
+
     oozieSiteProperties = getSiteProperties(services['configurations'], 'oozie-site')
     oozieEnvProperties = getSiteProperties(services['configurations'], 'oozie-env')
     putOozieProperty = self.putProperty(configurations, "oozie-site", services)

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/hive-site.xml
index ab350dc..287db10 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/hive-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/hive-site.xml
@@ -232,6 +232,10 @@ limitations under the License.
     <depends-on>
       <property>
         <type>hive-env</type>
+        <name>hive.atlas.hook</name>
+      </property>
+      <property>
+        <type>hive-env</type>
         <name>hive_timeline_logging_enabled</name>
       </property>
       <property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
index 8187da8..3789a16 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
@@ -44,10 +44,17 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
       "RANGER": self.recommendRangerConfigurations,
       "LOGSEARCH" : self.recommendLogsearchConfigurations,
       "SPARK": self.recommendSparkConfigurations,
+      "KAFKA": self.recommendKafkaConfigurations,
     }
     parentRecommendConfDict.update(childRecommendConfDict)
     return parentRecommendConfDict
 
+  def recommendKafkaConfigurations(self, configurations, clusterData, services, hosts):
+    kafka_mounts = [
+      ("log.dirs", "KAFKA_BROKER", "/kafka-logs", "multi")
+    ]
+
+    self.updateMountProperties("kafka-broker", kafka_mounts, configurations, services, hosts)
 
   def recommendSparkConfigurations(self, configurations, clusterData, services, hosts):
     """
@@ -1019,27 +1026,50 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
 
   def recommendLogsearchConfigurations(self, configurations, clusterData, services, hosts):
     putLogsearchProperty = self.putProperty(configurations, "logsearch-properties", services)
+    putLogsearchAttribute = self.putPropertyAttribute(configurations, "logsearch-properties")
+    putLogsearchEnvProperty = self.putProperty(configurations, "logsearch-env", services)
+    putLogsearchEnvAttribute = self.putPropertyAttribute(configurations, "logsearch-env")
+    putLogfeederEnvAttribute = self.putPropertyAttribute(configurations, "logfeeder-env")
+
     infraSolrHosts = self.getComponentHostNames(services, "AMBARI_INFRA", "INFRA_SOLR")
 
-    if infraSolrHosts is not None and len(infraSolrHosts) > 0 \
-      and "logsearch-properties" in services["configurations"]:
+    if infraSolrHosts is not None and len(infraSolrHosts) > 0 and "logsearch-properties" in services["configurations"]:
+      replicationReccomendFloat = math.log(len(infraSolrHosts), 5)
+      recommendedReplicationFactor = int(1 + math.floor(replicationReccomendFloat))
+      
       recommendedMinShards = len(infraSolrHosts)
       recommendedShards = 2 * len(infraSolrHosts)
       recommendedMaxShards = 3 * len(infraSolrHosts)
-      # recommend number of shard
-      putLogsearchAttribute = self.putPropertyAttribute(configurations, "logsearch-properties")
-      putLogsearchAttribute('logsearch.collection.service.logs.numshards', 'minimum', recommendedMinShards)
-      putLogsearchAttribute('logsearch.collection.service.logs.numshards', 'maximum', recommendedMaxShards)
-      putLogsearchProperty("logsearch.collection.service.logs.numshards", recommendedShards)
-
-      putLogsearchAttribute('logsearch.collection.audit.logs.numshards', 'minimum', recommendedMinShards)
-      putLogsearchAttribute('logsearch.collection.audit.logs.numshards', 'maximum', recommendedMaxShards)
-      putLogsearchProperty("logsearch.collection.audit.logs.numshards", recommendedShards)
-      # recommend replication factor
-      replicationReccomendFloat = math.log(len(infraSolrHosts), 5)
-      recommendedReplicationFactor = int(1 + math.floor(replicationReccomendFloat))
-      putLogsearchProperty("logsearch.collection.service.logs.replication.factor", recommendedReplicationFactor)
-      putLogsearchProperty("logsearch.collection.audit.logs.replication.factor", recommendedReplicationFactor)
+    else:
+      recommendedReplicationFactor = 2
+      
+      recommendedMinShards = 1
+      recommendedShards = 1
+      recommendedMaxShards = 100
+      
+      putLogsearchEnvProperty('logsearch_use_external_solr', 'true')
+      putLogsearchEnvAttribute('logsearch_use_external_solr', 'visible', 'false')
+
+    # recommend number of shard
+    putLogsearchAttribute('logsearch.collection.service.logs.numshards', 'minimum', recommendedMinShards)
+    putLogsearchAttribute('logsearch.collection.service.logs.numshards', 'maximum', recommendedMaxShards)
+    putLogsearchProperty("logsearch.collection.service.logs.numshards", recommendedShards)
+
+    putLogsearchAttribute('logsearch.collection.audit.logs.numshards', 'minimum', recommendedMinShards)
+    putLogsearchAttribute('logsearch.collection.audit.logs.numshards', 'maximum', recommendedMaxShards)
+    putLogsearchProperty("logsearch.collection.audit.logs.numshards", recommendedShards)
+    # recommend replication factor
+    putLogsearchProperty("logsearch.collection.service.logs.replication.factor", recommendedReplicationFactor)
+    putLogsearchProperty("logsearch.collection.audit.logs.replication.factor", recommendedReplicationFactor)
+    
+    kerberos_authentication_enabled = self.isSecurityEnabled(services)
+    if not kerberos_authentication_enabled:
+      putLogsearchEnvProperty('logsearch_external_solr_kerberos_enabled', 'false')
+      putLogsearchEnvAttribute('logsearch_external_solr_kerberos_enabled', 'visible', 'false')
+      putLogsearchEnvAttribute('logsearch_external_solr_kerberos_keytab', 'visible', 'false')
+      putLogsearchEnvAttribute('logsearch_external_solr_kerberos_principal', 'visible', 'false')
+      putLogfeederEnvAttribute('logfeeder_external_solr_kerberos_keytab', 'visible', 'false')
+      putLogfeederEnvAttribute('logfeeder_external_solr_kerberos_principal', 'visible', 'false')
 
   def validateTezConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
     validationItems = [ {"config-name": 'tez.am.resource.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'tez.am.resource.memory.mb')},
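
Note on the Log Search numbers above: the recommended replication factor is
1 + floor(log5(number of Infra Solr hosts)), the shard count is twice the host count, and the
allowed range is [host count, 3 * host count]; with no Infra Solr hosts the advisor falls back
to fixed defaults and switches Log Search to external-Solr mode. A small worked example:

    import math

    def logsearch_recommendations(infra_solr_host_count):
        if infra_solr_host_count > 0:
            replication = int(1 + math.floor(math.log(infra_solr_host_count, 5)))
            return {"replication.factor": replication,
                    "numshards": 2 * infra_solr_host_count,
                    "numshards.min": infra_solr_host_count,
                    "numshards.max": 3 * infra_solr_host_count}
        # no AMBARI_INFRA Solr in the cluster: fixed defaults, external Solr mode
        return {"replication.factor": 2, "numshards": 1,
                "numshards.min": 1, "numshards.max": 100}

    # logsearch_recommendations(3) -> replication 1, 6 shards (bounds 3..9)
    # logsearch_recommendations(5) -> replication 2, 10 shards (bounds 5..15)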

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.3/services/STORM/configuration/storm-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/STORM/configuration/storm-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/STORM/configuration/storm-site.xml
index b71f4a9..0a0c34f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/STORM/configuration/storm-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/STORM/configuration/storm-site.xml
@@ -67,6 +67,10 @@
         <type>application-properties</type>
         <name>atlas.authentication.method</name>
       </property>
+      <property>
+        <type>storm-env</type>
+        <name>storm.atlas.hook</name>
+      </property>
     </depends-on>
     <on-ambari-upgrade add="false"/>
   </property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
index 9376fed..cc53ae5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
@@ -81,7 +81,8 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
       "RANGER": self.recommendRangerConfigurations,
       "RANGER_KMS": self.recommendRangerKMSConfigurations,
       "STORM": self.recommendStormConfigurations,
-      "SQOOP": self.recommendSqoopConfigurations
+      "SQOOP": self.recommendSqoopConfigurations,
+      "FALCON": self.recommendFalconConfigurations
     }
     parentRecommendConfDict.update(childRecommendConfDict)
     return parentRecommendConfDict
@@ -221,6 +222,7 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
     super(HDP23StackAdvisor, self).recommendHIVEConfigurations(configurations, clusterData, services, hosts)
     putHiveSiteProperty = self.putProperty(configurations, "hive-site", services)
     putHiveServerProperty = self.putProperty(configurations, "hiveserver2-site", services)
+    putHiveEnvProperty = self.putProperty(configurations, "hive-env", services)
     putHiveSitePropertyAttribute = self.putPropertyAttribute(configurations, "hive-site")
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
     # hive_security_authorization == 'ranger'
@@ -260,7 +262,19 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
     hive_hooks = [x for x in hive_hooks if x != ""]
     is_atlas_present_in_cluster = "ATLAS" in servicesList
 
+    enable_atlas_hook = False
     if is_atlas_present_in_cluster:
+      putHiveEnvProperty("hive.atlas.hook", "true")
+    else:
+      putHiveEnvProperty("hive.atlas.hook", "false")
+
+    if ('hive-env' in services['configurations']) and ('hive.atlas.hook' in services['configurations']['hive-env']['properties']):
+      if 'hive-env' in configurations and 'hive.atlas.hook' in configurations['hive-env']['properties']:
+        enable_atlas_hook = configurations['hive-env']['properties']['hive.atlas.hook'] == "true"
+      elif 'hive-env' in services['configurations'] and 'hive.atlas.hook' in services['configurations']['hive-env']['properties']:
+        enable_atlas_hook = services['configurations']['hive-env']['properties']['hive.atlas.hook'] == "true"
+
+    if enable_atlas_hook:
       # Append atlas hook if not already present.
       is_atlas_hook_in_config = atlas_hook_class in hive_hooks
       if not is_atlas_hook_in_config:
@@ -756,7 +770,9 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
     knox_port = '8443'
     if 'KNOX' in servicesList:
       knox_hosts = self.getComponentHostNames(services, "KNOX", "KNOX_GATEWAY")
-      knox_host = knox_hosts[0]
+      if len(knox_hosts) > 0:
+        knox_hosts.sort()
+        knox_host = knox_hosts[0]
       if 'gateway-site' in services['configurations'] and 'gateway.port' in services['configurations']["gateway-site"]["properties"]:
         knox_port = services['configurations']["gateway-site"]["properties"]['gateway.port']
       putRangerAdminProperty('ranger.sso.providerurl', 'https://{0}:{1}/gateway/knoxsso/api/v1/websso'.format(knox_host, knox_port))
@@ -793,14 +809,28 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
 
   def recommendSqoopConfigurations(self, configurations, clusterData, services, hosts):
     putSqoopSiteProperty = self.putProperty(configurations, "sqoop-site", services)
+    putSqoopEnvProperty = self.putProperty(configurations, "sqoop-env", services)
 
+    enable_atlas_hook = False
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
     if "ATLAS" in servicesList:
+      putSqoopEnvProperty("sqoop.atlas.hook", "true")
+    else:
+      putSqoopEnvProperty("sqoop.atlas.hook", "false")
+
+    if ('sqoop-env' in services['configurations']) and ('sqoop.atlas.hook' in services['configurations']['sqoop-env']['properties']):
+      if 'sqoop-env' in configurations and 'sqoop.atlas.hook' in configurations['sqoop-env']['properties']:
+        enable_atlas_hook = configurations['sqoop-env']['properties']['sqoop.atlas.hook'] == "true"
+      elif 'sqoop-env' in services['configurations'] and 'sqoop.atlas.hook' in services['configurations']['sqoop-env']['properties']:
+        enable_atlas_hook = services['configurations']['sqoop-env']['properties']['sqoop.atlas.hook'] == "true"
+
+    if enable_atlas_hook:
       putSqoopSiteProperty('sqoop.job.data.publish.class', 'org.apache.atlas.sqoop.hook.SqoopHook')
 
   def recommendStormConfigurations(self, configurations, clusterData, services, hosts):
     super(HDP23StackAdvisor, self).recommendStormConfigurations(configurations, clusterData, services, hosts)
     putStormStartupProperty = self.putProperty(configurations, "storm-site", services)
+    putStormEnvProperty = self.putProperty(configurations, "storm-env", services)
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
 
     if "storm-site" in services["configurations"]:
@@ -816,11 +846,23 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
       atlas_is_present = "ATLAS" in servicesList
       atlas_hook_class = "org.apache.atlas.storm.hook.StormAtlasHook"
       atlas_hook_is_set = atlas_hook_class in notifier_plugin_value
+      enable_atlas_hook = False
+
+      if atlas_is_present:
+        putStormEnvProperty("storm.atlas.hook", "true")
+      else:
+        putStormEnvProperty("storm.atlas.hook", "false")
 
-      if atlas_is_present and not atlas_hook_is_set:
+      if ('storm-env' in services['configurations']) and ('storm.atlas.hook' in services['configurations']['storm-env']['properties']):
+        if 'storm-env' in configurations and 'storm.atlas.hook' in configurations['storm-env']['properties']:
+          enable_atlas_hook = configurations['storm-env']['properties']['storm.atlas.hook'] == "true"
+        elif 'storm-env' in services['configurations'] and 'storm.atlas.hook' in services['configurations']['storm-env']['properties']:
+          enable_atlas_hook = services['configurations']['storm-env']['properties']['storm.atlas.hook'] == "true"
+
+      if enable_atlas_hook and not atlas_hook_is_set:
         notifier_plugin_value = atlas_hook_class if notifier_plugin_value == " " else ",".join([notifier_plugin_value, atlas_hook_class])
 
-      if not atlas_is_present and atlas_hook_is_set:
+      if not enable_atlas_hook and atlas_hook_is_set:
         application_classes = [item for item in notifier_plugin_value.split(",") if item != atlas_hook_class and item != " "]
         notifier_plugin_value = ",".join(application_classes) if application_classes else " "
 
@@ -830,6 +872,17 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
         putStormStartupPropertyAttribute = self.putPropertyAttribute(configurations, "storm-site")
         putStormStartupPropertyAttribute(notifier_plugin_property, 'delete', 'true')
 
+  def recommendFalconConfigurations(self, configurations, clusterData, services, hosts):
+
+    putFalconEnvProperty = self.putProperty(configurations, "falcon-env", services)
+    enable_atlas_hook = False
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+
+    if "ATLAS" in servicesList:
+      putFalconEnvProperty("falcon.atlas.hook", "true")
+    else:
+      putFalconEnvProperty("falcon.atlas.hook", "false")
+
   def getServiceConfigurationValidators(self):
     parentValidators = super(HDP23StackAdvisor, self).getServiceConfigurationValidators()
     childValidators = {
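
Note on the Atlas hook pattern: the Hive, Sqoop and Storm recommenders above all follow the
same two steps - default the *-env Atlas-hook flag from whether ATLAS is in the service list,
then honour any explicit value already present before touching the hook class lists. A
condensed sketch of that lookup order (helper and argument names are illustrative, not part
of this patch):

    def is_atlas_hook_enabled(config_type, prop_name, configurations, services):
        # A value recommended in this pass (configurations) wins over the value
        # currently stored on the cluster (services['configurations']).
        for source in (configurations, services.get("configurations", {})):
            props = source.get(config_type, {}).get("properties", {})
            if prop_name in props:
                return props[prop_name] == "true"
        return False

    # is_atlas_hook_enabled("hive-env", "hive.atlas.hook", configurations, services)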

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml
index d824309..d274135 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml
@@ -36,8 +36,9 @@
       <supports-auto-skip-failure>false</supports-auto-skip-failure>
 
       <execute-stage service="YARN" component="RESOURCEMANAGER" title="Stop YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
         <task xsi:type="manual">
-          <message>Before continuing, please stop all YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
+          <message>Before continuing, please stop all YARN queues.</message>
         </task>
       </execute-stage>
 
@@ -416,6 +417,20 @@
       </service>
     </group>
 
+    <group xsi:type="cluster" name="START_YARN_QUEUES" title="Start YARN Queues">
+      <direction>UPGRADE</direction>
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <parallel-scheduler/>
+
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
+        <task xsi:type="manual">
+          <message>Before continuing, please start all YARN queues.</message>
+        </task>
+      </execute-stage>
+    </group>
+
     <group xsi:type="restart" name="HBASE" title="HBASE">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -633,16 +648,6 @@
       </execute-stage>
     </group>
 
-    <group xsi:type="cluster" name="MANUAL_STEPS" title="Finishing Upgrade">
-      <direction>UPGRADE</direction>
-
-      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
-        <task xsi:type="manual">
-          <message>Please start previously stopped YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
-        </task>
-      </execute-stage>
-    </group>
-
     <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}">
       <skippable>true</skippable>
       <supports-auto-skip-failure>false</supports-auto-skip-failure>
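
The same change is applied to the 2.4, 2.5 and 2.6 nonrolling upgrade packs below (and to the
HDP 2.4 pack further down): the manual stop/start YARN-queue prompts gain a config condition so
they only appear when yarn.resourcemanager.work-preserving-recovery.enabled is not set to true,
and the start prompt moves out of the MANUAL_STEPS finishing group into a dedicated
START_YARN_QUEUES group that runs before the HBASE restart group.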

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml
index 882e78b..8c9414a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml
@@ -36,8 +36,9 @@
       <supports-auto-skip-failure>false</supports-auto-skip-failure>
 
       <execute-stage service="YARN" component="RESOURCEMANAGER" title="Stop YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
         <task xsi:type="manual">
-          <message>Before continuing, please stop all YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
+          <message>Before continuing, please stop all YARN queues.</message>
         </task>
       </execute-stage>
 
@@ -467,6 +468,20 @@
       </service>
     </group>
 
+    <group xsi:type="cluster" name="START_YARN_QUEUES" title="Start YARN Queues">
+      <direction>UPGRADE</direction>
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <parallel-scheduler/>
+
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
+        <task xsi:type="manual">
+          <message>Before continuing, please start all YARN queues.</message>
+        </task>
+      </execute-stage>
+    </group>
+
     <group xsi:type="restart" name="HBASE" title="HBASE">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -684,16 +699,6 @@
       </execute-stage>
     </group>
 
-    <group xsi:type="cluster" name="MANUAL_STEPS" title="Finishing Upgrade">
-      <direction>UPGRADE</direction>
-
-      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
-        <task xsi:type="manual">
-          <message>Please start previously stopped YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
-        </task>
-      </execute-stage>
-    </group>
-
     <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}">
       <skippable>true</skippable>
       <supports-auto-skip-failure>false</supports-auto-skip-failure>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml
index 66c0a70..b7182d1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml
@@ -47,8 +47,9 @@
       <supports-auto-skip-failure>false</supports-auto-skip-failure>
 
       <execute-stage service="YARN" component="RESOURCEMANAGER" title="Stop YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
         <task xsi:type="manual">
-          <message>Before continuing, please stop all YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
+          <message>Before continuing, please stop all YARN queues.</message>
         </task>
       </execute-stage>
 
@@ -616,6 +617,20 @@
       </service>
     </group>
 
+    <group xsi:type="cluster" name="START_YARN_QUEUES" title="Start YARN Queues">
+      <direction>UPGRADE</direction>
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <parallel-scheduler/>
+
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
+        <task xsi:type="manual">
+          <message>Before continuing, please start all YARN queues.</message>
+        </task>
+      </execute-stage>
+    </group>
+
     <group xsi:type="restart" name="HBASE" title="HBASE">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -833,16 +848,6 @@
       </execute-stage>
     </group>
 
-    <group xsi:type="cluster" name="MANUAL_STEPS" title="Finishing Upgrade">
-      <direction>UPGRADE</direction>
-
-      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
-        <task xsi:type="manual">
-          <message>Please start previously stopped YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
-        </task>
-      </execute-stage>
-    </group>
-
     <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}">
       <skippable>true</skippable>
       <supports-auto-skip-failure>false</supports-auto-skip-failure>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
index 019c76e..155aaf9 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
@@ -48,8 +48,9 @@
       <supports-auto-skip-failure>false</supports-auto-skip-failure>
 
       <execute-stage service="YARN" component="RESOURCEMANAGER" title="Stop YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
         <task xsi:type="manual">
-          <message>Before continuing, please stop all YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
+          <message>Before continuing, please stop all YARN queues.</message>
         </task>
       </execute-stage>
 
@@ -612,6 +613,20 @@
       </service>
     </group>
 
+    <group xsi:type="cluster" name="START_YARN_QUEUES" title="Start YARN Queues">
+      <direction>UPGRADE</direction>
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <parallel-scheduler/>
+
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
+        <task xsi:type="manual">
+          <message>Before continuing, please start all YARN queues.</message>
+        </task>
+      </execute-stage>
+    </group>
+
     <group xsi:type="restart" name="HBASE" title="HBASE">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -829,16 +844,6 @@
       </execute-stage>
     </group>
 
-    <group xsi:type="cluster" name="MANUAL_STEPS" title="Finishing Upgrade">
-      <direction>UPGRADE</direction>
-
-      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
-        <task xsi:type="manual">
-          <message>Please start previously stopped YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
-        </task>
-      </execute-stage>
-    </group>
-
     <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}">
       <skippable>true</skippable>
       <supports-auto-skip-failure>false</supports-auto-skip-failure>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml
index 4d1b5f1..bb70f6a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml
@@ -435,7 +435,7 @@
       </execute-stage>
     </group>
 
-    <group xsi:type="stop" name="STOP_STORM" title="Stop Storm Services">
+    <group xsi:type="stop" name="STOP_STORM_UPGRADE" title="Stop Storm Services">
       <direction>UPGRADE</direction>
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -457,7 +457,7 @@
       </service>
     </group>
 
-    <group xsi:type="stop" name="STOP_STORM" title="Stop Storm Services">
+    <group xsi:type="stop" name="STOP_STORM_DOWNGRADE" title="Stop Storm Services">
       <direction>DOWNGRADE</direction>
       <service-check>false</service-check>
       <skippable>true</skippable>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
index 02cef57..c8baea1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
@@ -436,7 +436,7 @@
       </execute-stage>
     </group>
 
-    <group xsi:type="stop" name="STOP_STORM" title="Stop Storm Services">
+    <group xsi:type="stop" name="STOP_STORM_UPGRADE" title="Stop Storm Services">
       <direction>UPGRADE</direction>
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -458,7 +458,7 @@
       </service>
     </group>
 
-    <group xsi:type="stop" name="STOP_STORM" title="Stop Storm Services">
+    <group xsi:type="stop" name="STOP_STORM_DOWNGRADE" title="Stop Storm Services">
       <direction>DOWNGRADE</direction>
       <service-check>false</service-check>
       <skippable>true</skippable>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.4/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/services/HDFS/configuration/hadoop-env.xml
new file mode 100644
index 0000000..24e0193
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/services/HDFS/configuration/hadoop-env.xml
@@ -0,0 +1,176 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <!-- hadoop-env.sh -->
+  <property>
+    <name>content</name>
+    <display-name>hadoop-env template</display-name>
+    <description>This is the jinja template for hadoop-env.sh file</description>
+    <value>
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop home directory
+export HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+# Hadoop Configuration Directory
+
+{# this is different for HDP1 #}
+# Path to jsvc required by secure HDP 2.0 datanode
+export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+
+{% if java_version &lt; 8 %}
+SHARED_HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT"
+export HADOOP_NAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_DATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\" ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS"
+
+{% else %}
+SHARED_HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT"
+export HADOOP_NAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_DATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\" ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+{% endif %}
+
+HADOOP_NFS3_OPTS="-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Add database libraries
+JAVA_JDBC_LIBS=""
+if [ -d "/usr/share/java" ]; then
+  for jarFile in `ls /usr/share/java | grep -E "(mysql|ojdbc|postgresql|sqljdbc)" 2&gt;/dev/null`
+  do
+    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+  done
+fi
+
+# Add libraries to the hadoop classpath - some may not need a colon as they already include it
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+# Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}
+
+export HADOOP_OPTS="-Dhdp.version=$HDP_VERSION $HADOOP_OPTS"
+
+
+# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. 
+# Makes sense to fix only when running DN as root
+if [ "$command" == "datanode" ] &amp;&amp; [ "$EUID" -eq 0 ] &amp;&amp; [ -n "$HADOOP_SECURE_DN_USER" ]; then
+  {% if is_datanode_max_locked_memory_set %}
+  ulimit -l {{datanode_max_locked_memory}}
+  {% endif %}
+  ulimit -n {{hdfs_user_nofile_limit}}
+fi
+    </value>
+    <value-attributes>
+      <type>content</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>nfsgateway_heapsize</name>
+    <display-name>NFSGateway maximum Java heap size</display-name>
+    <value>1024</value>
+    <description>Maximum Java heap size for NFSGateway (Java option -Xmx)</description>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.4.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.4.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.4.xml
index b7d7983..b9a7e1e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.4.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.4.xml
@@ -35,8 +35,9 @@
       <supports-auto-skip-failure>false</supports-auto-skip-failure>
 
       <execute-stage service="YARN" component="RESOURCEMANAGER" title="Stop YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
         <task xsi:type="manual">
-          <message>Before continuing, please stop all YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
+          <message>Before continuing, please stop all YARN queues.</message>
         </task>
       </execute-stage>
 
@@ -409,6 +410,20 @@
       </service>
     </group>
 
+    <group xsi:type="cluster" name="START_YARN_QUEUES" title="Start YARN Queues">
+      <direction>UPGRADE</direction>
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <parallel-scheduler/>
+
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
+        <task xsi:type="manual">
+          <message>Before continuing, please start all YARN queues.</message>
+        </task>
+      </execute-stage>
+    </group>
+
     <group xsi:type="restart" name="HBASE" title="HBASE">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -626,16 +641,6 @@
       </execute-stage>
     </group>
 
-    <group xsi:type="cluster" name="MANUAL_STEPS" title="Finishing Upgrade">
-      <direction>UPGRADE</direction>
-
-      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
-        <task xsi:type="manual">
-          <message>Please start previously stopped YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
-        </task>
-      </execute-stage>
-    </group>
-
     <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}">
       <skippable>true</skippable>
       <supports-auto-skip-failure>false</supports-auto-skip-failure>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml
index 5183580..2b768ea 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml
@@ -43,8 +43,9 @@
       <supports-auto-skip-failure>false</supports-auto-skip-failure>
 
       <execute-stage service="YARN" component="RESOURCEMANAGER" title="Stop YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
         <task xsi:type="manual">
-          <message>Before continuing, please stop all YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
+          <message>Before continuing, please stop all YARN queues.</message>
         </task>
       </execute-stage>
 
@@ -580,6 +581,20 @@
       </service>
     </group>
 
+    <group xsi:type="cluster" name="START_YARN_QUEUES" title="Start YARN Queues">
+      <direction>UPGRADE</direction>
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <parallel-scheduler/>
+
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
+        <task xsi:type="manual">
+          <message>Before continuing, please start all YARN queues.</message>
+        </task>
+      </execute-stage>
+    </group>
+
     <group xsi:type="restart" name="HBASE" title="HBASE">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -797,16 +812,6 @@
       </execute-stage>
     </group>
 
-    <group xsi:type="cluster" name="MANUAL_STEPS" title="Finishing Upgrade">
-      <direction>UPGRADE</direction>
-
-      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
-        <task xsi:type="manual">
-          <message>Please start previously stopped YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
-        </task>
-      </execute-stage>
-    </group>
-
     <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}">
       <skippable>true</skippable>
       <supports-auto-skip-failure>false</supports-auto-skip-failure>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
index 2a1ecf7..92ce832 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
@@ -48,8 +48,9 @@
       <supports-auto-skip-failure>false</supports-auto-skip-failure>
 
       <execute-stage service="YARN" component="RESOURCEMANAGER" title="Stop YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
         <task xsi:type="manual">
-          <message>Before continuing, please stop all YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
+          <message>Before continuing, please stop all YARN queues.</message>
         </task>
       </execute-stage>
 
@@ -567,6 +568,20 @@
       </service>
     </group>
 
+    <group xsi:type="cluster" name="START_YARN_QUEUES" title="Start YARN Queues">
+      <direction>UPGRADE</direction>
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <parallel-scheduler/>
+
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
+        <task xsi:type="manual">
+          <message>Before continuing, please start all YARN queues.</message>
+        </task>
+      </execute-stage>
+    </group>
+
     <group xsi:type="restart" name="HBASE" title="HBASE">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -784,16 +799,6 @@
       </execute-stage>
     </group>
 
-    <group xsi:type="cluster" name="MANUAL_STEPS" title="Finishing Upgrade">
-      <direction>UPGRADE</direction>
-
-      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
-        <task xsi:type="manual">
-          <message>Please start previously stopped YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
-        </task>
-      </execute-stage>
-    </group>
-
     <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}">
       <skippable>true</skippable>
       <supports-auto-skip-failure>false</supports-auto-skip-failure>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml
index 201150d..39b7a4d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml
@@ -420,7 +420,7 @@
       </execute-stage>
     </group>
 
-    <group xsi:type="stop" name="STOP_STORM" title="Stop Storm Services">
+    <group xsi:type="stop" name="STOP_STORM_UPGRADE" title="Stop Storm Services">
       <direction>UPGRADE</direction>
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -442,7 +442,7 @@
       </service>
     </group>
 
-    <group xsi:type="stop" name="STOP_STORM" title="Stop Storm Services">
+    <group xsi:type="stop" name="STOP_STORM_DOWNGRADE" title="Stop Storm Services">
       <direction>DOWNGRADE</direction>
       <service-check>false</service-check>
       <skippable>true</skippable>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
index cb16953..2fd7a7a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
@@ -425,7 +425,7 @@
       </execute-stage>
     </group>
 
-    <group xsi:type="stop" name="STOP_STORM" title="Stop Storm Services">
+    <group xsi:type="stop" name="STOP_STORM_UPGRADE" title="Stop Storm Services">
       <direction>UPGRADE</direction>
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -447,7 +447,7 @@
       </service>
     </group>
 
-    <group xsi:type="stop" name="STOP_STORM" title="Stop Storm Services">
+    <group xsi:type="stop" name="STOP_STORM_DOWNGRADE" title="Stop Storm Services">
       <direction>DOWNGRADE</direction>
       <service-check>false</service-check>
       <skippable>true</skippable>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
index 87ede63..9ddb667 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
@@ -44,5 +44,17 @@
         </changes>
       </component>
     </service>
+
+    <service name="SPARK">
+      <component name="LIVY_SERVER">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_5_0_0_rename_spark_livy_configs">
+            <type>livy-conf</type>
+            <transfer operation="move" from-key="livy.server.kerberos.keytab" to-key="livy.server.launch.kerberos.keytab" />
+            <transfer operation="move" from-key="livy.server.kerberos.principal" to-key="livy.server.launch.kerberos.principal" />
+          </definition>
+        </changes>
+      </component>
+    </service>
   </services>
 </upgrade-config-changes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.5.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.5.xml
index 414ce15..6bca487 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.5.xml
@@ -36,8 +36,9 @@
       <supports-auto-skip-failure>false</supports-auto-skip-failure>
 
       <execute-stage service="YARN" component="RESOURCEMANAGER" title="Stop YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
         <task xsi:type="manual">
-          <message>Before continuing, please stop all YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
+          <message>Before continuing, please stop all YARN queues.</message>
         </task>
       </execute-stage>
 
@@ -441,6 +442,20 @@
       </service>
     </group>
 
+    <group xsi:type="cluster" name="START_YARN_QUEUES" title="Start YARN Queues">
+      <direction>UPGRADE</direction>
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <parallel-scheduler/>
+
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
+        <task xsi:type="manual">
+          <message>Before continuing, please start all YARN queues.</message>
+        </task>
+      </execute-stage>
+    </group>
+
     <group xsi:type="restart" name="HBASE" title="HBASE">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -692,16 +707,6 @@
       </execute-stage>
     </group>
 
-    <group xsi:type="cluster" name="MANUAL_STEPS" title="Finishing Upgrade">
-      <direction>UPGRADE</direction>
-
-      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
-        <task xsi:type="manual">
-          <message>Please start previously stopped YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
-        </task>
-      </execute-stage>
-    </group>
-
     <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}">
       <skippable>true</skippable>
       <supports-auto-skip-failure>false</supports-auto-skip-failure>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
index fc4c8c5..66f872d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
@@ -36,8 +36,9 @@
       <supports-auto-skip-failure>false</supports-auto-skip-failure>
 
       <execute-stage service="YARN" component="RESOURCEMANAGER" title="Stop YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
         <task xsi:type="manual">
-          <message>Before continuing, please stop all YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
+          <message>Before continuing, please stop all YARN queues.</message>
         </task>
       </execute-stage>
 
@@ -59,6 +60,10 @@
           <function>prepare</function>
         </task>
       </execute-stage>
+
+      <execute-stage service="SPARK" component="LIVY_SERVER" title="Apply config changes for Livy Server">
+        <task xsi:type="configure" id="hdp_2_5_0_0_rename_spark_livy_configs"/>
+      </execute-stage>
     </group>
 
     <group xsi:type="stop" name="STOP_HIGH_LEVEL_SERVICE_COMPONENTS" title="Stop Components for High-Level Services">
@@ -436,6 +441,20 @@
       </service>
     </group>
 
+    <group xsi:type="cluster" name="START_YARN_QUEUES" title="Start YARN Queues">
+      <direction>UPGRADE</direction>
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <parallel-scheduler/>
+
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
+        <task xsi:type="manual">
+          <message>Before continuing, please start all YARN queues.</message>
+        </task>
+      </execute-stage>
+    </group>
+
     <group xsi:type="restart" name="HBASE" title="HBASE">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -687,16 +706,6 @@
       </execute-stage>
     </group>
 
-    <group xsi:type="cluster" name="MANUAL_STEPS" title="Finishing Upgrade">
-      <direction>UPGRADE</direction>
-
-      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
-        <task xsi:type="manual">
-          <message>Please start previously stopped YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
-        </task>
-      </execute-stage>
-    </group>
-
     <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}">
       <skippable>true</skippable>
       <supports-auto-skip-failure>false</supports-auto-skip-failure>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
index eb4309a..1f7c1a8 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
@@ -761,6 +761,9 @@
         </upgrade>
       </component>
       <component name="LIVY_SERVER">
+        <pre-upgrade>
+          <task xsi:type="configure" id="hdp_2_5_0_0_rename_spark_livy_configs" />
+        </pre-upgrade>
         <upgrade>
           <task xsi:type="restart-task" />
         </upgrade>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.6/services/ACCUMULO/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/ACCUMULO/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/ACCUMULO/metainfo.xml
index ec81f3e..3f619af 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/ACCUMULO/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/ACCUMULO/metainfo.xml
@@ -20,7 +20,7 @@
   <services>
     <service>
       <name>ACCUMULO</name>
-      <version>1.7.0.2.5</version>
+      <version>1.7.0.2.6</version>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.6/services/FALCON/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/FALCON/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/FALCON/metainfo.xml
index 0b54385..b449388 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/FALCON/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/FALCON/metainfo.xml
@@ -20,7 +20,7 @@
   <services>
     <service>
       <name>FALCON</name>
-      <version>0.10.0.2.5</version>
+      <version>0.10.0.2.6</version>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.6/services/FLUME/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/FLUME/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/FLUME/metainfo.xml
index 33ceb43..0d7cd1f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/FLUME/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/FLUME/metainfo.xml
@@ -20,7 +20,7 @@
   <services>
     <service>
       <name>FLUME</name>
-      <version>1.5.2.2.5</version>
+      <version>1.5.2.2.6</version>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.6/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HBASE/metainfo.xml
index 0feaa5e..9bc3ee2 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HBASE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HBASE/metainfo.xml
@@ -20,7 +20,7 @@
   <services>
     <service>
       <name>HBASE</name>
-      <version>1.1.2.2.5</version>
+      <version>1.1.2.2.6</version>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/metainfo.xml
index a3e4a64..1fc7f51 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/metainfo.xml
@@ -20,7 +20,7 @@
   <services>
     <service>
       <name>HDFS</name>
-      <version>2.7.1.2.5</version>
+      <version>2.7.3.2.6</version>
     </service>
   </services>
 </metainfo>


[23/25] ambari git commit: Merge from branch-2.5

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/AlertsDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/AlertsDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/AlertsDAO.java
index d187947..e8c4b5f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/AlertsDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/AlertsDAO.java
@@ -1492,7 +1492,7 @@ public class AlertsDAO implements Cleanable {
    * @return a long representing the number of affected (deleted) records
    */
   @Transactional
-  private int cleanAlertNoticesForClusterBeforeDate(Long clusterId, long beforeDateMillis) {
+  int cleanAlertNoticesForClusterBeforeDate(Long clusterId, long beforeDateMillis) {
     LOG.info("Deleting AlertNotice entities before date " + new Date(beforeDateMillis));
     EntityManager entityManager = m_entityManagerProvider.get();
     List<Integer> ids = findAllAlertHistoryIdsBeforeDate(clusterId, beforeDateMillis);
@@ -1523,7 +1523,7 @@ public class AlertsDAO implements Cleanable {
    * @return a long representing the number of affected (deleted) records
    */
   @Transactional
-  private int cleanAlertCurrentsForClusterBeforeDate(long clusterId, long beforeDateMillis) {
+  int cleanAlertCurrentsForClusterBeforeDate(long clusterId, long beforeDateMillis) {
     LOG.info("Deleting AlertCurrent entities before date " + new Date(beforeDateMillis));
     EntityManager entityManager = m_entityManagerProvider.get();
     List<Integer> ids = findAllAlertHistoryIdsBeforeDate(clusterId, beforeDateMillis);
@@ -1553,7 +1553,7 @@ public class AlertsDAO implements Cleanable {
    */
 
   @Transactional
-  private int cleanAlertHistoriesForClusterBeforeDate(Long clusterId, long beforeDateMillis) {
+  int cleanAlertHistoriesForClusterBeforeDate(Long clusterId, long beforeDateMillis) {
     return executeQuery("AlertHistoryEntity.removeInClusterBeforeDate", AlertHistoryEntity.class, clusterId, beforeDateMillis);
   }
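
A note on the visibility change above: if the @Transactional annotation on these cleanup methods is applied through Guice-style method interception (the relevant import lies outside this hunk, so this is an assumption), then private methods cannot be intercepted at all; only non-private, non-final methods actually get a transaction opened around them. Widening the methods to package-private keeps them hidden from other packages while letting the interceptor do its work. A minimal sketch of the pattern; the class and method names are hypothetical and not part of the commit:

    import com.google.inject.persist.Transactional;

    public class ExampleCleanupDao {

      // Package-private: the Guice-generated subclass can override this method
      // and wrap the call in a transaction before delegating to it.
      @Transactional
      int deleteEntriesBefore(long beforeDateMillis) {
        // issue the bulk DELETE through the injected EntityManager here
        return 0;
      }

      // A private @Transactional method would compile, but the interceptor could
      // not see it, so the annotation would silently have no effect.
    }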
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigMappingEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigMappingEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigMappingEntity.java
index 04c6030..5748dc9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigMappingEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigMappingEntity.java
@@ -27,15 +27,17 @@ import javax.persistence.NamedQueries;
 import javax.persistence.NamedQuery;
 import javax.persistence.Table;
 
+import com.google.common.base.Objects;
+
 /**
  * Entity that maps to a cluster config mapping.
  */
-@Table(name = "clusterconfigmapping")
 @Entity
+@Table(name = "clusterconfigmapping")
 @IdClass(ClusterConfigMappingEntityPK.class)
-@NamedQueries({
-  @NamedQuery(name = "ClusterConfigMappingEntity.findLatestClusterConfigMappingsByType",
-    query = "SELECT mapping FROM ClusterConfigMappingEntity mapping WHERE mapping.clusterId = :clusterId AND mapping.selectedInd > 0 AND mapping.typeName = :typeName")})
+@NamedQueries({ @NamedQuery(
+    name = "ClusterConfigMappingEntity.findLatestClusterConfigMappingsByType",
+    query = "SELECT mapping FROM ClusterConfigMappingEntity mapping WHERE mapping.clusterId = :clusterId AND mapping.selectedInd > 0 AND mapping.typeName = :typeName") })
 
 public class ClusterConfigMappingEntity {
 
@@ -192,4 +194,14 @@ public class ClusterConfigMappingEntity {
 
     return true;
   }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public String toString() {
+    return Objects.toStringHelper(this).add("clusterId", clusterId).add("type", typeName).add("tag",
+        tag).add("selected", selectedInd).add("created", createTimestamp).toString();
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/GroupEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/GroupEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/GroupEntity.java
index 58b2e5d..dc71b61 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/GroupEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/GroupEntity.java
@@ -101,7 +101,7 @@ public class GroupEntity {
     return ldapGroup == 0 ? Boolean.FALSE : Boolean.TRUE;
   }
 
-  public void setLdapGroup(Boolean ldapGroup) {
+  private void setLdapGroup(Boolean ldapGroup) {
     if (ldapGroup == null) {
       this.ldapGroup = null;
     } else {
@@ -113,8 +113,9 @@ public class GroupEntity {
     return groupType;
   }
 
-  public void setgroupType(GroupType groupType) {
+  public void setGroupType(GroupType groupType) {
     this.groupType = groupType;
+    setLdapGroup(groupType == GroupType.LDAP);
   }
 
   public Set<MemberEntity> getMemberEntities() {

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java b/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java
index 2f7bdd0..4b3237b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java
@@ -49,10 +49,8 @@ import org.apache.ambari.server.orm.entities.PermissionEntity;
 import org.apache.ambari.server.orm.entities.PrincipalEntity;
 import org.apache.ambari.server.orm.entities.PrincipalTypeEntity;
 import org.apache.ambari.server.orm.entities.PrivilegeEntity;
-import org.apache.ambari.server.orm.entities.ResourceEntity;
 import org.apache.ambari.server.orm.entities.ResourceTypeEntity;
 import org.apache.ambari.server.orm.entities.UserEntity;
-import org.apache.ambari.server.security.ClientSecurityType;
 import org.apache.ambari.server.security.ldap.LdapBatchDto;
 import org.apache.ambari.server.security.ldap.LdapUserGroupMemberDto;
 import org.apache.commons.lang.StringUtils;
@@ -277,7 +275,7 @@ public class Users {
   public synchronized void setGroupLdap(String groupName) throws AmbariException {
     GroupEntity groupEntity = groupDAO.findGroupByName(groupName);
     if (groupEntity != null) {
-      groupEntity.setLdapGroup(true);
+      groupEntity.setGroupType(GroupType.LDAP);
       groupDAO.merge(groupEntity);
     } else {
       throw new AmbariException("Group " + groupName + " doesn't exist");
@@ -435,7 +433,7 @@ public class Users {
     final GroupEntity groupEntity = new GroupEntity();
     groupEntity.setGroupName(groupName);
     groupEntity.setPrincipal(principalEntity);
-    groupEntity.setgroupType(groupType);
+    groupEntity.setGroupType(groupType);
 
     groupDAO.create(groupEntity);
   }
@@ -701,7 +699,7 @@ public class Users {
     final Set<GroupEntity> groupsToBecomeLdap = new HashSet<GroupEntity>();
     for (String groupName : batchInfo.getGroupsToBecomeLdap()) {
       final GroupEntity groupEntity = groupDAO.findGroupByName(groupName);
-      groupEntity.setLdapGroup(true);
+      groupEntity.setGroupType(GroupType.LDAP);
       allGroups.put(groupEntity.getGroupName(), groupEntity);
       groupsToBecomeLdap.add(groupEntity);
     }
@@ -737,7 +735,7 @@ public class Users {
       final GroupEntity groupEntity = new GroupEntity();
       groupEntity.setGroupName(groupName);
       groupEntity.setPrincipal(principalEntity);
-      groupEntity.setLdapGroup(true);
+      groupEntity.setGroupType(GroupType.LDAP);
 
       allGroups.put(groupEntity.getGroupName(), groupEntity);
       groupsToCreate.add(groupEntity);
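
The call-site changes above pair with the GroupEntity change earlier in this commit: the LDAP flag is now derived inside setGroupType, and the old setLdapGroup setter is private, so the flag and the group type can no longer be updated independently and drift apart. A short illustrative usage; the group name is a hypothetical example:

    GroupEntity group = new GroupEntity();
    group.setGroupName("ops-admins");        // hypothetical group name
    group.setGroupType(GroupType.LDAP);      // also marks the entity as an LDAP group

    // There is no longer a public way to flip the LDAP flag without
    // changing the group type, which is what these call sites rely on.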

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java
index 5459ddb..97280ee 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java
@@ -451,7 +451,7 @@ public class ConfigureAction extends AbstractServerAction {
     // of creating a whole new history record since it was already done
     if (!targetStack.equals(currentStack) && targetStack.equals(configStack)) {
       config.setProperties(newValues);
-      config.persist(false);
+      config.save();
 
       return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", outputBuffer.toString(), "");
     }
@@ -570,8 +570,9 @@ public class ConfigureAction extends AbstractServerAction {
 
     for(Replace replacement: replacements){
       if(isOperationAllowed(cluster, configType, replacement.key,
-          replacement.ifKey, replacement.ifType, replacement.ifValue, replacement.ifKeyState))
+          replacement.ifKey, replacement.ifType, replacement.ifValue, replacement.ifKeyState)) {
         allowedReplacements.add(replacement);
+      }
     }
 
     return allowedReplacements;
@@ -582,8 +583,9 @@ public class ConfigureAction extends AbstractServerAction {
 
     for(ConfigurationKeyValue configurationKeyValue: sets){
       if(isOperationAllowed(cluster, configType, configurationKeyValue.key,
-          configurationKeyValue.ifKey, configurationKeyValue.ifType, configurationKeyValue.ifValue, configurationKeyValue.ifKeyState))
+          configurationKeyValue.ifKey, configurationKeyValue.ifType, configurationKeyValue.ifValue, configurationKeyValue.ifKeyState)) {
         allowedSets.add(configurationKeyValue);
+      }
     }
 
     return allowedSets;
@@ -593,14 +595,16 @@ public class ConfigureAction extends AbstractServerAction {
     List<Transfer> allowedTransfers = new ArrayList<>();
     for (Transfer transfer : transfers) {
       String key = "";
-      if(transfer.operation == TransferOperation.DELETE)
+      if(transfer.operation == TransferOperation.DELETE) {
         key = transfer.deleteKey;
-      else
+      } else {
         key = transfer.fromKey;
+      }
 
       if(isOperationAllowed(cluster, configType, key,
-          transfer.ifKey, transfer.ifType, transfer.ifValue, transfer.ifKeyState))
+          transfer.ifKey, transfer.ifType, transfer.ifValue, transfer.ifKeyState)) {
         allowedTransfers.add(transfer);
+      }
     }
 
     return allowedTransfers;
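
The persist(false) to save() switch in this hunk is the same mechanical change applied to the other upgrade server actions later in this commit: fetch the desired config, mutate its property map in memory, then write it back. A condensed sketch of that flow; the config type and property key are hypothetical, and only the Cluster and Config calls mirror the diffs:

    import java.util.Map;

    import org.apache.ambari.server.state.Cluster;
    import org.apache.ambari.server.state.Config;

    final class ExampleConfigUpdate {
      // Flip one property on a config type and persist it, mirroring the pattern above.
      static void enableExampleFlag(Cluster cluster) {
        Config config = cluster.getDesiredConfigByType("example-site");  // hypothetical type
        Map<String, String> properties = config.getProperties();

        properties.put("example.flag", "true");                          // hypothetical key

        config.setProperties(properties);
        config.save();  // where these actions previously called config.persist(false)
      }
    }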

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixLzoCodecPath.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixLzoCodecPath.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixLzoCodecPath.java
index ffa21ab..4833729 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixLzoCodecPath.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixLzoCodecPath.java
@@ -18,7 +18,11 @@
 
 package org.apache.ambari.server.serveraction.upgrades;
 
-import com.google.inject.Inject;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
@@ -28,13 +32,7 @@ import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.commons.lang.StringUtils;
 
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
+import com.google.inject.Inject;
 
 /**
  * During stack upgrade, update lzo codec path in mapreduce.application.classpath and
@@ -78,7 +76,7 @@ public class FixLzoCodecPath extends AbstractServerAction {
         }
       }
       config.setProperties(properties);
-      config.persist(false);
+      config.save();
     }
     if (modifiedProperties.isEmpty()) {
       return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsers.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsers.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsers.java
index 3a06476..75588d5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsers.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsers.java
@@ -18,7 +18,9 @@
 
 package org.apache.ambari.server.serveraction.upgrades;
 
-import com.google.inject.Inject;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
@@ -28,8 +30,7 @@ import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.commons.lang.StringUtils;
 
-import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
+import com.google.inject.Inject;
 
 /**
  * During stack upgrade, update lzo codec path in mapreduce.application.classpath and
@@ -86,7 +87,7 @@ public class FixOozieAdminUsers extends AbstractServerAction {
     oozieProperties.put(OOZIE_ADMIN_USERS_PROP, newOozieAdminUsers);
 
     oozieConfig.setProperties(oozieProperties);
-    oozieConfig.persist(false);
+    oozieConfig.save();
 
     return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
             String.format("Set oozie admin users to %s", newOozieAdminUsers), "");

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseConfigCalculation.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseConfigCalculation.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseConfigCalculation.java
index 7f6d4b1..739dd7e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseConfigCalculation.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseConfigCalculation.java
@@ -18,7 +18,10 @@
 
 package org.apache.ambari.server.serveraction.upgrades;
 
-import com.google.inject.Inject;
+import java.math.BigDecimal;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
@@ -27,9 +30,7 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 
-import java.math.BigDecimal;
-import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
+import com.google.inject.Inject;
 
 /**
  * Computes HBase properties.  This class is only used when moving from
@@ -79,8 +80,9 @@ public class HBaseConfigCalculation extends AbstractServerAction {
                                    "Upper or lower memstore limit setting value is malformed, skipping", "");
     }
 
-    if (lowerLimit.scale() < 2) //make sure result will have at least 2 digits after decimal point
+    if (lowerLimit.scale() < 2) {
       lowerLimit = lowerLimit.setScale(2, BigDecimal.ROUND_HALF_UP);
+    }
     BigDecimal lowerLimitNew = lowerLimit.divide(upperLimit, BigDecimal.ROUND_HALF_UP);
 
     properties.put(NEW_LOWER_LIMIT_PROPERTY_NAME, lowerLimitNew.toString());
@@ -90,7 +92,7 @@ public class HBaseConfigCalculation extends AbstractServerAction {
     properties.remove(OLD_LOWER_LIMIT_PROPERTY_NAME);
 
     config.setProperties(properties);
-    config.persist(false);
+    config.save();
 
     return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
                   String.format("%s was set to %s", NEW_LOWER_LIMIT_PROPERTY_NAME, lowerLimitNew.toString()), "");

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeAction.java
index b238bca..fb15555 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeAction.java
@@ -18,7 +18,11 @@
 
 package org.apache.ambari.server.serveraction.upgrades;
 
-import com.google.inject.Inject;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
@@ -27,10 +31,7 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 
-import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
+import com.google.inject.Inject;
 
 /**
  * Computes HBase Env content property.
@@ -79,7 +80,7 @@ public class HBaseEnvMaxDirectMemorySizeAction extends AbstractServerAction {
     properties.put(CONTENT_NAME, appendedContent);
 
     config.setProperties(properties);
-    config.persist(false);
+    config.save();
 
     return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
       String.format("The %s/%s property was appended with %s", SOURCE_CONFIG_TYPE, CONTENT_NAME, APPEND_CONTENT_LINE),"");

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathAction.java
index 0e10160..c5000bf 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathAction.java
@@ -18,7 +18,11 @@
 
 package org.apache.ambari.server.serveraction.upgrades;
 
-import com.google.inject.Inject;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
@@ -27,10 +31,7 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 
-import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
+import com.google.inject.Inject;
 
 /**
  * Append hive-env config type with HIVE_HOME and HIVE_CONF_DIR variables if they are absent
@@ -103,7 +104,7 @@ public class HiveEnvClasspathAction extends AbstractServerAction {
     }
 
     config.setProperties(properties);
-    config.persist(false);
+    config.save();
 
     return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
       String.format("Added %s, %s to content at %s", HIVE_CONF_DIR, HIVE_HOME, TARGET_CONFIG_TYPE), "");

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigAction.java
index 0ade30b..7ebad08 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigAction.java
@@ -85,7 +85,7 @@ public class HiveZKQuorumConfigAction extends AbstractServerAction {
     hiveSiteProperties.put(HIVE_SITE_ZK_CONNECT_STRING, zookeeperQuorum);
 
     hiveSite.setProperties(hiveSiteProperties);
-    hiveSite.persist(false);
+    hiveSite.save();
 
     return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
         String.format("Successfully set %s and %s in %s", HIVE_SITE_ZK_QUORUM,

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/OozieConfigCalculation.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/OozieConfigCalculation.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/OozieConfigCalculation.java
index 4da67ca..9b8a7dc 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/OozieConfigCalculation.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/OozieConfigCalculation.java
@@ -18,7 +18,11 @@
 
 package org.apache.ambari.server.serveraction.upgrades;
 
-import com.google.inject.Inject;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
@@ -27,10 +31,7 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 
-import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
+import com.google.inject.Inject;
 
 /**
  * Changes oozie-env during upgrade (adds -Dhdp.version to $HADOOP_OPTS variable)
@@ -67,7 +68,7 @@ public class OozieConfigCalculation extends AbstractServerAction {
     }
 
     config.setProperties(properties);
-    config.persist(false);
+    config.save();
 
     return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
                   String.format("Added -Dhdp.version to $HADOOP_OPTS variable at %s", TARGET_CONFIG_TYPE), "");

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculation.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculation.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculation.java
index ff4a20e..8e0161b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculation.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculation.java
@@ -141,13 +141,13 @@ public class RangerConfigCalculation extends AbstractServerAction {
     targetValues.put("ranger.jpa.audit.jdbc.dialect", dialect);
 
     config.setProperties(targetValues);
-    config.persist(false);
+    config.save();
 
     config = cluster.getDesiredConfigByType(RANGER_ENV_CONFIG_TYPE);
     targetValues = config.getProperties();
     targetValues.put("ranger_privelege_user_jdbc_url", userJDBCUrl);
     config.setProperties(targetValues);
-    config.persist(false);
+    config.save();
 
     return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", stdout.toString(), "");
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculation.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculation.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculation.java
index ba0da79..c059c9e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculation.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculation.java
@@ -87,7 +87,7 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
       if (null != hadoopUser) {
         targetValues.put(RANGER_PLUGINS_HDFS_SERVICE_USER, hadoopUser);
         rangerAdminconfig.setProperties(targetValues);
-        rangerAdminconfig.persist(false);
+        rangerAdminconfig.save();
         sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_HDFS_SERVICE_USER);
       } else {
         errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "hdfs_user", HADOOP_ENV_CONFIG_TYPE);
@@ -104,7 +104,7 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
       if (null != hiveUser) {
         targetValues.put(RANGER_PLUGINS_HIVE_SERVICE_USER, hiveUser);
         rangerAdminconfig.setProperties(targetValues);
-        rangerAdminconfig.persist(false);
+        rangerAdminconfig.save();
         sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_HIVE_SERVICE_USER);
       } else {
         errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "hive_user", HIVE_ENV_CONFIG_TYPE);
@@ -121,7 +121,7 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
       if (null != yarnUser) {
         targetValues.put(RANGER_PLUGINS_YARN_SERVICE_USER, yarnUser);
         rangerAdminconfig.setProperties(targetValues);
-        rangerAdminconfig.persist(false);
+        rangerAdminconfig.save();
         sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_YARN_SERVICE_USER);
       } else {
         errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "yarn_user", YARN_ENV_CONFIG_TYPE);
@@ -138,7 +138,7 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
       if (null != hbaseUser) {
         targetValues.put(RANGER_PLUGINS_HBASE_SERVICE_USER, hbaseUser);
         rangerAdminconfig.setProperties(targetValues);
-        rangerAdminconfig.persist(false);
+        rangerAdminconfig.save();
         sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_HBASE_SERVICE_USER);
       } else {
         errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "hbase_user", HBASE_ENV_CONFIG_TYPE);
@@ -155,7 +155,7 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
       if (null != knoxUser) {
         targetValues.put(RANGER_PLUGINS_KNOX_SERVICE_USER, knoxUser);
         rangerAdminconfig.setProperties(targetValues);
-        rangerAdminconfig.persist(false);
+        rangerAdminconfig.save();
         sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_KNOX_SERVICE_USER);
       } else {
         errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "knox_user", KNOX_ENV_CONFIG_TYPE);
@@ -190,7 +190,7 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
         }
         targetValues.put(RANGER_PLUGINS_STORM_SERVICE_USER, stormValue);
         rangerAdminconfig.setProperties(targetValues);
-        rangerAdminconfig.persist(false);
+        rangerAdminconfig.save();
         sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_STORM_SERVICE_USER);
       } else {
         errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "storm_user", STORM_ENV_CONFIG_TYPE);
@@ -207,7 +207,7 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
       if (null != kafkaUser) {
         targetValues.put(RANGER_PLUGINS_KAFKA_SERVICE_USER, kafkaUser);
         rangerAdminconfig.setProperties(targetValues);
-        rangerAdminconfig.persist(false);
+        rangerAdminconfig.save();
         sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_KAFKA_SERVICE_USER);
       } else {
         errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "kafka_user", KAFKA_ENV_CONFIG_TYPE);
@@ -224,7 +224,7 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
       if (null != rangerKmsUser) {
         targetValues.put(RANGER_PLUGINS_KMS_SERVICE_USER, rangerKmsUser);
         rangerAdminconfig.setProperties(targetValues);
-        rangerAdminconfig.persist(false);
+        rangerAdminconfig.save();
         sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_KMS_SERVICE_USER);
       } else {
         errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "kms_user", RANGER_KMS_ENV_CONFIG_TYPE);
@@ -243,10 +243,10 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
         if (null != spnegoKeytab) {
           targetValues.put(RANGER_SPNEGO_KEYTAB, spnegoKeytab);
           rangerAdminconfig.setProperties(targetValues);
-          rangerAdminconfig.persist(false);
+          rangerAdminconfig.save();
           sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_SPNEGO_KEYTAB);
         } else {
-          errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "dfs.web.authentication.kerberos.keytab", HDFS_SITE_CONFIG_TYPE);          
+          errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "dfs.web.authentication.kerberos.keytab", HDFS_SITE_CONFIG_TYPE);
         }
 
       } else {

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfig.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfig.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfig.java
index bb88f55..25387cc 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfig.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfig.java
@@ -29,7 +29,6 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.SecurityType;
-import org.apache.commons.lang.StringUtils;
 
 import com.google.inject.Inject;
 
@@ -83,7 +82,7 @@ public class RangerKmsProxyConfig extends AbstractServerAction {
       targetValues.put(groupProp, "*");
       targetValues.put(hostProp, "*");
       kmsSite.setProperties(targetValues);
-      kmsSite.persist(false);
+      kmsSite.save();
       outputMsg = outputMsg + MessageFormat.format("Successfully added properties to {0}", RANGER_KMS_SITE_CONFIG_TYPE);
     } else {
       outputMsg = outputMsg +  MessageFormat.format("Kerberos not enable, not setting proxy properties to {0}", RANGER_KMS_SITE_CONFIG_TYPE);

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfig.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfig.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfig.java
index 299a373..b1aa6e1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfig.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfig.java
@@ -25,7 +25,6 @@ import java.util.Map;
 import java.util.concurrent.ConcurrentMap;
 
 import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.ServiceNotFoundException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
 import org.apache.ambari.server.serveraction.AbstractServerAction;
@@ -89,7 +88,7 @@ public class SparkShufflePropertyConfig extends AbstractServerAction {
       yarnSiteProperties.put(YARN_NODEMANAGER_AUX_SERVICES, newAuxServices);
       yarnSiteProperties.put(YARN_NODEMANAGER_AUX_SERVICES_SPARK_SHUFFLE_CLASS, YARN_NODEMANAGER_AUX_SERVICES_SPARK_SHUFFLE_CLASS_VALUE);
       yarnSiteConfig.setProperties(yarnSiteProperties);
-      yarnSiteConfig.persist(false);
+      yarnSiteConfig.save();
 
       return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
         String.format("%s was set from %s to %s. %s was set to %s",

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/YarnConfigCalculation.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/YarnConfigCalculation.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/YarnConfigCalculation.java
index feefcaf..d638858 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/YarnConfigCalculation.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/YarnConfigCalculation.java
@@ -67,7 +67,7 @@ public class YarnConfigCalculation extends AbstractServerAction {
     yarnSiteProperties.put(YARN_RM_ZK_ADDRESS_PROPERTY_NAME, zkServersStr);
     yarnSiteProperties.put(HADOOP_REGISTRY_ZK_QUORUM_PROPERTY_NAME, zkServersStr);
     yarnSiteConfig.setProperties(yarnSiteProperties);
-    yarnSiteConfig.persist(false);
+    yarnSiteConfig.save();
 
     return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
         String.format("%s was set from %s to %s. %s was set from %s to %s",

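For reference, a minimal sketch (not part of the diff) of the pattern shared by the four upgrade actions above, assuming a Cluster reference and the zkServersStr variable as in each action; the property key and value are illustrative only. The new save() call updates the existing configuration version in place, which is what the removed persist(false) did:

    Config yarnSiteConfig = cluster.getDesiredConfigByType("yarn-site");
    if (null != yarnSiteConfig) {
      Map<String, String> properties = yarnSiteConfig.getProperties();
      properties.put("yarn.resourcemanager.zk-address", zkServersStr); // illustrative key/value
      yarnSiteConfig.setProperties(properties);

      // merges the changed properties back into the current ClusterConfigEntity;
      // no new configuration version is created
      yarnSiteConfig.save();
    }
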
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
index 37f4167..52d39e0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
@@ -44,16 +44,19 @@ import org.apache.ambari.server.state.stack.ServiceMetainfoXml;
 import org.apache.ambari.server.state.stack.StackMetainfoXml;
 import org.apache.ambari.server.state.stack.UpgradePack;
 import org.apache.ambari.server.state.stack.upgrade.Grouping;
+import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
+import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Function;
-import com.google.common.collect.ListMultimap;
 import com.google.common.collect.ImmutableListMultimap;
 import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.ListMultimap;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Multimaps;
 
+
 /**
  * Stack module which provides all functionality related to parsing and fully
  * resolving stacks from the stack definition.
@@ -831,6 +834,7 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
     if (configPack == null) {
       return;
     }
+
     for (ServiceModule module : serviceModules.values()) {
       File upgradesFolder = module.getModuleInfo().getServiceUpgradesFolder();
       if (upgradesFolder != null) {
@@ -866,12 +870,17 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
   private UpgradePack getServiceUpgradePack(UpgradePack pack, File upgradesFolder) throws AmbariException {
     File stackFolder = new File(upgradesFolder, stackInfo.getName());
     File versionFolder = new File(stackFolder, stackInfo.getVersion());
+    // !!! relies on the service upgrade pack file having exactly the same name as the base pack
     File servicePackFile = new File(versionFolder, pack.getName() + ".xml");
+
     LOG.info("Service folder: " + servicePackFile.getAbsolutePath());
-    if (!servicePackFile.exists()) {
-      return null;
+    if (servicePackFile.exists()) {
+      return parseServiceUpgradePack(pack, servicePackFile);
+    } else {
+      UpgradePack child = findServiceUpgradePack(pack, stackFolder);
+
+      return null == child ? null : parseServiceUpgradePack(pack, child);
     }
-    return parseServiceUpgradePack(pack, servicePackFile);
   }
 
   /**
@@ -879,6 +888,7 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
    * for any service which specifies its own upgrade.
    */
   private void mergeUpgradePack(UpgradePack pack, List<UpgradePack> servicePacks) throws AmbariException {
+
     List<Grouping> originalGroups = pack.getAllGroups();
     Map<String, List<Grouping>> allGroupMap = new HashMap<>();
     for (Grouping group : originalGroups) {
@@ -886,8 +896,21 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
       list.add(group);
       allGroupMap.put(group.name, list);
     }
+
     for (UpgradePack servicePack : servicePacks) {
       for (Grouping group : servicePack.getAllGroups()) {
+
+        /*
+         !!! special case where the service pack is targeted for any version.  When
+         a service UP group is set to run after another group, check to make sure that
+         the base UP contains that group.
+         */
+        if (servicePack.isAllTarget() && !allGroupMap.keySet().contains(group.addAfterGroup)) {
+          LOG.warn("Service Upgrade Pack specified after-group of {}, but that is not found in {}",
+              group.addAfterGroup, StringUtils.join(allGroupMap.keySet(), ','));
+          continue;
+        }
+
         if (allGroupMap.containsKey(group.name)) {
           List<Grouping> list = allGroupMap.get(group.name);
           Grouping first = list.get(0);
@@ -903,8 +926,7 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
           else {
             list.add(group);
           }
-        }
-        else {
+        } else {
           List<Grouping> list = new ArrayList<>();
           list.add(group);
           allGroupMap.put(group.name, list);
@@ -931,15 +953,17 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
    */
   private void orderGroups(List<Grouping> groups, Map<String, Grouping> mergedGroupMap) throws AmbariException {
     Map<String, List<Grouping>> skippedGroups = new HashMap<>();
+
     for (Map.Entry<String, Grouping> entry : mergedGroupMap.entrySet()) {
-      String key = entry.getKey();
       Grouping group = entry.getValue();
+
       if (!groups.contains(group)) {
         boolean added = addGrouping(groups, group);
         if (added) {
           addSkippedGroup(groups, skippedGroups, group);
         } else {
           List<Grouping> tmp = null;
+
           // store the group until later
           if (skippedGroups.containsKey(group.addAfterGroup)) {
             tmp = skippedGroups.get(group.addAfterGroup);
@@ -951,6 +975,7 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
         }
       }
     }
+
     if (!skippedGroups.isEmpty()) {
       throw new AmbariException("Missing groups: " + skippedGroups.keySet());
     }
@@ -996,6 +1021,50 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
   }
 
   /**
+   * Finds an upgrade pack that:
+   * <ul>
+   *   <li>Is found in the $SERVICENAME/upgrades/$STACKNAME folder</li>
+   *   <li>Matches the same {@link UpgradeType#getType()} as the {@code base} upgrade pack</li>
+   *   <li>Has the {@link UpgradePack#getTarget()} value equal to "*"</li>
+   *   <li>Has the {@link UpgradePack#getTargetStack()} value equal to "*"</li>
+   * </ul>
+   * This method will not attempt to resolve the "most correct" upgrade pack.  For this
+   * feature to work, there should be only one upgrade pack per type.  If more specificity
+   * is required, then follow the convention of $SERVICENAME/upgrades/$STACKNAME/$STACKVERSION/$BASE_FILE_NAME.xml
+   *
+   * @param base the base upgrade pack for a stack
+   * @param upgradeStackDirectory service directory that contains stack upgrade files.
+   * @return an upgrade pack that matches {@code base}
+   */
+  private UpgradePack findServiceUpgradePack(UpgradePack base, File upgradeStackDirectory) {
+    if (!upgradeStackDirectory.exists() || !upgradeStackDirectory.isDirectory()) {
+      return null;
+    }
+
+    File[] upgradeFiles = upgradeStackDirectory.listFiles(StackDirectory.XML_FILENAME_FILTER);
+    if (0 == upgradeFiles.length) {
+      return null;
+    }
+
+    for (File f : upgradeFiles) {
+      try {
+        UpgradePack upgradePack = unmarshaller.unmarshal(UpgradePack.class, f);
+
+        // !!! if the type is the same and the target is "*", then it's good to merge
+        if (upgradePack.isAllTarget() && upgradePack.getType() == base.getType()) {
+          return upgradePack;
+        }
+
+      } catch (Exception e) {
+        LOG.warn("File {} does not appear to be an upgrade pack and will be skipped ({})",
+            f.getAbsolutePath(), e.getMessage());
+      }
+    }
+
+    return null;
+  }
+
+  /**
    * Parses the service specific upgrade file and merges the none order elements
    * (prerequisite check and processing sections).
    */
@@ -1008,12 +1077,24 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
       throw new AmbariException("Unable to parse service upgrade file at location: " + serviceFile.getAbsolutePath(), e);
     }
 
-    parent.mergePrerequisiteChecks(pack);
-    parent.mergeProcessing(pack);
+    return parseServiceUpgradePack(parent, pack);
+  }
 
-    return pack;
+  /**
+   * Places prerequisite checks and processing objects onto the parent upgrade pack.
+   *
+   * @param parent  the parent upgrade pack
+   * @param child   the parsed child upgrade pack
+   * @return the child upgrade pack
+   */
+  private UpgradePack parseServiceUpgradePack(UpgradePack parent, UpgradePack child) {
+    parent.mergePrerequisiteChecks(child);
+    parent.mergeProcessing(child);
+
+    return child;
   }
 
+
   /**
    * Process repositories associated with the stack.
    * @throws AmbariException if unable to fully process the stack repositories

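To summarize the resolution order that the new getServiceUpgradePack()/findServiceUpgradePack() pair implements, here is a condensed sketch under the same assumptions as the code above (basePack is the base upgrade pack; stackFolder and versionFolder point into the service's upgrades directory):

    // 1. prefer an exact match: $SERVICENAME/upgrades/$STACKNAME/$STACKVERSION/<base-pack-name>.xml
    File servicePackFile = new File(versionFolder, basePack.getName() + ".xml");
    if (servicePackFile.exists()) {
      return parseServiceUpgradePack(basePack, servicePackFile);
    }

    // 2. otherwise fall back to any pack directly under $SERVICENAME/upgrades/$STACKNAME
    //    whose type matches the base pack and whose target/target-stack is "*"
    UpgradePack wildcard = findServiceUpgradePack(basePack, stackFolder);
    return null == wildcard ? null : parseServiceUpgradePack(basePack, wildcard);
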
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/state/ComponentInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ComponentInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ComponentInfo.java
index 1e494b4..bd9b798 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ComponentInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ComponentInfo.java
@@ -133,6 +133,9 @@ public class ComponentInfo {
 
   private String timelineAppid;
 
+  @XmlElement(name="customFolder")
+  private String customFolder;
+
   public ComponentInfo() {
   }
 
@@ -158,6 +161,7 @@ public class ComponentInfo {
     clientConfigFiles = prototype.clientConfigFiles;
     timelineAppid = prototype.timelineAppid;
     reassignAllowed = prototype.reassignAllowed;
+    customFolder = prototype.customFolder;
   }
 
   public String getName() {
@@ -396,6 +400,14 @@ public class ComponentInfo {
     this.reassignAllowed = reassignAllowed;
   }
 
+  public String getCustomFolder() {
+    return customFolder;
+  }
+
+  public void setCustomFolder(String customFolder) {
+    this.customFolder = customFolder;
+  }
+
   @Override
   public boolean equals(Object o) {
     if (this == o) return true;
@@ -426,6 +438,7 @@ public class ComponentInfo {
     if (name != null ? !name.equals(that.name) : that.name != null) return false;
     if (clientConfigFiles != null ? !clientConfigFiles.equals(that.clientConfigFiles) :
         that.clientConfigFiles != null) return false;
+    if (customFolder != null ? !customFolder.equals(that.customFolder) : that.customFolder != null) return false;
 
     return true;
   }
@@ -450,6 +463,7 @@ public class ComponentInfo {
     result = 31 * result + (clientConfigFiles != null ? clientConfigFiles.hashCode() : 0);
     // NULL = 0, TRUE = 2, FALSE = 1
     result = 31 * result + (versionAdvertisedField != null ? (versionAdvertisedField.booleanValue() ? 2 : 1) : 0);
+    result = 31 * result + (customFolder != null ? customFolder.hashCode() : 0);
     return result;
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/state/Config.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Config.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Config.java
index b35aad9..67570f4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/Config.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Config.java
@@ -30,8 +30,6 @@ public interface Config {
 
   void setPropertiesTypes(Map<PropertyInfo.PropertyType, Set<String>> propertiesTypes);
 
-  void setStackId(StackId stackId);
-
   /**
    * @return Config Type
    */
@@ -66,18 +64,6 @@ public interface Config {
   public Map<String, Map<String, String>> getPropertiesAttributes();
 
   /**
-   * Change the version tag
-   * @param versionTag
-   */
-  public void setTag(String versionTag);
-
-  /**
-   * Set config version
-   * @param version
-   */
-  public void setVersion(Long version);
-
-  /**
    * Replace properties with new provided set
    * @param properties Property Map to replace existing one
    */
@@ -110,11 +96,5 @@ public interface Config {
   /**
    * Persist the configuration.
    */
-  public void persist();
-
-  /**
-   * Persist the configuration, optionally creating a new config entity.
-   */
-  public void persist(boolean newConfig);
-
+  public void save();
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigFactory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigFactory.java
index eaf68aa..d6cd997 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigFactory.java
@@ -27,18 +27,20 @@ import com.google.inject.assistedinject.Assisted;
  * Factory for creating configuration objects using {@link Assisted} constructor parameters
  */
 public interface ConfigFactory {
-  
+
   /**
    * Creates a new {@link Config} object using provided values.
    *
    * @param cluster
    * @param type
+   * @param tag
    * @param map
    * @param mapAttributes
    * @return
    */
-  Config createNew(Cluster cluster, String type, Map<String, String> map, Map<String, Map<String, String>> mapAttributes);
-  
+  Config createNew(Cluster cluster, @Assisted("type") String type, @Assisted("tag") String tag,
+      Map<String, String> map, Map<String, Map<String, String>> mapAttributes);
+
   /**
    * Creates a new {@link Config} object using provided entity
    *
@@ -48,4 +50,16 @@ public interface ConfigFactory {
    */
   Config createExisting(Cluster cluster, ClusterConfigEntity entity);
 
+  /**
+   * Creates a read-only instance of a {@link Config} suitable for returning in
+   * REST responses.
+   *
+   * @param type
+   * @param tag
+   * @param map
+   * @param mapAttributes
+   * @return
+   */
+  Config createReadOnly(@Assisted("type") String type, @Assisted("tag") String tag,
+      Map<String, String> map, Map<String, Map<String, String>> mapAttributes);
 }

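A minimal sketch of the creation flow implied by the new createNew() signature (the test updates later in this commit follow the same shape), assuming a ConfigFactory and Cluster reference; the tag is supplied up front and the configuration is persisted as part of creation, so the old setTag()/persist()/addConfig() sequence disappears from callers. Values are illustrative:

    Config config = configFactory.createNew(cluster, "zoo.cfg", "version2",
        new HashMap<String, String>() {{
          put("initLimit", "10");
        }},
        new HashMap<String, Map<String, String>>());

    // the config is already saved and registered with the cluster;
    // only the desired-config mapping still needs to be recorded
    cluster.addDesiredConfig("user", Collections.singleton(config));
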
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
index 1f52e6a..0a861d8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
@@ -18,27 +18,29 @@
 
 package org.apache.ambari.server.state;
 
-import java.util.Collections;
-import java.util.Date;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import javax.annotation.Nullable;
 
 import org.apache.ambari.server.events.ClusterConfigChangedEvent;
 import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
+import org.apache.ambari.server.logging.LockFactory;
 import org.apache.ambari.server.orm.dao.ClusterDAO;
 import org.apache.ambari.server.orm.dao.ServiceConfigDAO;
 import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
 import org.apache.ambari.server.orm.entities.ClusterEntity;
+import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.gson.Gson;
+import com.google.gson.JsonSyntaxException;
 import com.google.inject.Inject;
-import com.google.inject.Injector;
 import com.google.inject.assistedinject.Assisted;
 import com.google.inject.assistedinject.AssistedInject;
 import com.google.inject.persist.Transactional;
@@ -49,52 +51,113 @@ public class ConfigImpl implements Config {
    */
   private final static Logger LOG = LoggerFactory.getLogger(ConfigImpl.class);
 
+  /**
+   * A label for {@link #hostLock} to use with the {@link LockFactory}.
+   */
+  private static final String PROPERTY_LOCK_LABEL = "configurationPropertyLock";
+
   public static final String GENERATED_TAG_PREFIX = "generatedTag_";
 
-  private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
+  private final long configId;
+  private final Cluster cluster;
+  private final StackId stackId;
+  private final String type;
+  private final String tag;
+  private final Long version;
 
-  private Cluster cluster;
-  private StackId stackId;
-  private String type;
-  private volatile String tag;
-  private volatile Long version;
-  private volatile Map<String, String> properties;
-  private volatile Map<String, Map<String, String>> propertiesAttributes;
-  private ClusterConfigEntity entity;
-  private volatile Map<PropertyInfo.PropertyType, Set<String>> propertiesTypes;
+  /**
+   * The properties of this configuration. This cannot be a
+   * {@link ConcurrentMap} since we allow null values. Therefore, it must be
+   * synchronized externally.
+   */
+  private Map<String, String> properties;
 
-  @Inject
-  private ClusterDAO clusterDAO;
+  /**
+   * A lock for reading/writing of {@link #properties} concurrently.
+   *
+   * @see #properties
+   */
+  private final ReadWriteLock propertyLock;
 
-  @Inject
-  private Gson gson;
+  /**
+   * The property attributes for this configuration.
+   */
+  private Map<String, Map<String, String>> propertiesAttributes;
+
+  private Map<PropertyInfo.PropertyType, Set<String>> propertiesTypes;
+
+  private final ClusterDAO clusterDAO;
+
+  private final Gson gson;
 
   @Inject
   private ServiceConfigDAO serviceConfigDAO;
 
-  @Inject
-  private AmbariEventPublisher eventPublisher;
+  private final AmbariEventPublisher eventPublisher;
 
   @AssistedInject
-  public ConfigImpl(@Assisted Cluster cluster, @Assisted String type, @Assisted Map<String, String> properties,
-      @Assisted Map<String, Map<String, String>> propertiesAttributes, Injector injector) {
+  ConfigImpl(@Assisted Cluster cluster, @Assisted("type") String type,
+      @Assisted("tag") @Nullable String tag,
+      @Assisted Map<String, String> properties,
+      @Assisted @Nullable Map<String, Map<String, String>> propertiesAttributes, ClusterDAO clusterDAO,
+      Gson gson, AmbariEventPublisher eventPublisher, LockFactory lockFactory) {
+
+    propertyLock = lockFactory.newReadWriteLock(PROPERTY_LOCK_LABEL);
+
     this.cluster = cluster;
     this.type = type;
     this.properties = properties;
-    this.propertiesAttributes = propertiesAttributes;
+
+    // only set this if it's non-null
+    this.propertiesAttributes = null == propertiesAttributes ? null
+        : new HashMap<>(propertiesAttributes);
+
+    this.clusterDAO = clusterDAO;
+    this.gson = gson;
+    this.eventPublisher = eventPublisher;
+    version = cluster.getNextConfigVersion(type);
+
+    // tag is nullable from factory but not in the DB, so ensure we generate something
+    tag = StringUtils.isBlank(tag) ? GENERATED_TAG_PREFIX + version : tag;
+    this.tag = tag;
+
+    ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());
+
+    ClusterConfigEntity entity = new ClusterConfigEntity();
+    entity.setClusterEntity(clusterEntity);
+    entity.setClusterId(cluster.getClusterId());
+    entity.setType(type);
+    entity.setVersion(version);
+    entity.setTag(this.tag);
+    entity.setTimestamp(System.currentTimeMillis());
+    entity.setStack(clusterEntity.getDesiredStack());
+    entity.setData(gson.toJson(properties));
+
+    if (null != propertiesAttributes) {
+      entity.setAttributes(gson.toJson(propertiesAttributes));
+    }
 
     // when creating a brand new config without a backing entity, use the
     // cluster's desired stack as the config's stack
     stackId = cluster.getDesiredStackVersion();
-
-    injector.injectMembers(this);
     propertiesTypes = cluster.getConfigPropertiesTypes(type);
-  }
+    persist(entity);
 
+    configId = entity.getConfigId();
+  }
 
   @AssistedInject
-  public ConfigImpl(@Assisted Cluster cluster, @Assisted ClusterConfigEntity entity, Injector injector) {
+  ConfigImpl(@Assisted Cluster cluster, @Assisted ClusterConfigEntity entity,
+      ClusterDAO clusterDAO, Gson gson, AmbariEventPublisher eventPublisher,
+      LockFactory lockFactory) {
+    propertyLock = lockFactory.newReadWriteLock(PROPERTY_LOCK_LABEL);
+
     this.cluster = cluster;
+    this.clusterDAO = clusterDAO;
+    this.gson = gson;
+    this.eventPublisher = eventPublisher;
+    configId = entity.getConfigId();
+
     type = entity.getType();
     tag = entity.getTag();
     version = entity.getVersion();
@@ -102,16 +165,71 @@ public class ConfigImpl implements Config {
     // when using an existing entity, use the actual value of the entity's stack
     stackId = new StackId(entity.getStack());
 
-    this.entity = entity;
-    injector.injectMembers(this);
     propertiesTypes = cluster.getConfigPropertiesTypes(type);
+
+    // incur the hit on deserialization since this business object is stored locally
+    try {
+      Map<String, String> deserializedProperties = gson.<Map<String, String>> fromJson(
+          entity.getData(), Map.class);
+
+      if (null == deserializedProperties) {
+        deserializedProperties = new HashMap<>();
+      }
+
+      properties = deserializedProperties;
+    } catch (JsonSyntaxException e) {
+      LOG.error("Malformed configuration JSON stored in the database for {}/{}", entity.getType(),
+          entity.getTag());
+    }
+
+    // incur the hit on deserialization since this business object is stored locally
+    try {
+      Map<String, Map<String, String>> deserializedAttributes = gson.<Map<String, Map<String, String>>> fromJson(
+          entity.getAttributes(), Map.class);
+
+      if (null != deserializedAttributes) {
+        propertiesAttributes = new HashMap<>(deserializedAttributes);
+      }
+    } catch (JsonSyntaxException e) {
+      LOG.error("Malformed configuration attribute JSON stored in the database for {}/{}",
+          entity.getType(), entity.getTag());
+    }
   }
 
   /**
-   * Constructor for clients not using factory.
+   * Constructor. This will create an instance suitable only for
+   * representation/serialization as it is incomplete.
+   *
+   * @param type
+   * @param tag
+   * @param properties
+   * @param propertiesAttributes
+   * @param clusterDAO
+   * @param gson
+   * @param eventPublisher
    */
-  public ConfigImpl(String type) {
+  @AssistedInject
+  ConfigImpl(@Assisted("type") String type,
+      @Assisted("tag") @Nullable String tag,
+      @Assisted Map<String, String> properties,
+      @Assisted @Nullable Map<String, Map<String, String>> propertiesAttributes, ClusterDAO clusterDAO,
+      Gson gson, AmbariEventPublisher eventPublisher, LockFactory lockFactory) {
+
+    propertyLock = lockFactory.newReadWriteLock(PROPERTY_LOCK_LABEL);
+
+    this.tag = tag;
     this.type = type;
+    this.properties = new HashMap<>(properties);
+    this.propertiesAttributes = null == propertiesAttributes ? null
+        : new HashMap<>(propertiesAttributes);
+    this.clusterDAO = clusterDAO;
+    this.gson = gson;
+    this.eventPublisher = eventPublisher;
+
+    cluster = null;
+    configId = 0;
+    version = 0L;
+    stackId = null;
   }
 
   /**
@@ -119,232 +237,124 @@ public class ConfigImpl implements Config {
    */
   @Override
   public StackId getStackId() {
-    readWriteLock.readLock().lock();
-    try {
-      return stackId;
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
+    return stackId;
   }
 
   @Override
   public Map<PropertyInfo.PropertyType, Set<String>> getPropertiesTypes() {
-    readWriteLock.readLock().lock();
-    try {
-      return propertiesTypes;
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
+    return propertiesTypes;
   }
 
   @Override
   public void setPropertiesTypes(Map<PropertyInfo.PropertyType, Set<String>> propertiesTypes) {
-    readWriteLock.writeLock().lock();
-    try {
-      this.propertiesTypes = propertiesTypes;
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-  }
-
-  @Override
-  public void setStackId(StackId stackId) {
-    readWriteLock.writeLock().lock();
-    try {
-      this.stackId = stackId;
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-
+    this.propertiesTypes = propertiesTypes;
   }
 
   @Override
   public String getType() {
-    readWriteLock.readLock().lock();
-    try {
-      return type;
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
+    return type;
   }
 
   @Override
   public String getTag() {
-    if (tag == null) {
-      readWriteLock.writeLock().lock();
-      try {
-        if (tag == null) {
-          tag = GENERATED_TAG_PREFIX + getVersion();
-        }
-      } finally {
-        readWriteLock.writeLock().unlock();
-      }
-    }
-
-    readWriteLock.readLock().lock();
-    try {
-
-      return tag;
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
+    return tag;
   }
 
   @Override
   public Long getVersion() {
-    if (version == null && cluster != null) {
-      readWriteLock.writeLock().lock();
-      try {
-        if (version == null) {
-          version = cluster.getNextConfigVersion(type); //pure DB calculation call, no cluster locking required
-        }
-      } finally {
-        readWriteLock.writeLock().unlock();
-      }
-    }
-
-    readWriteLock.readLock().lock();
-    try {
-      return version;
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
+    return version;
   }
 
   @Override
   public Map<String, String> getProperties() {
-    if (null != entity && null == properties) {
-      readWriteLock.writeLock().lock();
-      try {
-        if (properties == null) {
-          properties = gson.<Map<String, String>>fromJson(entity.getData(), Map.class);
-        }
-      } finally {
-        readWriteLock.writeLock().unlock();
-      }
-    }
-
-    readWriteLock.readLock().lock();
+    propertyLock.readLock().lock();
     try {
-      return null == properties ? new HashMap<String, String>()
-          : new HashMap<String, String>(properties);
+      return properties == null ? new HashMap<String, String>() : new HashMap<>(properties);
     } finally {
-      readWriteLock.readLock().unlock();
+      propertyLock.readLock().unlock();
     }
-
   }
 
   @Override
   public Map<String, Map<String, String>> getPropertiesAttributes() {
-    if (null != entity && null == propertiesAttributes) {
-      readWriteLock.writeLock().lock();
-      try {
-        if (propertiesAttributes == null) {
-          propertiesAttributes = gson.<Map<String, Map<String, String>>>fromJson(entity.getAttributes(), Map.class);
-        }
-      } finally {
-        readWriteLock.writeLock().unlock();
-      }
-    }
-
-    readWriteLock.readLock().lock();
-    try {
-      return null == propertiesAttributes ? null : new HashMap<String, Map<String, String>>(propertiesAttributes);
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
-  }
-
-  @Override
-  public void setTag(String tag) {
-    readWriteLock.writeLock().lock();
-    try {
-      this.tag = tag;
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-
-  }
-
-  @Override
-  public void setVersion(Long version) {
-    readWriteLock.writeLock().lock();
-    try {
-      this.version = version;
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-
+    return null == propertiesAttributes ? null
+        : new HashMap<String, Map<String, String>>(propertiesAttributes);
   }
 
   @Override
   public void setProperties(Map<String, String> properties) {
-    readWriteLock.writeLock().lock();
+    propertyLock.writeLock().lock();
     try {
       this.properties = properties;
     } finally {
-      readWriteLock.writeLock().unlock();
+      propertyLock.writeLock().unlock();
     }
-
   }
 
   @Override
   public void setPropertiesAttributes(Map<String, Map<String, String>> propertiesAttributes) {
-    readWriteLock.writeLock().lock();
-    try {
-      this.propertiesAttributes = propertiesAttributes;
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-
+    this.propertiesAttributes = propertiesAttributes;
   }
 
   @Override
-  public void updateProperties(Map<String, String> properties) {
-    readWriteLock.writeLock().lock();
+  public void updateProperties(Map<String, String> propertiesToUpdate) {
+    propertyLock.writeLock().lock();
     try {
-      this.properties.putAll(properties);
+      properties.putAll(propertiesToUpdate);
     } finally {
-      readWriteLock.writeLock().unlock();
+      propertyLock.writeLock().unlock();
     }
-
   }
 
   @Override
   public List<Long> getServiceConfigVersions() {
-    readWriteLock.readLock().lock();
-    try {
-      if (cluster == null || type == null || version == null) {
-        return Collections.emptyList();
-      }
-      return serviceConfigDAO.getServiceConfigVersionsByConfig(cluster.getClusterId(), type, version);
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
+    return serviceConfigDAO.getServiceConfigVersionsByConfig(cluster.getClusterId(), type, version);
   }
 
   @Override
-  public void deleteProperties(List<String> properties) {
-    readWriteLock.writeLock().lock();
+  public void deleteProperties(List<String> propertyKeysToRemove) {
+    propertyLock.writeLock().lock();
     try {
-      for (String key : properties) {
-        this.properties.remove(key);
-      }
+      Set<String> keySet = properties.keySet();
+      keySet.removeAll(propertyKeysToRemove);
     } finally {
-      readWriteLock.writeLock().unlock();
+      propertyLock.writeLock().unlock();
     }
+  }
+
+  /**
+   * Persist the entity and update the internal state relationships once the
+   * transaction has been committed.
+   */
+  private void persist(ClusterConfigEntity entity) {
+    persistEntitiesInTransaction(entity);
 
+    // ensure that the in-memory state of the cluster is kept consistent
+    cluster.addConfig(this);
+
+    // re-load the entity associations for the cluster
+    cluster.refresh();
+
+    // broadcast the change event for the configuration
+    ClusterConfigChangedEvent event = new ClusterConfigChangedEvent(cluster.getClusterName(),
+        getType(), getTag(), getVersion());
+
+    eventPublisher.publish(event);
   }
 
-  @Override
-  public void persist() {
-    persist(true);
+  /**
+   * Persist the cluster and configuration entities in their own transaction.
+   */
+  @Transactional
+  void persistEntitiesInTransaction(ClusterConfigEntity entity) {
+    ClusterEntity clusterEntity = entity.getClusterEntity();
+
+    clusterDAO.createConfig(entity);
+    clusterEntity.getClusterConfigEntities().add(entity);
+
+    // save the entity, forcing a flush to ensure the refresh picks up the
+    // newest data
+    clusterDAO.merge(clusterEntity, true);
   }
 
   /**
@@ -352,69 +362,29 @@ public class ConfigImpl implements Config {
    */
   @Override
   @Transactional
-  public void persist(boolean newConfig) {
-    readWriteLock.writeLock().lock();
-    try {
-      ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());
-
-      if (newConfig) {
-        ClusterConfigEntity entity = new ClusterConfigEntity();
-        entity.setClusterEntity(clusterEntity);
-        entity.setClusterId(cluster.getClusterId());
-        entity.setType(getType());
-        entity.setVersion(getVersion());
-        entity.setTag(getTag());
-        entity.setTimestamp(new Date().getTime());
-        entity.setStack(clusterEntity.getDesiredStack());
-        entity.setData(gson.toJson(getProperties()));
-
-        if (null != getPropertiesAttributes()) {
-          entity.setAttributes(gson.toJson(getPropertiesAttributes()));
-        }
-
-        clusterDAO.createConfig(entity);
-        clusterEntity.getClusterConfigEntities().add(entity);
-
-        // save the entity, forcing a flush to ensure the refresh picks up the
-        // newest data
-        clusterDAO.merge(clusterEntity, true);
-      } else {
-        // only supporting changes to the properties
-        ClusterConfigEntity entity = null;
-
-        // find the existing configuration to update
-        for (ClusterConfigEntity cfe : clusterEntity.getClusterConfigEntities()) {
-          if (getTag().equals(cfe.getTag()) && getType().equals(cfe.getType())
-              && getVersion().equals(cfe.getVersion())) {
-            entity = cfe;
-            break;
-          }
-        }
-
-        // if the configuration was found, then update it
-        if (null != entity) {
-          LOG.debug(
-              "Updating {} version {} with new configurations; a new version will not be created",
-              getType(), getVersion());
-
-          entity.setData(gson.toJson(getProperties()));
-
-          // save the entity, forcing a flush to ensure the refresh picks up the
-          // newest data
-          clusterDAO.merge(clusterEntity, true);
-        }
-      }
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
+  public void save() {
+    ClusterConfigEntity entity = clusterDAO.findConfig(configId);
+    ClusterEntity clusterEntity = clusterDAO.findById(entity.getClusterId());
 
-    // re-load the entity associations for the cluster
-    cluster.refresh();
+    // if the configuration was found, then update it
+    if (null != entity) {
+      LOG.debug("Updating {} version {} with new configurations; a new version will not be created",
+          getType(), getVersion());
 
-    // broadcast the change event for the configuration
-    ClusterConfigChangedEvent event = new ClusterConfigChangedEvent(cluster.getClusterName(),
-        getType(), getTag(), getVersion());
+      entity.setData(gson.toJson(getProperties()));
+
+      // save the entity, forcing a flush to ensure the refresh picks up the
+      // newest data
+      clusterDAO.merge(clusterEntity, true);
+
+      // re-load the entity associations for the cluster
+      cluster.refresh();
+
+      // broadcast the change event for the configuration
+      ClusterConfigChangedEvent event = new ClusterConfigChangedEvent(cluster.getClusterName(),
+          getType(), getTag(), getVersion());
 
       eventPublisher.publish(event);
+    }
   }
 }

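For completeness, a short sketch of the read-only path that backs the new third constructor above, assuming a ConfigFactory instance and existing properties/propertiesAttributes maps; such objects carry only type/tag/properties for REST serialization and have no cluster or entity behind them, so they are not intended to be saved:

    Config readOnly = configFactory.createReadOnly("core-site", "version1",
        properties, propertiesAttributes);
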
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
index f87b99c..e223eed 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
@@ -451,7 +451,7 @@ public class ServiceImpl implements Service {
   }
 
   @Transactional
-  private void persistEntities(ClusterServiceEntity serviceEntity) {
+  void persistEntities(ClusterServiceEntity serviceEntity) {
     long clusterId = cluster.getClusterId();
     ClusterEntity clusterEntity = clusterDAO.findById(clusterId);
     serviceEntity.setClusterEntity(clusterEntity);

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
index 7bf24ce..b62c834 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
@@ -22,6 +22,7 @@ import java.text.MessageFormat;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Date;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -144,8 +145,10 @@ import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.base.Functions;
 import com.google.common.base.Predicate;
 import com.google.common.collect.HashMultimap;
+import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.ListMultimap;
 import com.google.common.collect.Maps;
@@ -326,8 +329,11 @@ public class ClusterImpl implements Cluster {
     loadStackVersion();
     loadServices();
     loadServiceHostComponents();
-    loadConfigGroups();
+
+    // cache configurations before loading configuration groups
     cacheConfigurations();
+    loadConfigGroups();
+
     loadRequestExecutions();
 
     if (desiredStackVersion != null && !StringUtils.isEmpty(desiredStackVersion.getStackName()) && !
@@ -2566,7 +2572,6 @@ public class ClusterImpl implements Cluster {
           }
         }
         configGroup.setHosts(groupDesiredHosts);
-        configGroup.persist();
       } else {
         throw new IllegalArgumentException("Config group {} doesn't exist");
       }
@@ -3070,6 +3075,7 @@ public class ClusterImpl implements Cluster {
    * {@inheritDoc}
    */
   @Override
+  @Transactional
   public void applyLatestConfigurations(StackId stackId) {
     clusterGlobalLock.writeLock().lock();
 
@@ -3077,36 +3083,33 @@ public class ClusterImpl implements Cluster {
       ClusterEntity clusterEntity = getClusterEntity();
       Collection<ClusterConfigMappingEntity> configMappingEntities = clusterEntity.getConfigMappingEntities();
 
+      // hash them for easier retrieval later - these are the same entity
+      // instances which exist on the cluster entity, so modification of the CCM
+      // entity here will affect the cluster CCM entities as well
+      ImmutableMap<Object, ClusterConfigMappingEntity> ccmMap = Maps.uniqueIndex(configMappingEntities, Functions.identity());
+
       // disable all configs
       for (ClusterConfigMappingEntity e : configMappingEntities) {
         LOG.debug("{} with tag {} is unselected", e.getType(), e.getTag());
         e.setSelected(0);
       }
 
-      List<ClusterConfigMappingEntity> clusterConfigMappingsForStack = clusterDAO.getClusterConfigMappingsByStack(
+      // work through the in-memory list, finding only the most recent mapping per type
+      Collection<ClusterConfigMappingEntity> latestConfigMappingByStack = getLatestConfigMappingsForStack(
           clusterEntity.getClusterId(), stackId);
 
-      Collection<ClusterConfigMappingEntity> latestConfigMappingByStack = getLatestConfigMapping(
-          clusterConfigMappingsForStack);
-
-      // loop through all configs and set the latest to enabled for the
-      // specified stack
-      for(ClusterConfigMappingEntity configMappingEntity: configMappingEntities){
-        String type = configMappingEntity.getType();
-        String tag =  configMappingEntity.getTag();
+      for( ClusterConfigMappingEntity latestConfigMapping : latestConfigMappingByStack ){
+        ClusterConfigMappingEntity mapping = ccmMap.get(latestConfigMapping);
+        mapping.setSelected(1);
 
-        for (ClusterConfigMappingEntity latest : latestConfigMappingByStack) {
-          String latestType = latest.getType();
-          String latestTag = latest.getTag();
-
-          // find the latest config of a given mapping entity
-          if (StringUtils.equals(type, latestType) && StringUtils.equals(tag, latestTag)) {
-            LOG.info("{} with version tag {} is selected for stack {}", type, tag, stackId.toString());
-            configMappingEntity.setSelected(1);
-          }
-        }
+        LOG.info("Setting {} with version tag {} created on {} to selected for stack {}",
+            mapping.getType(), mapping.getTag(), new Date(mapping.getCreateTimestamp()),
+            stackId.toString());
       }
 
+      // since the entities which were modified came from the cluster entity's
+      // list to begin with, we can just save them right back - no need for a
+      // new collection since the CCM entity instances were modified directly
       clusterEntity.setConfigMappingEntities(configMappingEntities);
       clusterEntity = clusterDAO.merge(clusterEntity);
       clusterDAO.mergeConfigMappings(configMappingEntities);
@@ -3128,23 +3131,60 @@ public class ClusterImpl implements Cluster {
     jpaEventPublisher.publish(event);
   }
 
-  public Collection<ClusterConfigMappingEntity> getLatestConfigMapping(List<ClusterConfigMappingEntity> clusterConfigMappingEntities){
-    Map<String, ClusterConfigMappingEntity> temp = new HashMap<String, ClusterConfigMappingEntity>();
-    for (ClusterConfigMappingEntity e : clusterConfigMappingEntities) {
-      String type = e.getType();
-      if(temp.containsKey(type)){
-        ClusterConfigMappingEntity entityStored = temp.get(type);
-        Long timestampStored = entityStored.getCreateTimestamp();
-        Long timestamp = e.getCreateTimestamp();
-        if(timestamp > timestampStored){
-          temp.put(type, e); //find a newer config for the given type
-        }
-      } else {
-        temp.put(type, e); //first time encounter a type, add it
+  /**
+   * Retrieves all of the configuration mappings (selected and unselected) for
+   * the specified stack and then iterates through them, returning the most
+   * recent mapping for every type/tag combination.
+   * <p/>
+   * Because of how configuration revert works, mappings can be created for the
+   * same type/tag combinations; the only difference is that the timestamp
+   * reflects when each mapping was created.
+   * <p/>
+   * JPQL cannot easily be used here because some databases cannot
+   * support the necessary grouping and IN clause. For example: <br/>
+   *
+   * <pre>
+   * SELECT mapping FROM clusterconfigmappingentity mapping
+   *   WHERE (mapping.typename, mapping.createtimestamp) IN
+   *     (SELECT latest.typename, MAX(latest.createtimestamp)
+   *      FROM clusterconfigmappingentity latest
+   *      GROUP BY latest.typename)
+   * </pre>
+   *
+   * @param clusterId
+   *          the cluster ID
+   * @param stackId
+   *          the stack to retrieve the mappings for (not {@code null}).
+   * @return the most recent mapping (selected or unselected) for the specified
+   *         stack for every type.
+   */
+  public Collection<ClusterConfigMappingEntity> getLatestConfigMappingsForStack(long clusterId,
+      StackId stackId) {
+
+    // get all mappings for the specified stack (which could include
+    // duplicates since a config revert creates a duplicate mapping with a
+    // different timestamp)
+    List<ClusterConfigMappingEntity> clusterConfigMappingsForStack = clusterDAO.getClusterConfigMappingsByStack(
+        clusterId, stackId);
+
+    Map<String, ClusterConfigMappingEntity> latestMappingsByType = new HashMap<String, ClusterConfigMappingEntity>();
+    for (ClusterConfigMappingEntity mapping : clusterConfigMappingsForStack) {
+      String type = mapping.getType();
+
+      if (!latestMappingsByType.containsKey(type)) {
+        latestMappingsByType.put(type, mapping);
+        continue;
+      }
+
+      ClusterConfigMappingEntity entityStored = latestMappingsByType.get(type);
+      Long timestampStored = entityStored.getCreateTimestamp();
+      Long timestamp = mapping.getCreateTimestamp();
+      if (timestamp > timestampStored) {
+        latestMappingsByType.put(type, mapping);
       }
     }
 
-    return temp.values();
+    return latestMappingsByType.values();
   }
 
   /**

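A distilled sketch of the Guava idiom applyLatestConfigurations() now relies on, assuming the same collections as in the diff (configMappingEntities from the cluster entity and latestConfigMappingByStack from getLatestConfigMappingsForStack()): indexing the cluster's own mapping entities by identity lets the most recent mappings be flipped to selected on the live instances, so no new collection has to be attached to the cluster entity:

    ImmutableMap<Object, ClusterConfigMappingEntity> ccmMap =
        Maps.uniqueIndex(configMappingEntities, Functions.identity());

    for (ClusterConfigMappingEntity latest : latestConfigMappingByStack) {
      // the same instance that lives on the cluster entity, so the change sticks
      ccmMap.get(latest).setSelected(1);
    }
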
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
index 8e5f37e..1de10f9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
@@ -183,7 +183,7 @@ public class ClustersImpl implements Clusters {
    */
   @Inject
   @Transactional
-  private void loadClustersAndHosts() {
+  void loadClustersAndHosts() {
     List<HostEntity> hostEntities = hostDAO.findAll();
     for (HostEntity hostEntity : hostEntities) {
       Host host = hostFactory.create(hostEntity);

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroup.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroup.java b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroup.java
index 1b29c9b..5a9c574 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroup.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroup.java
@@ -18,13 +18,13 @@
 
 package org.apache.ambari.server.state.configgroup;
 
+import java.util.Map;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.controller.ConfigGroupResponse;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.Host;
 
-import java.util.Map;
-
 /**
  * Configuration group or Config group is a type of Ambari resource that
  * supports grouping of configuration resources and host resources for a
@@ -80,29 +80,20 @@ public interface ConfigGroup {
   public void setDescription(String description);
 
   /**
-   * List of hosts to which configs are applied
+   * Gets an unmodifiable map of {@link Host}s.
+   *
    * @return
    */
   public Map<Long, Host> getHosts();
 
   /**
-   * List of @Config objects
+   * Gets an unmodifiable map of {@link Config}s.
+   *
    * @return
    */
   public Map<String, Config> getConfigurations();
 
   /**
-   * Persist the Config group along with the related host and config mapping
-   * entities to the persistence store
-   */
-  void persist();
-
-  /**
-   * Persist the host mapping entity to the persistence store
-   */
-  void persistHostMapping();
-
-  /**
    * Delete config group and the related host and config mapping
    * entities from the persistence store
    */
@@ -116,13 +107,6 @@ public interface ConfigGroup {
   public void addHost(Host host) throws AmbariException;
 
   /**
-   * Add config to the config group
-   * @param config
-   * @throws AmbariException
-   */
-  public void addConfiguration(Config config) throws AmbariException;
-
-  /**
    * Return @ConfigGroupResponse for the config group
    *
    * @return @ConfigGroupResponse
@@ -131,11 +115,6 @@ public interface ConfigGroup {
   public ConfigGroupResponse convertToResponse() throws AmbariException;
 
   /**
-   * Refresh Config group and the host and config mappings for the group
-   */
-  public void refresh();
-
-  /**
    * Reassign the set of hosts associated with this config group
    * @param hosts
    */

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java
index 9abadf3..906d948 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java
@@ -17,22 +17,38 @@
  */
 package org.apache.ambari.server.state.configgroup;
 
-import com.google.inject.assistedinject.Assisted;
+import java.util.Map;
+
 import org.apache.ambari.server.orm.entities.ConfigGroupEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.Host;
-import org.apache.ambari.server.state.configgroup.ConfigGroup;
 
-import java.util.Map;
+import com.google.inject.assistedinject.Assisted;
 
 public interface ConfigGroupFactory {
-  ConfigGroup createNew(@Assisted("cluster") Cluster cluster,
-                       @Assisted("name") String name,
-                       @Assisted("tag") String tag,
-                       @Assisted("description") String description,
-                       @Assisted("configs") Map<String, Config> configs,
-                       @Assisted("hosts") Map<Long, Host> hosts);
+  /**
+   * Creates and saves a new {@link ConfigGroup}.
+   *
+   * @param cluster
+   * @param name
+   * @param tag
+   * @param description
+   * @param configs
+   * @param hosts
+   * @return
+   */
+  ConfigGroup createNew(@Assisted("cluster") Cluster cluster, @Assisted("name") String name,
+      @Assisted("tag") String tag, @Assisted("description") String description,
+      @Assisted("configs") Map<String, Config> configs, @Assisted("hosts") Map<Long, Host> hosts);
 
+  /**
+   * Instantiates a {@link ConfigGroup} from an existing, persisted entity.
+   *
+   * @param cluster
+   * @param entity
+   * @return
+   */
   ConfigGroup createExisting(Cluster cluster, ConfigGroupEntity entity);
 }

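A minimal sketch of the updated config-group flow, assuming a ConfigGroupFactory instance and pre-built configs/hosts maps; createNew() now creates and persists the group in one step, so the removed persist() and persistHostMapping() calls are gone from callers (the group name, tag, and the addConfigGroup registration shown here are illustrative):

    ConfigGroup configGroup = configGroupFactory.createNew(cluster, "HDFS",
        "perf-hosts", "Overrides for the performance rack", configs, hosts);

    // the group and its host/config mappings are already persisted
    cluster.addConfigGroup(configGroup);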

[09/25] ambari git commit: Merge from branch-2.5

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
index 7ab2856..92fa084 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
@@ -132,13 +132,10 @@ public class ConfigureActionTest {
 
     c.setCurrentStackVersion(HDP_211_STACK);
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {{
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {{
           put("initLimit", "10");
         }}, new HashMap<String, Map<String,String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -196,16 +193,13 @@ public class ConfigureActionTest {
 
     // create a config for zoo.cfg with two values; one is a stack value and the
     // other is custom
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
       {
         put("tickTime", "2000");
         put("foo", "bar");
       }
     }, new HashMap<String, Map<String, String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -262,16 +256,13 @@ public class ConfigureActionTest {
     assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {{
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {{
           put("initLimit", "10");
           put("copyIt", "10");
           put("moveIt", "10");
           put("deleteIt", "10");
         }}, new HashMap<String, Map<String,String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -402,15 +393,12 @@ public class ConfigureActionTest {
     assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
       {
         put("zoo.server.csv", "c6401,c6402,  c6403");
       }
     }, new HashMap<String, Map<String, String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -468,16 +456,13 @@ public class ConfigureActionTest {
     assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
       {
         put("key_to_replace", "My New Cat");
         put("key_with_no_match", "WxyAndZ");
       }
     }, new HashMap<String, Map<String, String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -543,16 +528,13 @@ public class ConfigureActionTest {
     assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
       {
         put("existing", "This exists!");
         put("missing", null);
       }
     }, new HashMap<String, Map<String, String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -604,16 +586,12 @@ public class ConfigureActionTest {
 
     c.setCurrentStackVersion(HDP_211_STACK);
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
       {
         put("fooKey", "barValue");
       }
     }, new HashMap<String, Map<String, String>>());
 
-    config.setTag("version2");
-    config.persist();
-
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -671,7 +649,7 @@ public class ConfigureActionTest {
 
     c.setCurrentStackVersion(HDP_211_STACK);
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
       {
         put("set.key.1", "s1");
         put("set.key.2", "s2");
@@ -680,10 +658,6 @@ public class ConfigureActionTest {
       }
     }, new HashMap<String, Map<String, String>>());
 
-    config.setTag("version2");
-    config.persist();
-
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -769,7 +743,7 @@ public class ConfigureActionTest {
 
     c.setCurrentStackVersion(HDP_211_STACK);
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
       {
         put("set.key.1", "s1");
         put("set.key.2", "s2");
@@ -778,10 +752,6 @@ public class ConfigureActionTest {
       }
     }, new HashMap<String, Map<String, String>>());
 
-    config.setTag("version2");
-    config.persist();
-
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -855,7 +825,7 @@ public class ConfigureActionTest {
 
     c.setCurrentStackVersion(HDP_211_STACK);
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
       {
         put("replace.key.1", "r1");
         put("replace.key.2", "r2");
@@ -865,10 +835,6 @@ public class ConfigureActionTest {
       }
     }, new HashMap<String, Map<String, String>>());
 
-    config.setTag("version2");
-    config.persist();
-
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -951,7 +917,7 @@ public class ConfigureActionTest {
 
     c.setCurrentStackVersion(HDP_211_STACK);
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
       {
         put("replace.key.1", "r1");
         put("replace.key.2", "r2");
@@ -961,10 +927,6 @@ public class ConfigureActionTest {
       }
     }, new HashMap<String, Map<String, String>>());
 
-    config.setTag("version2");
-    config.persist();
-
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -1041,15 +1003,12 @@ public class ConfigureActionTest {
     assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {{
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {{
           put("initLimit", "10");
           put("copy.key.1", "c1");
           put("copy.key.2", "c2");
         }}, new HashMap<String, Map<String,String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -1157,15 +1116,12 @@ public class ConfigureActionTest {
     assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {{
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {{
           put("initLimit", "10");
           put("copy.key.1", "c1");
           put("copy.key.2", "c2");
         }}, new HashMap<String, Map<String,String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -1253,17 +1209,14 @@ public class ConfigureActionTest {
     assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {{
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {{
           put("initLimit", "10");
           put("move.key.1", "m1");
           put("move.key.2", "m2");
           put("move.key.3", "m3");
           put("move.key.4", "m4");
         }}, new HashMap<String, Map<String,String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -1362,17 +1315,15 @@ public class ConfigureActionTest {
     assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {{
+    Config config = cf.createNew(c, "zoo.cfg", "version2",
+        new HashMap<String, String>() {{
           put("initLimit", "10");
           put("move.key.1", "m1");
           put("move.key.2", "m2");
           put("move.key.3", "m3");
           put("move.key.4", "m4");
         }}, new HashMap<String, Map<String,String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -1466,17 +1417,14 @@ public class ConfigureActionTest {
     assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {{
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {{
           put("initLimit", "10");
           put("delete.key.1", "d1");
           put("delete.key.2", "d2");
           put("delete.key.3", "d3");
           put("delete.key.4", "d4");
         }}, new HashMap<String, Map<String,String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -1567,17 +1515,14 @@ public class ConfigureActionTest {
     assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {{
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {{
           put("initLimit", "10");
           put("delete.key.1", "d1");
           put("delete.key.2", "d2");
           put("delete.key.3", "d3");
           put("delete.key.4", "d4");
         }}, new HashMap<String, Map<String,String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -1674,15 +1619,12 @@ public class ConfigureActionTest {
     // service properties will not run!
     installService(c, "ZOOKEEPER");
 
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+    Config config = cf.createNew(c, "zoo.cfg", "version1", new HashMap<String, String>() {
       {
         put("initLimit", "10");
       }
     }, new HashMap<String, Map<String, String>>());
-    config.setTag("version1");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
 
     // add a host component
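
The hunks above all apply the same refactoring, summarized in the condensed sketch below
("props" stands in for the anonymous HashMap of properties built in each test):

    // before: tagging, persistence and cluster registration were separate steps
    Config oldStyle = cf.createNew(c, "zoo.cfg", props, new HashMap<String, Map<String, String>>());
    oldStyle.setTag("version2");
    oldStyle.persist();
    c.addConfig(oldStyle);

    // after: the tag is passed to createNew directly, and the explicit
    // persist()/addConfig() calls are dropped, presumably because the factory
    // now takes care of registering the new config with the cluster
    Config newStyle = cf.createNew(c, "zoo.cfg", "version2", props, new HashMap<String, Map<String, String>>());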

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsersTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsersTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsersTest.java
index 314e955..d4c90b8 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsersTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsersTest.java
@@ -17,7 +17,16 @@
  */
 package org.apache.ambari.server.serveraction.upgrades;
 
-import com.google.inject.Injector;
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.lang.reflect.Field;
+import java.util.HashMap;
+import java.util.Map;
+
 import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper;
 import org.apache.ambari.server.actionmanager.HostRoleCommand;
 import org.apache.ambari.server.agent.CommandReport;
@@ -25,18 +34,11 @@ import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.easymock.EasyMock;
 import org.junit.Before;
 import org.junit.Test;
 
-import java.lang.reflect.Field;
-import java.util.HashMap;
-import java.util.Map;
-
-import static org.easymock.EasyMock.*;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import com.google.inject.Injector;
 
 /**
  * Tests OozieConfigCalculation logic
@@ -53,52 +55,28 @@ public class FixOozieAdminUsersTest {
     clusters = EasyMock.createMock(Clusters.class);
     cluster = EasyMock.createMock(Cluster.class);
 
+    Map<String, String> mockProperties = new HashMap<String, String>() {{
+      put("falcon_user", "falcon");
+    }};
+
+    Config falconEnvConfig = EasyMock.createNiceMock(Config.class);
+    expect(falconEnvConfig.getType()).andReturn("falcon-env").anyTimes();
+    expect(falconEnvConfig.getProperties()).andReturn(mockProperties).anyTimes();
+
+    mockProperties = new HashMap<String, String>() {{
+      put("oozie_admin_users", "oozie, oozie-admin");
+    }};
+
+    Config oozieEnvConfig = EasyMock.createNiceMock(Config.class);
+    expect(oozieEnvConfig.getType()).andReturn("oozie-env").anyTimes();
+    expect(oozieEnvConfig.getProperties()).andReturn(mockProperties).anyTimes();
 
-    Config falconEnvConfig = new ConfigImpl("falcon-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("falcon_user", "falcon");
-      }};
-
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-
-      @Override
-      public void setProperties(Map<String, String> properties) {
-        mockProperties.putAll(properties);
-      }
-
-      @Override
-      public void persist(boolean newConfig) {
-        // no-op
-      }
-    };
-    Config oozieEnvConfig = new ConfigImpl("oozie-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("oozie_admin_users", "oozie, oozie-admin");
-      }};
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-
-      @Override
-      public void setProperties(Map<String, String> properties) {
-        mockProperties.putAll(properties);
-      }
-
-      @Override
-      public void persist(boolean newConfig) {
-        // no-op
-      }
-    };
     expect(cluster.getDesiredConfigByType("falcon-env")).andReturn(falconEnvConfig).atLeastOnce();
     expect(cluster.getDesiredConfigByType("oozie-env")).andReturn(oozieEnvConfig).atLeastOnce();
 
     expect(clusters.getCluster((String) anyObject())).andReturn(cluster).anyTimes();
     expect(injector.getInstance(Clusters.class)).andReturn(clusters).atLeastOnce();
-    replay(injector, clusters);
+    replay(injector, clusters, falconEnvConfig, oozieEnvConfig);
 
     clustersField = FixOozieAdminUsers.class.getDeclaredField("clusters");
     clustersField.setAccessible(true);
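
The same mocking pattern replaces the old anonymous ConfigImpl subclasses throughout the test
changes that follow; condensed, and assuming the EasyMock static imports added above, it reads:

    // Build the desired-config mock once the property map is prepared.
    Map<String, String> props = new HashMap<>();
    props.put("falcon_user", "falcon");

    Config cfg = EasyMock.createNiceMock(Config.class);
    expect(cfg.getType()).andReturn("falcon-env").anyTimes();   // type the upgrade action looks up
    expect(cfg.getProperties()).andReturn(props).anyTimes();    // properties the action reads
    replay(cfg);                                                 // mocks must be replayed before use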

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeActionTest.java
index 4c1d7a3..f8a5373 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeActionTest.java
@@ -17,8 +17,18 @@
  */
 package org.apache.ambari.server.serveraction.upgrades;
 
-import com.google.inject.Injector;
-import junit.framework.Assert;
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+import java.lang.reflect.Field;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
 import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper;
 import org.apache.ambari.server.actionmanager.HostRoleCommand;
 import org.apache.ambari.server.agent.CommandReport;
@@ -26,21 +36,13 @@ import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.easymock.EasyMock;
 import org.junit.Before;
 import org.junit.Test;
 
-import java.lang.reflect.Field;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
+import com.google.inject.Injector;
 
-import static org.easymock.EasyMock.*;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import junit.framework.Assert;
 
 /**
  * Tests HiveEnvClasspathAction logic
@@ -55,99 +57,86 @@ public class HBaseEnvMaxDirectMemorySizeActionTest {
     injector = EasyMock.createMock(Injector.class);
     clusters = EasyMock.createMock(Clusters.class);
     Cluster cluster = EasyMock.createMock(Cluster.class);
-
-    Config hbaseEnv = new ConfigImpl("hbase-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("content","# Set environment variables here.\n" +
-          "\n" +
-          "# The java implementation to use. Java 1.6 required.\n" +
-          "export JAVA_HOME={{java64_home}}\n" +
-          "\n" +
-          "# HBase Configuration directory\n" +
-          "export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n" +
-          "\n" +
-          "# Extra Java CLASSPATH elements. Optional.\n" +
-          "export HBASE_CLASSPATH=${HBASE_CLASSPATH}\n" +
-          "\n" +
-          "# The maximum amount of heap to use, in MB. Default is 1000.\n" +
-          "# export HBASE_HEAPSIZE=1000\n" +
-          "\n" +
-          "# Extra Java runtime options.\n" +
-          "# Below are what we set by default. May only work with SUN JVM.\n" +
-          "# For more on why as well as other possible settings,\n" +
-          "# see http://wiki.apache.org/hadoop/PerformanceTuning\n" +
-          "export SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n" +
-          "# Uncomment below to enable java garbage collection logging.\n" +
-          "# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log -Djava.io.tmpdir={{java_io_tmpdir}}\"\n" +
-          "\n" +
-          "# Uncomment and adjust to enable JMX exporting\n" +
-          "# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n" +
-          "# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n" +
-          "#\n" +
-          "# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\n" +
-          "# If you want to configure BucketCache, specify '-XX: MaxDirectMemorySize=' with proper direct memory size\n" +
-          "# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n" +
-          "# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n" +
-          "\n" +
-          "# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\n" +
-          "export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n" +
-          "\n" +
-          "# Extra ssh options. Empty by default.\n" +
-          "# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n" +
-          "\n" +
-          "# Where log files are stored. $HBASE_HOME/logs by default.\n" +
-          "export HBASE_LOG_DIR={{log_dir}}\n" +
-          "\n" +
-          "# A string representing this instance of hbase. $USER by default.\n" +
-          "# export HBASE_IDENT_STRING=$USER\n" +
-          "\n" +
-          "# The scheduling priority for daemon processes. See 'man nice'.\n" +
-          "# export HBASE_NICENESS=10\n" +
-          "\n" +
-          "# The directory where pid files are stored. /tmp by default.\n" +
-          "export HBASE_PID_DIR={{pid_dir}}\n" +
-          "\n" +
-          "# Seconds to sleep between slave commands. Unset by default. This\n" +
-          "# can be useful in large clusters, where, e.g., slave rsyncs can\n" +
-          "# otherwise arrive faster than the master can service them.\n" +
-          "# export HBASE_SLAVE_SLEEP=0.1\n" +
-          "\n" +
-          "# Tell HBase whether it should manage it's own instance of Zookeeper or not.\n" +
-          "export HBASE_MANAGES_ZK=false\n" +
-          "\n" +
-          "{% if security_enabled %}\n" +
-          "export HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.security.auth.login.config={{client_jaas_config_file}} -Djava.io.tmpdir={{java_io_tmpdir}}\"\n" +
-          "export HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} -Djava.security.auth.login.config={{master_jaas_config_file}}\"\n" +
-          "export HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} -Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n" +
-          "{% else %}\n" +
-          "export HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.io.tmpdir={{java_io_tmpdir}}\"\n" +
-          "export HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}}\"\n" +
-          "export HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n" +
-          "{% endif %}");
-      }};
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-
-      @Override
-      public void setProperties(Map<String, String> properties) {
-        mockProperties.putAll(properties);
-      }
-
-      @Override
-      public void persist(boolean newConfig) {
-        // no-op
-      }
-    };
-
+    Config hbaseEnv = EasyMock.createNiceMock(Config.class);
+
+    Map<String, String> mockProperties = new HashMap<String, String>() {{
+      put("content","# Set environment variables here.\n" +
+        "\n" +
+        "# The java implementation to use. Java 1.6 required.\n" +
+        "export JAVA_HOME={{java64_home}}\n" +
+        "\n" +
+        "# HBase Configuration directory\n" +
+        "export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n" +
+        "\n" +
+        "# Extra Java CLASSPATH elements. Optional.\n" +
+        "export HBASE_CLASSPATH=${HBASE_CLASSPATH}\n" +
+        "\n" +
+        "# The maximum amount of heap to use, in MB. Default is 1000.\n" +
+        "# export HBASE_HEAPSIZE=1000\n" +
+        "\n" +
+        "# Extra Java runtime options.\n" +
+        "# Below are what we set by default. May only work with SUN JVM.\n" +
+        "# For more on why as well as other possible settings,\n" +
+        "# see http://wiki.apache.org/hadoop/PerformanceTuning\n" +
+        "export SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n" +
+        "# Uncomment below to enable java garbage collection logging.\n" +
+        "# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log -Djava.io.tmpdir={{java_io_tmpdir}}\"\n" +
+        "\n" +
+        "# Uncomment and adjust to enable JMX exporting\n" +
+        "# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n" +
+        "# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n" +
+        "#\n" +
+        "# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\n" +
+        "# If you want to configure BucketCache, specify '-XX: MaxDirectMemorySize=' with proper direct memory size\n" +
+        "# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n" +
+        "# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n" +
+        "\n" +
+        "# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\n" +
+        "export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n" +
+        "\n" +
+        "# Extra ssh options. Empty by default.\n" +
+        "# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n" +
+        "\n" +
+        "# Where log files are stored. $HBASE_HOME/logs by default.\n" +
+        "export HBASE_LOG_DIR={{log_dir}}\n" +
+        "\n" +
+        "# A string representing this instance of hbase. $USER by default.\n" +
+        "# export HBASE_IDENT_STRING=$USER\n" +
+        "\n" +
+        "# The scheduling priority for daemon processes. See 'man nice'.\n" +
+        "# export HBASE_NICENESS=10\n" +
+        "\n" +
+        "# The directory where pid files are stored. /tmp by default.\n" +
+        "export HBASE_PID_DIR={{pid_dir}}\n" +
+        "\n" +
+        "# Seconds to sleep between slave commands. Unset by default. This\n" +
+        "# can be useful in large clusters, where, e.g., slave rsyncs can\n" +
+        "# otherwise arrive faster than the master can service them.\n" +
+        "# export HBASE_SLAVE_SLEEP=0.1\n" +
+        "\n" +
+        "# Tell HBase whether it should manage it's own instance of Zookeeper or not.\n" +
+        "export HBASE_MANAGES_ZK=false\n" +
+        "\n" +
+        "{% if security_enabled %}\n" +
+        "export HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.security.auth.login.config={{client_jaas_config_file}} -Djava.io.tmpdir={{java_io_tmpdir}}\"\n" +
+        "export HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} -Djava.security.auth.login.config={{master_jaas_config_file}}\"\n" +
+        "export HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} -Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n" +
+        "{% else %}\n" +
+        "export HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.io.tmpdir={{java_io_tmpdir}}\"\n" +
+        "export HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}}\"\n" +
+        "export HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n" +
+        "{% endif %}");
+    }};
+
+    expect(hbaseEnv.getType()).andReturn("hbase-env").anyTimes();
+    expect(hbaseEnv.getProperties()).andReturn(mockProperties).anyTimes();
 
     expect(cluster.getDesiredConfigByType("hbase-env")).andReturn(hbaseEnv).atLeastOnce();
 
     expect(clusters.getCluster((String) anyObject())).andReturn(cluster).anyTimes();
     expect(injector.getInstance(Clusters.class)).andReturn(clusters).atLeastOnce();
 
-    replay(injector, clusters, cluster);
+    replay(injector, clusters, cluster, hbaseEnv);
 
     m_clusterField = HBaseEnvMaxDirectMemorySizeAction.class.getDeclaredField("clusters");
     m_clusterField.setAccessible(true);

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathActionTest.java
index 9bde631..8926203 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathActionTest.java
@@ -17,8 +17,18 @@
  */
 package org.apache.ambari.server.serveraction.upgrades;
 
-import com.google.inject.Injector;
-import junit.framework.Assert;
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+import java.lang.reflect.Field;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
 import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper;
 import org.apache.ambari.server.actionmanager.HostRoleCommand;
 import org.apache.ambari.server.agent.CommandReport;
@@ -26,22 +36,13 @@ import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.easymock.EasyMock;
 import org.junit.Before;
 import org.junit.Test;
 
-import java.lang.reflect.Field;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
+import com.google.inject.Injector;
 
-import static org.easymock.EasyMock.anyObject;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
+import junit.framework.Assert;
 
 /**
  * Tests HiveEnvClasspathAction logic
@@ -57,79 +58,66 @@ public class HiveEnvClasspathActionTest {
     m_clusters = EasyMock.createMock(Clusters.class);
     Cluster cluster = EasyMock.createMock(Cluster.class);
 
-    Config hiveEnv = new ConfigImpl("hive-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("content", "      export HADOOP_USER_CLASSPATH_FIRST=true  #this prevents old metrics libs from mapreduce lib from bringing in old jar deps overriding HIVE_LIB\n" +
-          "      if [ \"$SERVICE\" = \"cli\" ]; then\n" +
-          "      if [ -z \"$DEBUG\" ]; then\n" +
-          "      export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseNUMA -XX:+UseParallelGC -XX:-UseGCOverheadLimit\"\n" +
-          "      else\n" +
-          "      export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit\"\n" +
-          "      fi\n" +
-          "      fi\n" +
-          "\n" +
-          "      # The heap size of the jvm stared by hive shell script can be controlled via:\n" +
-          "\n" +
-          "      if [ \"$SERVICE\" = \"metastore\" ]; then\n" +
-          "      export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore\n" +
-          "      else\n" +
-          "      export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client\n" +
-          "      fi\n" +
-          "\n" +
-          "      export HADOOP_CLIENT_OPTS=\"$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m\"\n" +
-          "\n" +
-          "      # Larger heap size may be required when running queries over large number of files or partitions.\n" +
-          "      # By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be\n" +
-          "      # appropriate for hive server (hwi etc).\n" +
-          "\n" +
-          "\n" +
-          "      # Set HADOOP_HOME to point to a specific hadoop install directory\n" +
-          "      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n" +
-          "\n" +
-          "      # Hive Configuration Directory can be controlled by:\n" +
-          "      export HIVE_CONF_DIR=test\n" +
-          "\n" +
-          "      # Folder containing extra libraries required for hive compilation/execution can be controlled by:\n" +
-          "      if [ \"${HIVE_AUX_JARS_PATH}\" != \"\" ]; then\n" +
-          "      if [ -f \"${HIVE_AUX_JARS_PATH}\" ]; then\n" +
-          "      export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}\n" +
-          "      elif [ -d \"/usr/hdp/current/hive-webhcat/share/hcatalog\" ]; then\n" +
-          "      export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar\n" +
-          "      fi\n" +
-          "      elif [ -d \"/usr/hdp/current/hive-webhcat/share/hcatalog\" ]; then\n" +
-          "      export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar\n" +
-          "      fi\n" +
-          "\n" +
-          "      export METASTORE_PORT={{hive_metastore_port}}\n" +
-          "\n" +
-          "      {% if sqla_db_used or lib_dir_available %}\n" +
-          "      export LD_LIBRARY_PATH=\"$LD_LIBRARY_PATH:{{jdbc_libs_dir}}\"\n" +
-          "      export JAVA_LIBRARY_PATH=\"$JAVA_LIBRARY_PATH:{{jdbc_libs_dir}}\"\n" +
-          "      {% endif %}");
-      }};
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-
-      @Override
-      public void setProperties(Map<String, String> properties) {
-        mockProperties.putAll(properties);
-      }
-
-      @Override
-      public void persist(boolean newConfig) {
-        // no-op
-      }
-    };
-
+    Map<String, String> mockProperties = new HashMap<String, String>() {{
+      put("content", "      export HADOOP_USER_CLASSPATH_FIRST=true  #this prevents old metrics libs from mapreduce lib from bringing in old jar deps overriding HIVE_LIB\n" +
+        "      if [ \"$SERVICE\" = \"cli\" ]; then\n" +
+        "      if [ -z \"$DEBUG\" ]; then\n" +
+        "      export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseNUMA -XX:+UseParallelGC -XX:-UseGCOverheadLimit\"\n" +
+        "      else\n" +
+        "      export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit\"\n" +
+        "      fi\n" +
+        "      fi\n" +
+        "\n" +
+        "      # The heap size of the jvm stared by hive shell script can be controlled via:\n" +
+        "\n" +
+        "      if [ \"$SERVICE\" = \"metastore\" ]; then\n" +
+        "      export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore\n" +
+        "      else\n" +
+        "      export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client\n" +
+        "      fi\n" +
+        "\n" +
+        "      export HADOOP_CLIENT_OPTS=\"$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m\"\n" +
+        "\n" +
+        "      # Larger heap size may be required when running queries over large number of files or partitions.\n" +
+        "      # By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be\n" +
+        "      # appropriate for hive server (hwi etc).\n" +
+        "\n" +
+        "\n" +
+        "      # Set HADOOP_HOME to point to a specific hadoop install directory\n" +
+        "      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n" +
+        "\n" +
+        "      # Hive Configuration Directory can be controlled by:\n" +
+        "      export HIVE_CONF_DIR=test\n" +
+        "\n" +
+        "      # Folder containing extra libraries required for hive compilation/execution can be controlled by:\n" +
+        "      if [ \"${HIVE_AUX_JARS_PATH}\" != \"\" ]; then\n" +
+        "      if [ -f \"${HIVE_AUX_JARS_PATH}\" ]; then\n" +
+        "      export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}\n" +
+        "      elif [ -d \"/usr/hdp/current/hive-webhcat/share/hcatalog\" ]; then\n" +
+        "      export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar\n" +
+        "      fi\n" +
+        "      elif [ -d \"/usr/hdp/current/hive-webhcat/share/hcatalog\" ]; then\n" +
+        "      export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar\n" +
+        "      fi\n" +
+        "\n" +
+        "      export METASTORE_PORT={{hive_metastore_port}}\n" +
+        "\n" +
+        "      {% if sqla_db_used or lib_dir_available %}\n" +
+        "      export LD_LIBRARY_PATH=\"$LD_LIBRARY_PATH:{{jdbc_libs_dir}}\"\n" +
+        "      export JAVA_LIBRARY_PATH=\"$JAVA_LIBRARY_PATH:{{jdbc_libs_dir}}\"\n" +
+        "      {% endif %}");
+    }};
+
+    Config hiveEnv = EasyMock.createNiceMock(Config.class);
+    expect(hiveEnv.getType()).andReturn("hive-env").anyTimes();
+    expect(hiveEnv.getProperties()).andReturn(mockProperties).anyTimes();
 
     expect(cluster.getDesiredConfigByType("hive-env")).andReturn(hiveEnv).atLeastOnce();
 
     expect(m_clusters.getCluster((String) anyObject())).andReturn(cluster).anyTimes();
     expect(m_injector.getInstance(Clusters.class)).andReturn(m_clusters).atLeastOnce();
 
-    replay(m_injector, m_clusters, cluster);
+    replay(m_injector, m_clusters, cluster, hiveEnv);
 
     m_clusterField = HiveEnvClasspathAction.class.getDeclaredField("clusters");
     m_clusterField.setAccessible(true);

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigActionTest.java
index 907194c..cd5eb9d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigActionTest.java
@@ -91,7 +91,7 @@ public class HiveZKQuorumConfigActionTest {
     m_hiveSiteConfig.setProperties(EasyMock.anyObject(Map.class));
     EasyMock.expectLastCall().once();
 
-    m_hiveSiteConfig.persist(false);
+    m_hiveSiteConfig.save();
     EasyMock.expectLastCall().once();
 
     EasyMock.expect(m_cluster.getDesiredConfigByType(HiveZKQuorumConfigAction.HIVE_SITE_CONFIG_TYPE)).andReturn(m_hiveSiteConfig).atLeastOnce();

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/KerberosKeytabsActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/KerberosKeytabsActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/KerberosKeytabsActionTest.java
index d374d75..d18f727 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/KerberosKeytabsActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/KerberosKeytabsActionTest.java
@@ -36,7 +36,6 @@ import org.apache.ambari.server.controller.KerberosHelper;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.apache.ambari.server.state.SecurityType;
 import org.apache.commons.lang.StringUtils;
 import org.easymock.EasyMock;
@@ -65,26 +64,13 @@ public class KerberosKeytabsActionTest {
     m_clusters = EasyMock.createMock(Clusters.class);
     m_kerberosHelper = EasyMock.createMock(KerberosHelper.class);
 
-    m_kerberosConfig = new ConfigImpl("kerberos-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("kerberos-env", "");
-      }};
+    Map<String, String> mockProperties = new HashMap<String, String>() {{
+      put("kerberos-env", "");
+    }};
 
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-
-      @Override
-      public void setProperties(Map<String, String> properties) {
-        mockProperties.putAll(properties);
-      }
-
-      @Override
-      public void persist(boolean newConfig) {
-        // no-op
-      }
-    };
+    m_kerberosConfig = EasyMock.createNiceMock(Config.class);
+    expect(m_kerberosConfig.getType()).andReturn("kerberos-env").anyTimes();
+    expect(m_kerberosConfig.getProperties()).andReturn(mockProperties).anyTimes();
 
     Cluster cluster = EasyMock.createMock(Cluster.class);
 
@@ -92,7 +78,7 @@ public class KerberosKeytabsActionTest {
     expect(cluster.getSecurityType()).andReturn(SecurityType.KERBEROS).anyTimes();
     expect(m_clusters.getCluster((String) anyObject())).andReturn(cluster).anyTimes();
 
-    replay(m_clusters, cluster);
+    replay(m_clusters, cluster, m_kerberosConfig);
 
     m_injector = Guice.createInjector(new AbstractModule() {
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculationTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculationTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculationTest.java
index e673714..7a6a6c3 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculationTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculationTest.java
@@ -35,7 +35,6 @@ import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.easymock.EasyMock;
 import org.junit.Before;
 import org.junit.Test;
@@ -57,54 +56,27 @@ public class RangerConfigCalculationTest {
     m_clusters = EasyMock.createMock(Clusters.class);
     Cluster cluster = EasyMock.createMock(Cluster.class);
 
-    Config adminConfig = new ConfigImpl("admin-properties") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("DB_FLAVOR", "MYSQL");
-        put("db_host", "host1");
-        put("db_name", "ranger");
-        put("audit_db_name", "ranger_audit");
-      }};
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-    };
-
-    Config adminSiteConfig = new ConfigImpl("admin-properties") {
-      Map<String, String> mockProperties = new HashMap<String, String>();
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-
-      @Override
-      public void setProperties(Map<String, String> properties) {
-        mockProperties.putAll(properties);
-      }
-
-      @Override
-      public void persist(boolean newConfig) {
-        // no-op
-      }
-    };
-
-    Config rangerEnv = new ConfigImpl("ranger-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>();
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-
-      @Override
-      public void setProperties(Map<String, String> properties) {
-        mockProperties.putAll(properties);
-      }
-
-      @Override
-      public void persist(boolean newConfig) {
-        // no-op
-      }
-    };
+    Map<String, String> mockProperties = new HashMap<String, String>() {{
+      put("DB_FLAVOR", "MYSQL");
+      put("db_host", "host1");
+      put("db_name", "ranger");
+      put("audit_db_name", "ranger_audit");
+    }};
+
+    Config adminConfig = EasyMock.createNiceMock(Config.class);
+    expect(adminConfig.getType()).andReturn("admin-properties").anyTimes();
+    expect(adminConfig.getProperties()).andReturn(mockProperties).anyTimes();
+
+    mockProperties = new HashMap<String, String>();
+
+    Config adminSiteConfig = EasyMock.createNiceMock(Config.class);
+    expect(adminSiteConfig.getType()).andReturn("admin-properties").anyTimes();
+    expect(adminSiteConfig.getProperties()).andReturn(mockProperties).anyTimes();
+
+    Config rangerEnv = EasyMock.createNiceMock(Config.class);
+    expect(rangerEnv.getType()).andReturn("ranger-env").anyTimes();
+    expect(rangerEnv.getProperties()).andReturn(mockProperties).anyTimes();
+
 
     expect(cluster.getDesiredConfigByType("admin-properties")).andReturn(adminConfig).atLeastOnce();
     expect(cluster.getDesiredConfigByType("ranger-admin-site")).andReturn(adminSiteConfig).atLeastOnce();
@@ -113,7 +85,7 @@ public class RangerConfigCalculationTest {
     expect(m_clusters.getCluster((String) anyObject())).andReturn(cluster).anyTimes();
     expect(m_injector.getInstance(Clusters.class)).andReturn(m_clusters).atLeastOnce();
 
-    replay(m_injector, m_clusters, cluster);
+    replay(m_injector, m_clusters, cluster, adminConfig, adminSiteConfig, rangerEnv);
 
     m_clusterField = RangerConfigCalculation.class.getDeclaredField("m_clusters");
     m_clusterField.setAccessible(true);

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculationTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculationTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculationTest.java
index 25acb45..06092c3 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculationTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculationTest.java
@@ -25,6 +25,7 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
 import java.lang.reflect.Field;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -34,9 +35,8 @@ import org.apache.ambari.server.agent.CommandReport;
 import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.SecurityType;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
+import org.apache.ambari.server.state.SecurityType;
 import org.easymock.EasyMock;
 import org.junit.Before;
 import org.junit.Test;
@@ -59,124 +59,50 @@ public class RangerKerberosConfigCalculationTest {
     m_clusters = EasyMock.createMock(Clusters.class);
     Cluster cluster = EasyMock.createMock(Cluster.class);
 
-    Config hadoopConfig = new ConfigImpl("hadoop-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("hdfs_user", "hdfs");
-      }};
-
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-    };
-
-
-    Config hiveConfig = new ConfigImpl("hive-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("hive_user", "hive");
-      }};
-
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-    };
-
-    Config yarnConfig = new ConfigImpl("yarn-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("yarn_user", "yarn");
-      }};
-
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-    };
-
-    Config hbaseConfig = new ConfigImpl("hbase-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("hbase_user", "hbase");
-      }};
-
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-    };
-
-    Config knoxConfig = new ConfigImpl("knox-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("knox_user", "knox");
-      }};
-
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-    };
-
-    Config stormConfig = new ConfigImpl("storm-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("storm_user", "storm");
-        put("storm_principal_name", "storm-c1@EXAMLE.COM");
-      }};
-
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-    };
-
-    Config kafkaConfig = new ConfigImpl("kafka-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("kafka_user", "kafka");
-      }};
-
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-    };
-
-    Config kmsConfig = new ConfigImpl("kms-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("kms_user", "kms");
-      }};
-
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-    };
-
-    Config hdfsSiteConfig = new ConfigImpl("hdfs-site") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("dfs.web.authentication.kerberos.keytab", "/etc/security/keytabs/spnego.kytab");
-      }};
-
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-    };
-
-    Config adminSiteConfig = new ConfigImpl("ranger-admin-site") {
-      Map<String, String> mockProperties = new HashMap<String, String>();
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-
-      @Override
-      public void setProperties(Map<String, String> properties) {
-        mockProperties.putAll(properties);
-      }
-
-      @Override
-      public void persist(boolean newConfig) {
-        // no-op
-      }
-    };
+    Config hadoopConfig = EasyMock.createNiceMock(Config.class);
+    expect(hadoopConfig.getType()).andReturn("hadoop-env").anyTimes();
+    expect(hadoopConfig.getProperties()).andReturn(Collections.singletonMap("hdfs_user", "hdfs")).anyTimes();
+
+    Config hiveConfig = EasyMock.createNiceMock(Config.class);
+    expect(hiveConfig.getType()).andReturn("hive-env").anyTimes();
+    expect(hiveConfig.getProperties()).andReturn(Collections.singletonMap("hive_user", "hive")).anyTimes();
+
+    Config yarnConfig = EasyMock.createNiceMock(Config.class);
+    expect(yarnConfig.getType()).andReturn("yarn-env").anyTimes();
+    expect(yarnConfig.getProperties()).andReturn(Collections.singletonMap("yarn_user", "yarn")).anyTimes();
+
+    Config hbaseConfig = EasyMock.createNiceMock(Config.class);
+    expect(hbaseConfig.getType()).andReturn("hbase-env").anyTimes();
+    expect(hbaseConfig.getProperties()).andReturn(Collections.singletonMap("hbase_user", "hbase")).anyTimes();
+
+    Config knoxConfig = EasyMock.createNiceMock(Config.class);
+    expect(knoxConfig.getType()).andReturn("knox-env").anyTimes();
+    expect(knoxConfig.getProperties()).andReturn(Collections.singletonMap("knox_user", "knox")).anyTimes();
+
+    Map<String, String> mockProperties = new HashMap<String, String>() {{
+      put("storm_user", "storm");
+      put("storm_principal_name", "storm-c1@EXAMLE.COM");
+    }};
+
+    Config stormConfig = EasyMock.createNiceMock(Config.class);
+    expect(stormConfig.getType()).andReturn("storm-env").anyTimes();
+    expect(stormConfig.getProperties()).andReturn(mockProperties).anyTimes();
+
+    Config kafkaConfig = EasyMock.createNiceMock(Config.class);
+    expect(kafkaConfig.getType()).andReturn("kafka-env").anyTimes();
+    expect(kafkaConfig.getProperties()).andReturn(Collections.singletonMap("kafka_user", "kafka")).anyTimes();
+
+    Config kmsConfig = EasyMock.createNiceMock(Config.class);
+    expect(kmsConfig.getType()).andReturn("kms-env").anyTimes();
+    expect(kmsConfig.getProperties()).andReturn(Collections.singletonMap("kms_user", "kms")).anyTimes();
+
+    Config hdfsSiteConfig = EasyMock.createNiceMock(Config.class);
+    expect(hdfsSiteConfig.getType()).andReturn("hdfs-site").anyTimes();
+    expect(hdfsSiteConfig.getProperties()).andReturn(Collections.singletonMap("dfs.web.authentication.kerberos.keytab", "/etc/security/keytabs/spnego.kytab")).anyTimes();
+
+    Config adminSiteConfig = EasyMock.createNiceMock(Config.class);
+    expect(adminSiteConfig.getType()).andReturn("ranger-admin-site").anyTimes();
+    expect(adminSiteConfig.getProperties()).andReturn(new HashMap<String,String>()).anyTimes();
 
     expect(cluster.getDesiredConfigByType("hadoop-env")).andReturn(hadoopConfig).atLeastOnce();
     expect(cluster.getDesiredConfigByType("hive-env")).andReturn(hiveConfig).atLeastOnce();
@@ -193,7 +119,8 @@ public class RangerKerberosConfigCalculationTest {
     expect(m_injector.getInstance(Clusters.class)).andReturn(m_clusters).atLeastOnce();
     expect(cluster.getSecurityType()).andReturn(SecurityType.KERBEROS).anyTimes();
 
-    replay(m_injector, m_clusters, cluster);
+    replay(m_injector, m_clusters, cluster, hadoopConfig, hiveConfig, yarnConfig, hbaseConfig,
+        knoxConfig, stormConfig, kafkaConfig, kmsConfig, hdfsSiteConfig, adminSiteConfig);
 
     m_clusterField = RangerKerberosConfigCalculation.class.getDeclaredField("m_clusters");
     m_clusterField.setAccessible(true);
@@ -236,7 +163,7 @@ public class RangerKerberosConfigCalculationTest {
     assertTrue(map.containsKey("ranger.plugins.storm.serviceuser"));
     assertTrue(map.containsKey("ranger.plugins.kafka.serviceuser"));
     assertTrue(map.containsKey("ranger.plugins.kms.serviceuser"));
-    assertTrue(map.containsKey("ranger.spnego.kerberos.keytab"));    
+    assertTrue(map.containsKey("ranger.spnego.kerberos.keytab"));
 
 
     assertEquals("hdfs", map.get("ranger.plugins.hdfs.serviceuser"));
@@ -254,4 +181,4 @@ public class RangerKerberosConfigCalculationTest {
 
   }
 
-} 
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfigTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfigTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfigTest.java
index e000c65..7a0d66f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfigTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfigTest.java
@@ -34,9 +34,8 @@ import org.apache.ambari.server.agent.CommandReport;
 import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.SecurityType;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
+import org.apache.ambari.server.state.SecurityType;
 import org.easymock.EasyMock;
 import org.junit.Before;
 import org.junit.Test;
@@ -55,34 +54,19 @@ public class RangerKmsProxyConfigTest {
     m_clusters = EasyMock.createMock(Clusters.class);
     Cluster cluster = EasyMock.createMock(Cluster.class);
 
-    Config rangerEnv = new ConfigImpl("ranger-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
+    Map<String, String> mockProperties = new HashMap<String, String>() {
+      {
         put("ranger_user", "ranger");
-      }};
-
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
       }
     };
 
-    Config kmsSite = new ConfigImpl("kms-site") {
-      Map<String, String> mockProperties = new HashMap<String, String>();
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-
-      @Override
-      public void setProperties(Map<String, String> properties) {
-        mockProperties.putAll(properties);
-      }
+    Config rangerEnv = EasyMock.createNiceMock(Config.class);
+    expect(rangerEnv.getType()).andReturn("ranger-env").anyTimes();
+    expect(rangerEnv.getProperties()).andReturn(mockProperties).anyTimes();
 
-      @Override
-      public void persist(boolean newConfig) {
-        // no-op
-      }
-    };
+    Config kmsSite = EasyMock.createNiceMock(Config.class);
+    expect(kmsSite.getType()).andReturn("kms-site").anyTimes();
+    expect(kmsSite.getProperties()).andReturn(mockProperties).anyTimes();
 
     expect(cluster.getDesiredConfigByType("ranger-env")).andReturn(rangerEnv).atLeastOnce();
     expect(cluster.getDesiredConfigByType("kms-site")).andReturn(kmsSite).atLeastOnce();
@@ -90,7 +74,7 @@ public class RangerKmsProxyConfigTest {
     expect(m_injector.getInstance(Clusters.class)).andReturn(m_clusters).atLeastOnce();
     expect(cluster.getSecurityType()).andReturn(SecurityType.KERBEROS).anyTimes();
 
-    replay(m_injector, m_clusters, cluster);
+    replay(m_injector, m_clusters, cluster, rangerEnv, kmsSite);
 
     m_clusterField = RangerKmsProxyConfig.class.getDeclaredField("m_clusters");
     m_clusterField.setAccessible(true);

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfigTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfigTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfigTest.java
index e65a824..518ab42 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfigTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfigTest.java
@@ -36,7 +36,6 @@ import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.easymock.EasyMock;
 import org.junit.Before;
 import org.junit.Test;
@@ -58,33 +57,20 @@ public class SparkShufflePropertyConfigTest {
     m_clusters = EasyMock.createMock(Clusters.class);
     cluster = EasyMock.createMock(Cluster.class);
 
+    Map<String, String> mockProperties = new HashMap<String, String>() {{
+      put("yarn.nodemanager.aux-services", "some_service");
+    }};
 
-    Config adminConfig = new ConfigImpl("yarn-site") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("yarn.nodemanager.aux-services", "some_service");
-      }};
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
+    Config yarnConfig = EasyMock.createNiceMock(Config.class);
+    expect(yarnConfig.getType()).andReturn("yarn-site").anyTimes();
+    expect(yarnConfig.getProperties()).andReturn(mockProperties).anyTimes();
 
-      @Override
-      public void setProperties(Map<String, String> properties) {
-        mockProperties.putAll(properties);
-      }
-
-      @Override
-      public void persist(boolean newConfig) {
-        // no-op
-      }
-    };
-
-    expect(cluster.getDesiredConfigByType("yarn-site")).andReturn(adminConfig).atLeastOnce();
+    expect(cluster.getDesiredConfigByType("yarn-site")).andReturn(yarnConfig).atLeastOnce();
 
     expect(m_clusters.getCluster((String) anyObject())).andReturn(cluster).anyTimes();
     expect(m_injector.getInstance(Clusters.class)).andReturn(m_clusters).atLeastOnce();
 
-    replay(m_injector, m_clusters);
+    replay(m_injector, m_clusters, yarnConfig);
 
     clusterField = SparkShufflePropertyConfig.class.getDeclaredField("clusters");
     clusterField.setAccessible(true);

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
index 8f9d4f4..262b10a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
@@ -67,7 +67,7 @@ import org.apache.ambari.server.serveraction.ServerAction;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
+import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.RepositoryInfo;
 import org.apache.ambari.server.state.RepositoryVersionState;
@@ -153,6 +153,8 @@ public class UpgradeActionTest {
   private AmbariMetaInfo ambariMetaInfo;
   @Inject
   private FinalizeUpgradeAction finalizeUpgradeAction;
+  @Inject
+  private ConfigFactory configFactory;
 
   @Before
   public void setup() throws Exception {
@@ -1043,24 +1045,22 @@ public class UpgradeActionTest {
     properties.put("a", "a1");
     properties.put("b", "b1");
 
-    Config c1 = new ConfigImpl(cluster, "zookeeper-env", properties, propertiesAttributes, m_injector);
+    configFactory.createNew(cluster, "zookeeper-env", "version-" + System.currentTimeMillis(),
+        properties, propertiesAttributes);
+
     properties.put("zookeeper_a", "value_1");
     properties.put("zookeeper_b", "value_2");
 
-    Config c2 = new ConfigImpl(cluster, "hdfs-site", properties, propertiesAttributes, m_injector);
+    configFactory.createNew(cluster, "hdfs-site", "version-" + System.currentTimeMillis(),
+        properties, propertiesAttributes);
+
     properties.put("hdfs_a", "value_3");
     properties.put("hdfs_b", "value_4");
 
-    Config c3 = new ConfigImpl(cluster, "core-site", properties, propertiesAttributes, m_injector);
-    Config c4 = new ConfigImpl(cluster, "foo-site", properties, propertiesAttributes, m_injector);
-
-    cluster.addConfig(c1);
-    cluster.addConfig(c2);
-    cluster.addConfig(c3);
-    cluster.addConfig(c4);
-    c1.persist();
-    c2.persist();
-    c3.persist();
-    c4.persist();
+    configFactory.createNew(cluster, "core-site", "version-" + System.currentTimeMillis(),
+        properties, propertiesAttributes);
+
+    configFactory.createNew(cluster, "foo-site", "version-" + System.currentTimeMillis(),
+        properties, propertiesAttributes);
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java
index 80665a5..f55bf62 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java
@@ -89,8 +89,7 @@ public class ConfigGroupTest {
     Map<String, String> attributes = new HashMap<String, String>();
     attributes.put("a", "true");
     propertiesAttributes.put("final", attributes);
-    Config config = configFactory.createNew(cluster, "hdfs-site", properties, propertiesAttributes);
-    config.setTag("testversion");
+    Config config = configFactory.createNew(cluster, "hdfs-site", "testversion", properties, propertiesAttributes);
 
     Host host = clusters.getHost("h1");
 
@@ -103,7 +102,6 @@ public class ConfigGroupTest {
     ConfigGroup configGroup = configGroupFactory.createNew(cluster, "cg-test",
       "HDFS", "New HDFS configs for h1", configs, hosts);
 
-    configGroup.persist();
     cluster.addConfigGroup(configGroup);
     return configGroup;
   }
@@ -154,28 +152,28 @@ public class ConfigGroupTest {
     Map<String, String> attributes = new HashMap<String, String>();
     attributes.put("key1", "true");
     propertiesAttributes.put("final", attributes);
-    Config config = new ConfigImpl("test-site");
-    config.setProperties(properties);
-    config.setPropertiesAttributes(propertiesAttributes);
-    config.setTag("version100");
 
-    configGroup.addConfiguration(config);
+    Config config = configFactory.createNew(cluster, "test-site", "version100", properties, propertiesAttributes);
+    Map<String, Config> newConfigurations = new HashMap<>(configGroup.getConfigurations());
+    newConfigurations.put(config.getType(), config);
+
+    configGroup.setConfigurations(newConfigurations);
     Assert.assertEquals(2, configGroup.getConfigurations().values().size());
 
+    // re-request it and verify that the config was added
+    configGroupEntity = configGroupDAO.findById(configGroup.getId());
+    Assert.assertEquals(2, configGroupEntity.getConfigGroupConfigMappingEntities().size());
+
     configGroup.setName("NewName");
     configGroup.setDescription("NewDesc");
     configGroup.setTag("NewTag");
 
     // Save
-    configGroup.persist();
-    configGroup.refresh();
     configGroupEntity = configGroupDAO.findByName("NewName");
 
     Assert.assertNotNull(configGroupEntity);
-    Assert.assertEquals(2, configGroupEntity
-      .getConfigGroupHostMappingEntities().size());
-    Assert.assertEquals(2, configGroupEntity
-      .getConfigGroupConfigMappingEntities().size());
+    Assert.assertEquals(2, configGroupEntity.getConfigGroupHostMappingEntities().size());
+    Assert.assertEquals(2, configGroupEntity.getConfigGroupConfigMappingEntities().size());
     Assert.assertEquals("NewTag", configGroupEntity.getTag());
     Assert.assertEquals("NewDesc", configGroupEntity.getDescription());
     Assert.assertNotNull(cluster.getConfig("test-site", "version100"));

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
index d50c92d..526e462 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
@@ -38,7 +38,6 @@ import javax.persistence.EntityManager;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.RequestFactory;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.AmbariCustomCommandExecutionHelper;
 import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.controller.ClusterRequest;
@@ -88,6 +87,7 @@ public class ConfigHelperTest {
     private static ConfigHelper configHelper;
     private static AmbariManagementController managementController;
     private static AmbariMetaInfo metaInfo;
+    private static ConfigFactory configFactory;
 
     @BeforeClass
     public static void setup() throws Exception {
@@ -102,6 +102,7 @@ public class ConfigHelperTest {
       configHelper = injector.getInstance(ConfigHelper.class);
       managementController = injector.getInstance(AmbariManagementController.class);
       metaInfo = injector.getInstance(AmbariMetaInfo.class);
+      configFactory = injector.getInstance(ConfigFactory.class);
 
       clusterName = "c1";
       clusters.addCluster(clusterName, new StackId("HDP-2.0.6"));
@@ -251,7 +252,6 @@ public class ConfigHelperTest {
       LOG.info("Config group created with tag " + tag);
       configGroup.setTag(tag);
 
-      configGroup.persist();
       cluster.addConfigGroup(configGroup);
 
       return configGroup.getId();
@@ -339,14 +339,11 @@ public class ConfigHelperTest {
         add(clusterRequest6);
       }}, null);
 
-      final Config config = new ConfigImpl("ams-env");
-      config.setTag("version122");
-
       Map<String, String> properties = new HashMap<String, String>();
       properties.put("a", "b");
       properties.put("c", "d");
-      config.setProperties(properties);
 
+      final Config config = configFactory.createNew(cluster, "ams-env", "version122", properties, null);
       Long groupId = addConfigGroup("g1", "t1", new ArrayList<String>() {{
         add("h1");
       }}, new ArrayList<Config>() {{
@@ -419,19 +416,14 @@ public class ConfigHelperTest {
         add(clusterRequest3);
       }}, null);
 
-      final Config config1 = new ConfigImpl("core-site2");
-      config1.setTag("version122");
-
       Map<String, String> properties = new HashMap<String, String>();
       properties.put("a", "b");
       properties.put("c", "d");
-      config1.setProperties(properties);
+      final Config config1 = configFactory.createNew(cluster, "core-site2", "version122", properties, null);
 
-      final Config config2 = new ConfigImpl("global2");
-      config2.setTag("version122");
       Map<String, String> properties2 = new HashMap<String, String>();
       properties2.put("namenode_heapsize", "1111");
-      config2.setProperties(properties2);
+      final Config config2 = configFactory.createNew(cluster, "global2", "version122", properties2, null);
 
       Long groupId = addConfigGroup("g2", "t1", new ArrayList<String>() {{
         add("h1");
@@ -511,24 +503,23 @@ public class ConfigHelperTest {
       }}, null);
 
 
-      final Config config1 = new ConfigImpl("core-site3");
-      config1.setTag("version122");
-
       Map<String, String> attributes = new HashMap<String, String>();
       attributes.put("fs.trash.interval", "11");
       attributes.put("b", "y");
       Map<String, Map<String, String>> config1Attributes = new HashMap<String, Map<String, String>>();
       config1Attributes.put("attribute1", attributes);
-      config1.setPropertiesAttributes(config1Attributes);
 
-      final Config config2 = new ConfigImpl("global3");
-      config2.setTag("version122");
+      final Config config1 = configFactory.createNew(cluster, "core-site3", "version122",
+          new HashMap<String, String>(), config1Attributes);
+
       attributes = new HashMap<String, String>();
       attributes.put("namenode_heapsize", "z");
       attributes.put("c", "q");
       Map<String, Map<String, String>> config2Attributes = new HashMap<String, Map<String, String>>();
       config2Attributes.put("attribute2", attributes);
-      config2.setPropertiesAttributes(config2Attributes);
+
+      final Config config2 = configFactory.createNew(cluster, "global3", "version122",
+          new HashMap<String, String>(), config2Attributes);
 
       Long groupId = addConfigGroup("g3", "t1", new ArrayList<String>() {{
         add("h3");
@@ -690,7 +681,8 @@ public class ConfigHelperTest {
       confGroupProperties.put("b", "any");
       confGroupProperties.put("c", "any");
 
-      Config overrideConfig = new ConfigImpl(cluster, "type", confGroupProperties, confGroupAttributes, injector);
+      Config overrideConfig = configFactory.createNew(cluster, "type", null,
+          confGroupProperties, confGroupAttributes);
 
       Map<String, Map<String, String>> result
           = configHelper.overrideAttributes(overrideConfig, persistedAttributes);
@@ -718,7 +710,8 @@ public class ConfigHelperTest {
       confGroupProperties.put("b", "any");
       confGroupProperties.put("c", "any");
 
-      Config overrideConfig = new ConfigImpl(cluster, "type", confGroupProperties, confGroupAttributes, injector);
+      Config overrideConfig = configFactory.createNew(cluster, "type", null,
+          confGroupProperties, confGroupAttributes);
 
       Map<String, Map<String, String>> result
           = configHelper.overrideAttributes(overrideConfig, persistedAttributes);
@@ -744,7 +737,8 @@ public class ConfigHelperTest {
       confGroupProperties.put("b", "any");
       confGroupProperties.put("c", "any");
 
-      Config overrideConfig = new ConfigImpl(cluster, "type", confGroupProperties, null, injector);
+      Config overrideConfig = configFactory.createNew(cluster, "type", null,
+          confGroupProperties, null);
 
       Map<String, Map<String, String>> result
           = configHelper.overrideAttributes(overrideConfig, persistedAttributes);
@@ -772,7 +766,8 @@ public class ConfigHelperTest {
       confGroupFinalAttrs.put("b", "true");
       confGroupAttributes.put("final", confGroupFinalAttrs);
 
-      Config overrideConfig = new ConfigImpl(cluster, "type", null, confGroupAttributes, injector);
+      Config overrideConfig = configFactory.createNew(cluster, "type", "version122",
+          new HashMap<String,String>(), confGroupAttributes);
 
       Map<String, Map<String, String>> result
           = configHelper.overrideAttributes(overrideConfig, persistedAttributes);
@@ -921,8 +916,10 @@ public class ConfigHelperTest {
       List<String> hosts = new ArrayList<String>();
       hosts.add("h1");
       List<Config> configs = new ArrayList<Config>();
-      ConfigImpl configImpl = new ConfigImpl("flume-conf");
-      configImpl.setTag("FLUME1");
+
+      Config configImpl = configFactory.createNew(cluster, "flume-conf", "FLUME1",
+          new HashMap<String,String>(), null);
+
       configs.add(configImpl);
       addConfigGroup("configGroup1", "FLUME", hosts, configs);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
index 6b4b85a..ea1f18a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
@@ -24,6 +24,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
+import java.lang.reflect.Field;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
@@ -1921,25 +1922,27 @@ public class UpgradeHelperTest {
     ServiceComponentHost sch1 = sc.addServiceComponentHost("h1");
     ServiceComponentHost sch2 = sc.addServiceComponentHost("h2");
 
-    UpgradePack upgradePack = new UpgradePack() {
-      @Override
-      public List<Grouping> getGroups(Direction direction) {
-        HostOrderItem hostItem = new HostOrderItem(HostOrderActionType.HOST_UPGRADE,
-            Lists.newArrayList("h1", "h2"));
-        HostOrderItem checkItem = new HostOrderItem(HostOrderActionType.SERVICE_CHECK,
-            Lists.newArrayList("ZOOKEEPER", "STORM"));
-
-        Grouping g = new HostOrderGrouping();
-        ((HostOrderGrouping) g).setHostOrderItems(Lists.newArrayList(hostItem, checkItem));
-        g.title = "Some Title";
-        return Lists.newArrayList(g);
-      }
+    // !!! make a custom grouping
+    HostOrderItem hostItem = new HostOrderItem(HostOrderActionType.HOST_UPGRADE,
+        Lists.newArrayList("h1", "h2"));
+    HostOrderItem checkItem = new HostOrderItem(HostOrderActionType.SERVICE_CHECK,
+        Lists.newArrayList("ZOOKEEPER", "STORM"));
+
+    Grouping g = new HostOrderGrouping();
+    ((HostOrderGrouping) g).setHostOrderItems(Lists.newArrayList(hostItem, checkItem));
+    g.title = "Some Title";
+
+    UpgradePack upgradePack = new UpgradePack();
+
+    // !!! set the groups directly; allow the logic in getGroups(Direction) to happen
+    Field field = UpgradePack.class.getDeclaredField("groups");
+    field.setAccessible(true);
+    field.set(upgradePack, Lists.newArrayList(g));
+
+    field = UpgradePack.class.getDeclaredField("type" );
+    field.setAccessible(true);
+    field.set(upgradePack, UpgradeType.HOST_ORDERED);
 
-      @Override
-      public Map<String, Map<String, ProcessingComponent>> getTasks() {
-        return new HashMap<>();
-      }
-    };
 
     MasterHostResolver resolver = new MasterHostResolver(m_configHelper, c);
     UpgradeContext context = new UpgradeContext(c, UpgradeType.HOST_ORDERED, Direction.UPGRADE, new HashMap<String, Object>());

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertReceivedListenerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertReceivedListenerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertReceivedListenerTest.java
index 1867bda..ede94dc 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertReceivedListenerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertReceivedListenerTest.java
@@ -56,12 +56,12 @@ import org.apache.ambari.server.utils.EventBusSynchronizer;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.experimental.categories.Category;
 
 import com.google.inject.Guice;
 import com.google.inject.Injector;
 import com.google.inject.persist.PersistService;
 import com.google.inject.persist.UnitOfWork;
-import org.junit.experimental.categories.Category;
 
 /**
  * Tests the {@link AlertReceivedListener}.
@@ -835,17 +835,13 @@ public class AlertReceivedListenerTest {
   @SuppressWarnings("serial")
   public void testAlertFirmnessUsingGlobalValueHigherThanOverride() throws Exception {
     ConfigFactory cf = m_injector.getInstance(ConfigFactory.class);
-    Config config = cf.createNew(m_cluster, ConfigHelper.CLUSTER_ENV,
+    Config config = cf.createNew(m_cluster, ConfigHelper.CLUSTER_ENV, "version2",
         new HashMap<String, String>() {
           {
             put(ConfigHelper.CLUSTER_ENV_ALERT_REPEAT_TOLERANCE, "3");
           }
         }, new HashMap<String, Map<String, String>>());
 
-    config.setTag("version2");
-    config.persist();
-
-    m_cluster.addConfig(config);
     m_cluster.addDesiredConfig("user", Collections.singleton(config));
 
     String definitionName = ALERT_DEFINITION + "1";

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
index 4fdcc22..9dc405e 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
@@ -124,14 +124,11 @@ public class ClusterDeadlockTest {
     cluster.createClusterVersion(stackId,
         stackId.getStackVersion(), "admin", RepositoryVersionState.INSTALLING);
 
-    Config config1 = configFactory.createNew(cluster, "test-type1", new HashMap<String, String>(), new HashMap<String,
+    Config config1 = configFactory.createNew(cluster, "test-type1", "version1", new HashMap<String, String>(), new HashMap<String,
         Map<String, String>>());
-    Config config2 = configFactory.createNew(cluster, "test-type2", new HashMap<String, String>(), new HashMap<String,
+    Config config2 = configFactory.createNew(cluster, "test-type2", "version1", new HashMap<String, String>(), new HashMap<String,
         Map<String, String>>());
-    config1.persist();
-    config2.persist();
-    cluster.addConfig(config1);
-    cluster.addConfig(config2);
+
     cluster.addDesiredConfig("test user", new HashSet<Config>(Arrays.asList(config1, config2)));
 
     // 100 hosts
@@ -186,7 +183,7 @@ public class ClusterDeadlockTest {
     }
 
     DeadlockWarningThread wt = new DeadlockWarningThread(threads);
-    
+
     while (true) {
       if(!wt.isAlive()) {
           break;
@@ -221,7 +218,7 @@ public class ClusterDeadlockTest {
     }
 
     DeadlockWarningThread wt = new DeadlockWarningThread(threads);
-    
+
     while (true) {
       if(!wt.isAlive()) {
           break;
@@ -267,7 +264,7 @@ public class ClusterDeadlockTest {
       clusterWriterThread.start();
       schWriterThread.start();
     }
-    
+
     DeadlockWarningThread wt = new DeadlockWarningThread(threads, 20, 1000);
     while (true) {
       if(!wt.isAlive()) {
@@ -337,7 +334,7 @@ public class ClusterDeadlockTest {
     @Override
     public void run() {
       for (int i =0; i<300; i++) {
-        config.persist(false);
+        config.save();
       }
     }
   }


http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/YARN_widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/YARN_widgets.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/YARN_widgets.json
new file mode 100644
index 0000000..782f21d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/YARN_widgets.json
@@ -0,0 +1,670 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_yarn_dashboard",
+      "display_name": "Standard YARN Dashboard",
+      "section_name": "YARN_SUMMARY",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "Memory Utilization",
+          "description": "Percentage of total memory allocated to containers running in the cluster.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AllocatedMB",
+              "metric_path": "metrics/yarn/Queue/root/AllocatedMB",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AvailableMB",
+              "metric_path": "metrics/yarn/Queue/root/AvailableMB",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            }
+          ],
+          "values": [
+            {
+              "name": "Memory Utilization",
+              "value": "${(yarn.QueueMetrics.Queue=root.AllocatedMB / (yarn.QueueMetrics.Queue=root.AllocatedMB + yarn.QueueMetrics.Queue=root.AvailableMB)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "CPU Utilization",
+          "description": "Percentage of total virtual cores allocated to containers running in the cluster.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AllocatedVCores",
+              "metric_path": "metrics/yarn/Queue/root/AllocatedVCores",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AvailableVCores",
+              "metric_path": "metrics/yarn/Queue/root/AvailableVCores",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            }
+          ],
+          "values": [
+            {
+              "name": "Total Allocatable CPU Utilized across NodeManager",
+              "value": "${(yarn.QueueMetrics.Queue=root.AllocatedVCores / (yarn.QueueMetrics.Queue=root.AllocatedVCores + yarn.QueueMetrics.Queue=root.AvailableVCores)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Bad Local Disks",
+          "description": "Number of unhealthy local disks across all NodeManagers.",
+          "widget_type": "NUMBER",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.BadLocalDirs",
+              "metric_path": "metrics/yarn/BadLocalDirs",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.BadLogDirs",
+              "metric_path": "metrics/yarn/BadLogDirs",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Number of unhealthy local disks for NodeManager",
+              "value": "${yarn.NodeManagerMetrics.BadLocalDirs + yarn.NodeManagerMetrics.BadLogDirs}"
+            }
+          ],
+          "properties": {
+            "display_unit": ""
+          }
+        },
+        {
+          "widget_name": "Container Failures",
+          "description": "Percentage of all containers failing in the cluster.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersFailed._rate",
+              "metric_path": "metrics/yarn/ContainersFailed._rate",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersCompleted._rate",
+              "metric_path": "metrics/yarn/ContainersCompleted._rate",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersLaunched._rate",
+              "metric_path": "metrics/yarn/ContainersLaunched._rate",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersIniting._sum",
+              "metric_path": "metrics/yarn/ContainersIniting._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersKilled._rate",
+              "metric_path": "metrics/yarn/ContainersKilled._rate",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersRunning._sum",
+              "metric_path": "metrics/yarn/ContainersRunning._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Container Failures",
+              "value": "${(yarn.NodeManagerMetrics.ContainersFailed._rate/(yarn.NodeManagerMetrics.ContainersFailed._rate + yarn.NodeManagerMetrics.ContainersCompleted._rate + yarn.NodeManagerMetrics.ContainersLaunched._rate + yarn.NodeManagerMetrics.ContainersIniting._sum + yarn.NodeManagerMetrics.ContainersKilled._rate + yarn.NodeManagerMetrics.ContainersRunning._sum)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "App Failures",
+          "description": "Percentage of all launched applications failing in the cluster.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsFailed._rate",
+              "metric_path": "metrics/yarn/Queue/root/AppsFailed._rate",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsKilled._rate",
+              "metric_path": "metrics/yarn/Queue/root/AppsKilled._rate",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsPending",
+              "metric_path": "metrics/yarn/Queue/root/AppsPending",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsRunning",
+              "metric_path": "metrics/yarn/Queue/root/AppsRunning",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsSubmitted._rate",
+              "metric_path": "metrics/yarn/Queue/root/AppsSubmitted._rate",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsCompleted._rate",
+              "metric_path": "metrics/yarn/Queue/root/AppsCompleted._rate",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            }
+          ],
+          "values": [
+            {
+              "name": "App Failures",
+              "value": "${(yarn.QueueMetrics.Queue=root.AppsFailed._rate/(yarn.QueueMetrics.Queue=root.AppsFailed._rate + yarn.QueueMetrics.Queue=root.AppsKilled._rate + yarn.QueueMetrics.Queue=root.AppsPending + yarn.QueueMetrics.Queue=root.AppsRunning + yarn.QueueMetrics.Queue=root.AppsSubmitted._rate + yarn.QueueMetrics.Queue=root.AppsCompleted._rate)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Pending Apps",
+          "description": "Count of applications waiting for cluster resources to become available.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsPending",
+              "metric_path": "metrics/yarn/Queue/root/AppsPending",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            }
+          ],
+          "values": [
+            {
+              "name": "Pending Apps",
+              "value": "${yarn.QueueMetrics.Queue=root.AppsPending}"
+            }
+          ],
+          "properties": {
+            "display_unit": "Apps",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Cluster Memory",
+          "description": "Percentage of memory used across all NodeManager hosts.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "mem_total._sum",
+              "metric_path": "metrics/memory/mem_total._avg",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "mem_free._sum",
+              "metric_path": "metrics/memory/mem_free._avg",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Memory utilization",
+              "value": "${((mem_total._sum - mem_free._sum)/mem_total._sum) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Cluster Disk",
+          "description": "Sum of disk throughput for all NodeManager hosts.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "read_bps._sum",
+              "metric_path": "metrics/disk/read_bps._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "write_bps._sum",
+              "metric_path": "metrics/disk/write_bps._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Read throughput",
+              "value": "${read_bps._sum/1048576}"
+            },
+            {
+              "name": "Write throughput",
+              "value": "${write_bps._sum/1048576}"
+            }
+          ],
+          "properties": {
+            "display_unit": "Mbps",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Cluster Network",
+          "description": "Average of Network utilized across all NodeManager hosts.",
+          "default_section_name": "YARN_SUMMARY",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "pkts_in._avg",
+              "metric_path": "metrics/network/pkts_in._avg",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "pkts_out._avg",
+              "metric_path": "metrics/network/pkts_out._avg",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Packets In",
+              "value": "${pkts_in._avg}"
+            },
+            {
+              "name": "Packets Out",
+              "value": "${pkts_out._avg}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Cluster CPU",
+          "description": "Percentage of CPU utilized across all NodeManager hosts.",
+          "default_section_name": "YARN_SUMMARY",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "cpu_system._sum",
+              "metric_path": "metrics/cpu/cpu_system._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "cpu_user._sum",
+              "metric_path": "metrics/cpu/cpu_user._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "cpu_nice._sum",
+              "metric_path": "metrics/cpu/cpu_nice._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "cpu_idle._sum",
+              "metric_path": "metrics/cpu/cpu_idle._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "cpu_wio._sum",
+              "metric_path": "metrics/cpu/cpu_wio._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "CPU utilization",
+              "value": "${((cpu_system._sum + cpu_user._sum + cpu_nice._sum)/(cpu_system._sum + cpu_user._sum + cpu_nice._sum + cpu_idle._sum + cpu_wio._sum)) * 100}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1",
+            "display_unit": "%"
+          }
+        }
+      ]
+    },
+    {
+      "layout_name": "default_yarn_heatmap",
+      "display_name": "YARN Heatmaps",
+      "section_name": "YARN_HEATMAPS",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "YARN local disk space utilization per NodeManager",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc",
+              "metric_path": "metrics/yarn/GoodLocalDirsDiskUtilizationPerc",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc",
+              "metric_path": "metrics/yarn/GoodLogDirsDiskUtilizationPerc",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "YARN local disk space utilization per NodeManager",
+              "value": "${(yarn.NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc + yarn.NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc)/2}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Total Allocatable RAM Utilized per NodeManager",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.AllocatedGB",
+              "metric_path": "metrics/yarn/AllocatedGB",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.AvailableGB",
+              "metric_path": "metrics/yarn/AvailableGB",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Total Allocatable RAM Utilized per NodeManager",
+              "value": "${(yarn.NodeManagerMetrics.AllocatedGB/(yarn.NodeManagerMetrics.AvailableGB + yarn.NodeManagerMetrics.AllocatedGB)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Total Allocatable CPU Utilized per NodeManager",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.AllocatedVCores",
+              "metric_path": "metrics/yarn/AllocatedVCores",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.AvailableVCores",
+              "metric_path": "metrics/yarn/AvailableVCores",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Total Allocatable CPU Utilized per NodeManager",
+              "value": "${(yarn.NodeManagerMetrics.AllocatedVCores/(yarn.NodeManagerMetrics.AllocatedVCores + yarn.NodeManagerMetrics.AvailableVCores)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Container Failures",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersFailed._rate",
+              "metric_path": "metrics/yarn/ContainersFailed._rate",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersCompleted._rate",
+              "metric_path": "metrics/yarn/ContainersCompleted._rate",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersLaunched._rate",
+              "metric_path": "metrics/yarn/ContainersLaunched._rate",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersIniting",
+              "metric_path": "metrics/yarn/ContainersIniting",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersKilled._rate",
+              "metric_path": "metrics/yarn/ContainersKilled._rate",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersRunning",
+              "metric_path": "metrics/yarn/ContainersRunning",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Container Failures",
+              "value": "${(yarn.NodeManagerMetrics.ContainersFailed._rate/(yarn.NodeManagerMetrics.ContainersFailed._rate + yarn.NodeManagerMetrics.ContainersCompleted._rate + yarn.NodeManagerMetrics.ContainersLaunched._rate + yarn.NodeManagerMetrics.ContainersIniting + yarn.NodeManagerMetrics.ContainersKilled._rate + yarn.NodeManagerMetrics.ContainersRunning)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "NodeManager GC Time",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis",
+              "metric_path": "metrics/jvm/gcTimeMillis",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "NodeManager Garbage Collection Time",
+              "value": "${Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis}"
+            }
+          ],
+          "properties": {
+            "display_unit": "ms",
+            "max_limit": "10000"
+          }
+        },
+        {
+          "widget_name": "NodeManager JVM Heap Memory Used",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM",
+              "metric_path": "metrics/jvm/memHeapUsedM",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "NodeManager JVM Heap Memory Used",
+              "value": "${Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "512"
+          }
+        },
+        {
+          "widget_name": "Allocated Containers",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.AllocatedContainers",
+              "metric_path": "metrics/yarn/AllocatedContainers",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Allocated Containers",
+              "value": "${yarn.NodeManagerMetrics.AllocatedContainers}"
+            }
+          ],
+          "properties": {
+            "display_unit": "",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "NodeManager RAM Utilized",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.AllocatedGB",
+              "metric_path": "metrics/yarn/AllocatedGB",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "NodeManager RAM Utilized",
+              "value": "${yarn.NodeManagerMetrics.AllocatedGB}"
+            }
+          ],
+          "properties": {
+            "display_unit": "",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "NodeManager CPU Utilized",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.AllocatedVCores",
+              "metric_path": "metrics/yarn/AllocatedVCores",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "NodeManager CPU Utilized",
+              "value": "${yarn.NodeManagerMetrics.AllocatedVCores}"
+            }
+          ],
+          "properties": {
+            "display_unit": "",
+            "max_limit": "100"
+          }
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml
new file mode 100644
index 0000000..a70fad3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml
@@ -0,0 +1,51 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <!-- These configs were inherited from HDP 2.2 -->
+  <!-- mapred-env.sh -->
+  <property>
+    <name>content</name>
+    <display-name>mapred-env template</display-name>
+    <description>This is the jinja template for mapred-env.sh file</description>
+    <value>
+# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
+
+export HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}
+
+export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA
+
+#export HADOOP_JOB_HISTORYSERVER_OPTS=
+#export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.
+#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.
+#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.
+#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default
+#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.
+export HADOOP_OPTS="-Dhdp.version=$HDP_VERSION $HADOOP_OPTS"
+export HADOOP_OPTS="-Djava.io.tmpdir={{hadoop_java_io_tmpdir}} $HADOOP_OPTS"
+export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}"
+    </value>
+    <value-attributes>
+      <type>content</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml
new file mode 100644
index 0000000..cef2b14
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml
@@ -0,0 +1,134 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<!-- Put site-specific property overrides in this file. -->
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
+  <!-- These configs were inherited from HDP 2.2 -->
+  <property>
+    <name>mapreduce.admin.user.env</name>
+    <value>LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64</value>
+    <description>
+      Additional execution environment entries for map and reduce task processes.
+      This is not an additive property. You must preserve the original value if
+      you want your map and reduce tasks to have access to native libraries (compression, etc)
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.application.classpath</name>
+    <value>$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:$PWD/mr-framework/hadoop/share/hadoop/tools/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure</value>
+    <description>
+      CLASSPATH for MR applications. A comma-separated list of CLASSPATH
+      entries.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.application.framework.path</name>
+    <value>/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.app.mapreduce.am.admin-command-opts</name>
+    <value>-Dhdp.version=${hdp.version}</value>
+    <description>
+      Java opts for the MR App Master processes.
+      The following symbol, if present, will be interpolated: @taskid@ is replaced
+      by current TaskID. Any other occurrences of '@' will go unchanged.
+      For example, to enable verbose gc logging to a file named for the taskid in
+      /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
+      -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
+
+      Usage of -Djava.library.path can cause programs to no longer function if
+      hadoop native libraries are used. These values should instead be set as part
+      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
+      mapreduce.reduce.env config settings.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.admin.map.child.java.opts</name>
+    <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.admin.reduce.child.java.opts</name>
+    <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.reduce.shuffle.fetch.retry.enabled</name>
+    <value>1</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.reduce.shuffle.fetch.retry.interval-ms</name>
+    <value>1000</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.reduce.shuffle.fetch.retry.timeout-ms</name>
+    <value>30000</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.job.emit-timeline-data</name>
+    <value>false</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.jobhistory.bind-host</name>
+    <value>0.0.0.0</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- These configs were inherited from HDP 2.3 -->
+  <property>
+    <name>mapreduce.jobhistory.recovery.enable</name>
+    <value>true</value>
+    <description>Enable the history server to store server state and recover
+      server state upon startup.  If enabled then
+      mapreduce.jobhistory.recovery.store.class must be specified.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.jobhistory.recovery.store.class</name>
+    <value>org.apache.hadoop.mapreduce.v2.hs.HistoryServerLeveldbStateStoreService</value>
+    <description>The HistoryServerStateStoreService class to store history server
+      state for recovery.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.jobhistory.recovery.store.leveldb.path</name>
+    <value>/hadoop/mapreduce/jhs</value>
+    <description>The URI where history server state will be stored if HistoryServerLeveldbStateStoreService
+      is configured as the recovery storage class.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
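
As a side note on the format: every file in this stack-definition commit uses the same schema, where each <property> carries a name, a value, an optional description, and an <on-ambari-upgrade> flag. A minimal Python sketch of reading that structure with the standard library (this only illustrates the schema, not Ambari's own configuration loader; the file path is assumed):

    import xml.etree.ElementTree as ET

    def load_stack_config(path):
        """Parse an Ambari stack configuration XML into a list of property dicts."""
        root = ET.parse(path).getroot()  # the <configuration> element
        props = []
        for prop in root.findall("property"):
            upgrade = prop.find("on-ambari-upgrade")
            props.append({
                "name": prop.findtext("name"),
                "value": prop.findtext("value", default=""),
                "description": (prop.findtext("description") or "").strip(),
                "add_on_upgrade": upgrade is not None and upgrade.get("add") == "true",
            })
        return props

    # Hypothetical usage against a local copy of the file added above:
    # for p in load_stack_config("mapred-site.xml"):
    #     print(p["name"], "=", p["value"])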

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/capacity-scheduler.xml
new file mode 100644
index 0000000..4768e46
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/capacity-scheduler.xml
@@ -0,0 +1,71 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <!-- These configs were inherited from HDP 2.2 -->
+  <property>
+    <name>yarn.scheduler.capacity.resource-calculator</name>
+    <description>
+      The ResourceCalculator implementation to be used to compare Resources in the scheduler.
+      The default, i.e. org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator, only uses
+      Memory while DominantResourceCalculator uses Dominant-resource to compare multi-dimensional
+      resources such as Memory, CPU etc. A Java ResourceCalculator class name is expected.
+    </description>
+    <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
+    <display-name>CPU Scheduling</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>org.apache.hadoop.yarn.util.resource.DominantResourceCalculator</value>
+          <label>Enabled</label>
+        </entry>
+        <entry>
+          <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
+          <label>Disabled</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.root.accessible-node-labels</name>
+    <value>*</value>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- In HDP 2.3, yarn.scheduler.capacity.root.default-node-label-expression was deleted -->
+
+  <!-- These configs were inherited from HDP 2.5 -->
+  <property>
+    <name>capacity-scheduler</name>
+    <description>Enter key=value (one per line) for all properties of capacity-scheduler.xml</description>
+    <depends-on>
+      <property>
+        <type>hive-interactive-env</type>
+        <name>enable_hive_interactive</name>
+      </property>
+      <property>
+        <type>hive-interactive-env</type>
+        <name>llap_queue_capacity</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
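
The capacity-scheduler property above is intentionally a free-form blob: the description asks for one key=value pair per line, which Ambari later turns into the individual entries of capacity-scheduler.xml. A hedged sketch of that parsing step (an illustration, not the actual Ambari code; the two keys in the example are only placeholders):

    def parse_capacity_scheduler_blob(blob):
        """Split the one-key=value-per-line capacity-scheduler text into a dict."""
        props = {}
        for line in blob.splitlines():
            line = line.strip()
            if not line or line.startswith("#"):
                continue  # skip blank lines and comments
            key, _, value = line.partition("=")
            props[key.strip()] = value.strip()
        return props

    sample = ("yarn.scheduler.capacity.maximum-applications=10000\n"
              "yarn.scheduler.capacity.root.queues=default")
    assert parse_capacity_scheduler_blob(sample)["yarn.scheduler.capacity.root.queues"] == "default"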

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-audit.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-audit.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-audit.xml
new file mode 100644
index 0000000..a6b1baa
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-audit.xml
@@ -0,0 +1,177 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>xasecure.audit.is.enabled</name>
+    <value>true</value>
+    <description>Is Audit enabled?</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db</name>
+    <value>false</value>
+    <display-name>Audit to DB</display-name>
+    <description>Is Audit to DB enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.db</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.jdbc.url</name>
+    <value>{{audit_jdbc_url}}</value>
+    <description>Audit DB JDBC URL</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.user</name>
+    <value>{{xa_audit_db_user}}</value>
+    <description>Audit DB JDBC User</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.password</name>
+    <value>crypted</value>
+    <property-type>PASSWORD</property-type>
+    <description>Audit DB JDBC Password</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.jdbc.driver</name>
+    <value>{{jdbc_driver}}</value>
+    <description>Audit DB JDBC Driver</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.credential.provider.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>Credential file store</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.batch.filespool.dir</name>
+    <value>/var/log/hadoop/yarn/audit/db/spool</value>
+    <description>/var/log/hadoop/yarn/audit/db/spool</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs</name>
+    <value>true</value>
+    <display-name>Audit to HDFS</display-name>
+    <description>Is Audit to HDFS enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.hdfs</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs.dir</name>
+    <value>hdfs://NAMENODE_HOSTNAME:8020/ranger/audit</value>
+    <description>HDFS folder to write audit to; make sure the service user has the required permissions</description>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.hdfs.dir</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
+    <value>/var/log/hadoop/yarn/audit/hdfs/spool</value>
+    <description>/var/log/hadoop/yarn/audit/hdfs/spool</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr</name>
+    <value>false</value>
+    <display-name>Audit to SOLR</display-name>
+    <description>Is Solr audit enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.solr</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.urls</name>
+    <value/>
+    <description>Solr URL</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.urls</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.zookeepers</name>
+    <value>NONE</value>
+    <description>Solr Zookeeper string</description>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.zookeepers</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
+    <value>/var/log/hadoop/yarn/audit/solr/spool</value>
+    <description>/var/log/hadoop/yarn/audit/solr/spool</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.provider.summary.enabled</name>
+    <value>false</value>
+    <display-name>Audit provider summary enabled</display-name>
+    <description>Enable Summary audit?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
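
ranger-yarn-audit.xml toggles three audit sinks (DB, HDFS, Solr), each paired with a spool directory for batched writes; with the defaults above only the HDFS sink is enabled. A small sketch, assuming the properties have already been flattened into a plain dict, of how a consumer might work out which sinks are active (illustrative only, not the Ranger plugin's code):

    def active_audit_sinks(conf):
        """Return {sink: spool_dir} for every enabled xasecure audit destination."""
        sinks = {}
        for sink in ("db", "hdfs", "solr"):
            if conf.get("xasecure.audit.destination.%s" % sink, "false").lower() == "true":
                sinks[sink] = conf.get(
                    "xasecure.audit.destination.%s.batch.filespool.dir" % sink)
        return sinks

    defaults = {
        "xasecure.audit.destination.db": "false",
        "xasecure.audit.destination.hdfs": "true",
        "xasecure.audit.destination.hdfs.batch.filespool.dir":
            "/var/log/hadoop/yarn/audit/hdfs/spool",
        "xasecure.audit.destination.solr": "false",
    }
    print(active_audit_sinks(defaults))  # {'hdfs': '/var/log/hadoop/yarn/audit/hdfs/spool'}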

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-plugin-properties.xml
new file mode 100644
index 0000000..97867cc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-plugin-properties.xml
@@ -0,0 +1,82 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <property>
+    <name>policy_user</name>
+    <value>ambari-qa</value>
+    <display-name>Policy user for YARN</display-name>
+    <description>This user must be a system user and must also be present in the Ranger admin portal</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hadoop.rpc.protection</name>
+    <value/>
+    <description>Used for repository creation on ranger admin</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>common.name.for.certificate</name>
+    <value/>
+    <description>Common name for certificate; this value should match what is specified in the repo within Ranger admin</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger-yarn-plugin-enabled</name>
+    <value>No</value>
+    <display-name>Enable Ranger for YARN</display-name>
+    <description>Enable the Ranger YARN plugin?</description>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>ranger-yarn-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_USERNAME</name>
+    <value>yarn</value>
+    <display-name>Ranger repository config user</display-name>
+    <description>Used for repository creation on ranger admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_PASSWORD</name>
+    <value>yarn</value>
+    <display-name>Ranger repository config password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Used for repository creation on ranger admin</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
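
Note that ranger-yarn-plugin-enabled is a Yes/No string rather than a boolean, so anything consuming it has to normalise the value first. A trivial sketch of that check, shown only as an assumption about how a script might read it:

    def ranger_yarn_plugin_enabled(conf):
        """Interpret the Yes/No ranger-yarn-plugin-enabled flag as a boolean."""
        return str(conf.get("ranger-yarn-plugin-enabled", "No")).strip().lower() == "yes"

    print(ranger_yarn_plugin_enabled({"ranger-yarn-plugin-enabled": "No"}))   # False
    print(ranger_yarn_plugin_enabled({"ranger-yarn-plugin-enabled": "Yes"}))  # True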

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml
new file mode 100644
index 0000000..5410104
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml
@@ -0,0 +1,66 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore</name>
+    <value>/usr/hdp/current/hadoop-client/conf/ranger-yarn-plugin-keystore.jks</value>
+    <description>Java Keystore files</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.password</name>
+    <value>myKeyFilePassword</value>
+    <property-type>PASSWORD</property-type>
+    <description>password for keystore</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore</name>
+    <value>/usr/hdp/current/hadoop-client/conf/ranger-yarn-plugin-truststore.jks</value>
+    <description>java truststore file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.password</name>
+    <value>changeit</value>
+    <property-type>PASSWORD</property-type>
+    <description>java truststore password</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>java keystore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>java truststore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-security.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-security.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-security.xml
new file mode 100644
index 0000000..5f69962
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-security.xml
@@ -0,0 +1,58 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>ranger.plugin.yarn.service.name</name>
+    <value>{{repo_name}}</value>
+    <description>Name of the Ranger service containing policies for this Yarn instance</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.yarn.policy.source.impl</name>
+    <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
+    <description>Class to retrieve policies from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.yarn.policy.rest.url</name>
+    <value>{{policymgr_mgr_url}}</value>
+    <description>URL to Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.yarn.policy.rest.ssl.config.file</name>
+    <value>/etc/hadoop/conf/ranger-policymgr-ssl-yarn.xml</value>
+    <description>Path to the file containing SSL details to contact Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.yarn.policy.pollIntervalMs</name>
+    <value>30000</value>
+    <description>How often (in milliseconds) to poll for changes in policies</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.yarn.policy.cache.dir</name>
+    <value>/etc/ranger/{{repo_name}}/policycache</value>
+    <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
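
ranger-yarn-security.xml tells the plugin where to fetch policies ({{policymgr_mgr_url}}), how often to poll (ranger.plugin.yarn.policy.pollIntervalMs), and where to cache them on disk. A rough sketch of such a poll-and-cache loop under those settings; fetch_policies is a hypothetical stand-in for the RangerAdminRESTClient call, so this is only an outline of the behaviour the properties describe:

    import json
    import os
    import time

    def poll_policies(conf, fetch_policies):
        """Periodically fetch Ranger policies and cache them locally."""
        interval_s = int(conf["ranger.plugin.yarn.policy.pollIntervalMs"]) / 1000.0
        cache_dir = conf["ranger.plugin.yarn.policy.cache.dir"]
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        while True:
            policies = fetch_policies(conf["ranger.plugin.yarn.policy.rest.url"])
            with open(os.path.join(cache_dir, "yarn_policies.json"), "w") as f:
                json.dump(policies, f)
            time.sleep(interval_s)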

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-env.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-env.xml
new file mode 100644
index 0000000..bbc2930
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-env.xml
@@ -0,0 +1,200 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true" supports_adding_forbidden="true">
+  <!-- These properties were inherited from HDP 2.1 -->
+  <property>
+    <name>apptimelineserver_heapsize</name>
+    <value>1024</value>
+    <display-name>AppTimelineServer Java heap size</display-name>
+    <description>Max heapsize for AppTimelineServer using a numerical value in the scale of MB</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <unit>MB</unit>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- These properties were inherited from HDP 2.2 -->
+  <property>
+    <name>yarn_cgroups_enabled</name>
+    <value>false</value>
+    <description>You can use CGroups to isolate CPU-heavy processes in a Hadoop cluster.</description>
+    <display-name>CPU Isolation</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Enabled</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Disabled</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- These properties were inherited from HDP 2.3 -->
+  <property>
+    <name>is_supported_yarn_ranger</name>
+    <value>true</value>
+    <description>Set to false by default; needs to be set to true in stacks that use the Ranger YARN plugin</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- yarn-env.sh -->
+  <property>
+    <name>content</name>
+    <display-name>yarn-env template</display-name>
+    <description>This is the jinja template for yarn-env.sh file</description>
+    <value>
+      export HADOOP_YARN_HOME={{hadoop_yarn_home}}
+      export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER
+      export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER
+      export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+      export JAVA_HOME={{java64_home}}
+      export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}"
+
+      # We need to add the EWMA appender for the yarn daemons only;
+      # however, YARN_ROOT_LOGGER is shared by the yarn client and the
+      # daemons. This restricts the EWMA appender to daemons only.
+      INVOKER="${0##*/}"
+      if [ "$INVOKER" == "yarn-daemon.sh" ]; then
+        export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,EWMA,RFA}
+      fi
+
+      # User for YARN daemons
+      export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
+
+      # resolve links - $0 may be a softlink
+      export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
+
+      # some Java parameters
+      # export JAVA_HOME=/home/y/libexec/jdk1.6.0/
+      if [ "$JAVA_HOME" != "" ]; then
+      #echo "run java in $JAVA_HOME"
+      JAVA_HOME=$JAVA_HOME
+      fi
+
+      if [ "$JAVA_HOME" = "" ]; then
+      echo "Error: JAVA_HOME is not set."
+      exit 1
+      fi
+
+      JAVA=$JAVA_HOME/bin/java
+      JAVA_HEAP_MAX=-Xmx1000m
+
+      # For setting YARN specific HEAP sizes please use this
+      # Parameter and set appropriately
+      YARN_HEAPSIZE={{yarn_heapsize}}
+
+      # check envvars which might override default args
+      if [ "$YARN_HEAPSIZE" != "" ]; then
+      JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
+      fi
+
+      # Resource Manager specific parameters
+
+      # Specify the max Heapsize for the ResourceManager using a numerical value
+      # in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
+      # the value to 1000.
+      # This value will be overridden by an Xmx setting specified in either YARN_OPTS
+      # and/or YARN_RESOURCEMANAGER_OPTS.
+      # If not specified, the default value will be picked from either YARN_HEAPMAX
+      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+      export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}
+
+      # Specify the JVM options to be used when starting the ResourceManager.
+      # These options will be appended to the options specified as YARN_OPTS
+      # and therefore may override any similar flags set in YARN_OPTS
+      #export YARN_RESOURCEMANAGER_OPTS=
+
+      # Node Manager specific parameters
+
+      # Specify the max Heapsize for the NodeManager using a numerical value
+      # in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
+      # the value to 1000.
+      # This value will be overridden by an Xmx setting specified in either YARN_OPTS
+      # and/or YARN_NODEMANAGER_OPTS.
+      # If not specified, the default value will be picked from either YARN_HEAPMAX
+      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+      export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}
+
+      # Specify the max Heapsize for the timeline server using a numerical value
+      # in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
+      # the value to 1024.
+      # This value will be overridden by an Xmx setting specified in either YARN_OPTS
+      # and/or YARN_TIMELINESERVER_OPTS.
+      # If not specified, the default value will be picked from either YARN_HEAPMAX
+      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+      export YARN_TIMELINESERVER_HEAPSIZE={{apptimelineserver_heapsize}}
+
+      # Specify the JVM options to be used when starting the NodeManager.
+      # These options will be appended to the options specified as YARN_OPTS
+      # and therefore may override any similar flags set in YARN_OPTS
+      #export YARN_NODEMANAGER_OPTS=
+
+      # so that filenames w/ spaces are handled correctly in loops below
+      IFS=
+
+
+      # default log directory and file
+      if [ "$YARN_LOG_DIR" = "" ]; then
+      YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
+      fi
+      if [ "$YARN_LOGFILE" = "" ]; then
+      YARN_LOGFILE='yarn.log'
+      fi
+
+      # default policy file for service-level authorization
+      if [ "$YARN_POLICYFILE" = "" ]; then
+      YARN_POLICYFILE="hadoop-policy.xml"
+      fi
+
+      # restore ordinary behaviour
+      unset IFS
+
+
+      YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
+      YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
+      YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
+      YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
+      YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
+      YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
+      YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+      YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+      export YARN_NODEMANAGER_OPTS="$YARN_NODEMANAGER_OPTS -Dnm.audit.logger=INFO,NMAUDIT"
+      export YARN_RESOURCEMANAGER_OPTS="$YARN_RESOURCEMANAGER_OPTS -Drm.audit.logger=INFO,RMAUDIT"
+      if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+      YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+      fi
+      YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
+      YARN_OPTS="$YARN_OPTS -Djava.io.tmpdir={{hadoop_java_io_tmpdir}}"
+    </value>
+    <value-attributes>
+      <type>content</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
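
The yarn-env content above is a Jinja template: placeholders such as {{hadoop_yarn_home}}, {{yarn_log_dir_prefix}} and {{yarn_heapsize}} are substituted when the file is written out to the host. A hedged sketch of that rendering step with the jinja2 library; the three parameter values below are placeholders chosen for the example, not the stack defaults:

    from jinja2 import Template

    yarn_env_template = (
        "export HADOOP_YARN_HOME={{hadoop_yarn_home}}\n"
        "export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER\n"
        "YARN_HEAPSIZE={{yarn_heapsize}}\n"
    )

    rendered = Template(yarn_env_template).render(
        hadoop_yarn_home="/usr/hdp/current/hadoop-yarn-client",  # assumed path
        yarn_log_dir_prefix="/var/log/hadoop-yarn",              # assumed path
        yarn_heapsize=1024,
    )
    print(rendered)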

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-log4j.xml
new file mode 100644
index 0000000..9ac34f3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-log4j.xml
@@ -0,0 +1,103 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <property>
+    <name>content</name>
+    <display-name>yarn-log4j template</display-name>
+    <description>Custom log4j.properties</description>
+    <value>
+#Relative to Yarn Log Dir Prefix
+yarn.log.dir=.
+#
+# Job Summary Appender
+#
+# Use following logger to send summary to separate file defined by
+# hadoop.mapreduce.jobsummary.log.file rolled daily:
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+#
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
+# Set the ResourceManager summary log filename
+yarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log
+# Set the ResourceManager summary log level and appender
+yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
+#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
+
+# To enable AppSummaryLogging for the RM,
+# set yarn.server.resourcemanager.appsummary.logger to
+# LEVEL,RMSUMMARY in hadoop-env.sh
+
+# Appender for ResourceManager Application Summary Log
+# Requires the following properties to be set
+#    - hadoop.log.dir (Hadoop Log directory)
+#    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
+#    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
+log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
+log4j.appender.RMSUMMARY.File=${yarn.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
+log4j.appender.RMSUMMARY.MaxFileSize=256MB
+log4j.appender.RMSUMMARY.MaxBackupIndex=20
+log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
+log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+log4j.appender.JSA.DatePattern=.yyyy-MM-dd
+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
+log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
+
+# Appender for viewing information for errors and warnings
+yarn.ewma.cleanupInterval=300
+yarn.ewma.messageAgeLimitSeconds=86400
+yarn.ewma.maxUniqueMessages=250
+log4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender
+log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
+log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
+log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}
+
+# Audit logging for ResourceManager
+rm.audit.logger=${hadoop.root.logger}
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=${rm.audit.logger}
+log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=false
+log4j.appender.RMAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.RMAUDIT.File=${yarn.log.dir}/rm-audit.log
+log4j.appender.RMAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.RMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.RMAUDIT.DatePattern=.yyyy-MM-dd
+
+# Audit logging for NodeManager
+nm.audit.logger=${hadoop.root.logger}
+log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=${nm.audit.logger}
+log4j.additivity.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=false
+log4j.appender.NMAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.NMAUDIT.File=${yarn.log.dir}/nm-audit.log
+log4j.appender.NMAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.NMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.NMAUDIT.DatePattern=.yyyy-MM-dd
+    </value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>


[11/25] ambari git commit: Merge from branch-2.5

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/configuration/sleepy-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/configuration/sleepy-site.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/configuration/sleepy-site.xml
index 67762a5..5b6f2f6 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/configuration/sleepy-site.xml
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/configuration/sleepy-site.xml
@@ -33,4 +33,16 @@
     </value-attributes>
     <on-ambari-upgrade add="false"/>
   </property>
+  <property>
+    <name>sleepy_user</name>
+    <display-name>sleepy User</display-name>
+    <value>sleepy</value>
+    <property-type>USER</property-type>
+    <description>sleepy Username.</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/kerberos.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/kerberos.json
new file mode 100644
index 0000000..45e33f5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/kerberos.json
@@ -0,0 +1,78 @@
+{
+  "services": [
+    {
+      "name": "SLEEPY",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "sleepy",
+          "principal": {
+            "value": "${sleepy-site/sleepy_user}-${cluster_name|toLower()}@${realm}",
+            "type" : "user",
+            "configuration": "sleepy-site/sleepy_principal_name",
+            "local_username": "${sleepy-site/sleepy_user}"
+          },
+          "keytab": {
+            "file": "${keytab_dir}/sleepy.headless.keytab",
+            "owner": {
+              "name": "${sleepy-site/sleepy_user}",
+              "access": "r"
+            },
+            "group": {
+              "name": "${cluster-env/user_group}",
+              "access": "r"
+            },
+            "configuration": "sleepy-site/sleepy_user_keytab"
+          }
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+      ],
+      "components": [
+        {
+          "name": "SLEEPY",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "sleepy_sleepy",
+              "principal": {
+                "value": "sleepy/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "sleepy-site/sleepy.sleepy.kerberos.principal",
+                "local_username": "${sleepy-site/sleepy_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/sleepy.service.keytab",
+                "owner": {
+                  "name": "${sleepy-site/sleepy_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "sleepy-site/sleepy.sleepy.keytab.file"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "sleepy-site/sleepy.security.authentication.spnego.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "sleepy-site/sleepy.security.authentication.spnego.kerberos.keytab"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
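
The principal values in this kerberos.json use ${config-type/property} references plus simple transforms such as |toLower(), e.g. ${sleepy-site/sleepy_user}-${cluster_name|toLower()}@${realm}. A simplified sketch of resolving those references against a configuration dictionary, covering only the pieces visible above (an illustration, not Ambari's Kerberos descriptor engine):

    import re

    def resolve(value, configs, variables):
        """Expand ${config-type/property} and ${variable|toLower()} references."""
        def repl(match):
            name, _, transform = match.group(1).partition("|")
            if "/" in name:
                config_type, prop = name.split("/", 1)
                result = configs[config_type][prop]
            else:
                result = variables[name]
            return result.lower() if transform == "toLower()" else result
        return re.sub(r"\$\{([^}]+)\}", repl, value)

    configs = {"sleepy-site": {"sleepy_user": "sleepy"}}
    variables = {"cluster_name": "PerfCluster", "realm": "EXAMPLE.COM"}
    print(resolve("${sleepy-site/sleepy_user}-${cluster_name|toLower()}@${realm}",
                  configs, variables))
    # -> sleepy-perfcluster@EXAMPLE.COM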

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/metainfo.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/metainfo.xml
index 470d809..5b4dbd4 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/metainfo.xml
@@ -52,6 +52,11 @@
           <default>true</default>
         </theme>
       </themes>
+
+      <configuration-dependencies>
+        <config-type>sleepy-alert-config</config-type>
+      </configuration-dependencies>
+
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/package/alerts/alert_sleepy_process.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/package/alerts/alert_sleepy_process.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/package/alerts/alert_sleepy_process.py
new file mode 100644
index 0000000..6005459
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/package/alerts/alert_sleepy_process.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import logging
+
+from resource_management.libraries.functions.simulate_perf_cluster_alert_behaviour import simulate_perf_cluster_alert_behaviour
+
+ALERT_BEHAVIOUR_TYPE = "{{sleepy-alert-config/alert.behavior.type}}"
+
+ALERT_SUCCESS_PERCENTAGE = "{{sleepy-alert-config/alert.success.percentage}}"
+
+ALERT_TIMEOUT_RETURN_VALUE = "{{sleepy-alert-config/alert.timeout.return.value}}"
+ALERT_TIMEOUT_SECS = "{{sleepy-alert-config/alert.timeout.secs}}"
+
+ALERT_FLIP_INTERVAL_MINS = "{{sleepy-alert-config/alert.flip.interval.mins}}"
+
+logger = logging.getLogger('ambari_alerts')
+
+alert_behaviour_properties = {"alert_behaviour_type" : ALERT_BEHAVIOUR_TYPE, "alert_success_percentage" : ALERT_SUCCESS_PERCENTAGE,
+                              "alert_timeout_return_value" : ALERT_TIMEOUT_RETURN_VALUE, "alert_timeout_secs" : ALERT_TIMEOUT_SECS,
+                              "alert_flip_interval_mins" : ALERT_FLIP_INTERVAL_MINS}
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (ALERT_BEHAVIOUR_TYPE, ALERT_SUCCESS_PERCENTAGE, ALERT_TIMEOUT_RETURN_VALUE, ALERT_TIMEOUT_SECS,
+          ALERT_FLIP_INTERVAL_MINS)
+
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  return simulate_perf_cluster_alert_behaviour(alert_behaviour_properties, configurations)
\ No newline at end of file
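
The alert script receives its settings through the literal {{site/property}} tokens returned by get_tokens(): the agent builds a dict keyed by those token strings and passes it to execute(). A hedged usage sketch, e.g. from a test harness, assuming the script and Ambari's resource_management package are importable (the values mirror the sleepy-alert-config defaults):

    configurations = {
        "{{sleepy-alert-config/alert.behavior.type}}": "percentage",
        "{{sleepy-alert-config/alert.success.percentage}}": "100",
        "{{sleepy-alert-config/alert.timeout.return.value}}": "false",
        "{{sleepy-alert-config/alert.timeout.secs}}": "120",
        "{{sleepy-alert-config/alert.flip.interval.mins}}": "3",
    }
    # Per the docstring, execute() returns the result code plus a formatted label.
    result = execute(configurations=configurations)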

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/package/scripts/dwarf.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/package/scripts/dwarf.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/package/scripts/dwarf.py
index 370d03d..699e35b 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/package/scripts/dwarf.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/package/scripts/dwarf.py
@@ -33,6 +33,10 @@ class Sleepy(Dummy):
   def __init__(self):
     super(Sleepy, self).__init__()
     self.component_name = "SLEEPY"
+    self.principal_conf_name = "sleepy-site"
+    self.principal_name = "sleepy.sleepy.kerberos.principal"
+    self.keytab_conf_name = "sleepy-site"
+    self.keytab_name = "sleepy.sleepy.keytab.file"
 
 if __name__ == "__main__":
   Sleepy().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/alerts.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/alerts.json
new file mode 100644
index 0000000..c05127c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/alerts.json
@@ -0,0 +1,20 @@
+{
+    "SNOW": {
+
+        "SNOW_WHITE": [
+            {
+                "name": "snow_process",
+                "label": "Snow Process",
+                "description": "Alert for snow component process status",
+                "interval": 1,
+                "scope": "HOST",
+                "enabled": true,
+                "source": {
+                    "type": "SCRIPT",
+                    "path": "PERF/1.0/services/SNOW/package/alerts/alert_snow_process.py",
+                    "parameters": []
+                }
+            }
+        ]
+    }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/configuration/snow-alert-config.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/configuration/snow-alert-config.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/configuration/snow-alert-config.xml
new file mode 100644
index 0000000..392eea7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/configuration/snow-alert-config.xml
@@ -0,0 +1,80 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+-->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
+
+    <property>
+        <name>alert.behavior.type</name>
+        <value>percentage</value>
+        <description>
+            This property describes the type of alert behaviour.
+            There are three types: percentage, timeout, flip.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+
+    <property>
+        <name>alert.success.percentage</name>
+        <value>100</value>
+        <description>
+            This property only takes effect when alert.behavior.type is
+            set to "percentage". Set it to the percentage of alert checks
+            that should succeed.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+
+    <property>
+        <name>alert.timeout.return.value</name>
+        <value>false</value>
+        <description>
+            This property only takes effect when alert.behavior.type is
+            set to "timeout". Set it to the result the alert should return
+            after the timeout: false|true|none.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>alert.timeout.secs</name>
+        <value>120</value>
+        <description>
+            This property only takes effect when alert.behavior.type is
+            set to "timeout". Set it to the number of seconds the alert
+            should sleep.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+
+    <property>
+        <name>alert.flip.interval.mins</name>
+        <value>3</value>
+        <description>
+            This property only takes effect when alert.behavior.type is
+            set to "flip". Set it to the number of minutes after which the
+            alert should flip between true and false.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+
+</configuration>
\ No newline at end of file
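
snow-alert-config.xml describes three behaviours: percentage (a configured fraction of checks succeed), timeout (sleep, then return a fixed result) and flip (alternate the result every N minutes). A toy re-implementation of those rules for illustration only; the real logic lives in simulate_perf_cluster_alert_behaviour:

    import random
    import time

    def evaluate(conf, now_minutes=None):
        """Toy model of the percentage/timeout/flip behaviours described above."""
        behaviour = conf["alert.behavior.type"]
        if behaviour == "percentage":
            ok = random.uniform(0, 100) < float(conf["alert.success.percentage"])
            return "OK" if ok else "CRITICAL"
        if behaviour == "timeout":
            time.sleep(float(conf["alert.timeout.secs"]))
            return {"true": "OK", "false": "CRITICAL"}.get(
                conf["alert.timeout.return.value"], "UNKNOWN")
        if behaviour == "flip":
            minutes = now_minutes if now_minutes is not None else time.time() / 60
            period = int(conf["alert.flip.interval.mins"])
            return "OK" if (int(minutes) // period) % 2 == 0 else "CRITICAL"
        raise ValueError("unknown behaviour: %s" % behaviour)

    print(evaluate({"alert.behavior.type": "flip",
                    "alert.flip.interval.mins": "3"}, now_minutes=4))  # CRITICAL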

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/configuration/snow-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/configuration/snow-site.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/configuration/snow-site.xml
index 67762a5..dfad0ac 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/configuration/snow-site.xml
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/configuration/snow-site.xml
@@ -33,4 +33,16 @@
     </value-attributes>
     <on-ambari-upgrade add="false"/>
   </property>
+  <property>
+    <name>snow_user</name>
+    <display-name>snow User</display-name>
+    <value>snow</value>
+    <property-type>USER</property-type>
+    <description>snow Username.</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/kerberos.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/kerberos.json
new file mode 100644
index 0000000..47a979a8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/kerberos.json
@@ -0,0 +1,78 @@
+{
+  "services": [
+    {
+      "name": "SNOW",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "snow",
+          "principal": {
+            "value": "${snow-site/snow_user}-${cluster_name|toLower()}@${realm}",
+            "type" : "user",
+            "configuration": "snow-site/snow_principal_name",
+            "local_username": "${snow-site/snow_user}"
+          },
+          "keytab": {
+            "file": "${keytab_dir}/snow.headless.keytab",
+            "owner": {
+              "name": "${snow-site/snow_user}",
+              "access": "r"
+            },
+            "group": {
+              "name": "${cluster-env/user_group}",
+              "access": "r"
+            },
+            "configuration": "snow-site/snow_user_keytab"
+          }
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+      ],
+      "components": [
+        {
+          "name": "SNOW_WHITE",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "snow_white_snow",
+              "principal": {
+                "value": "snow/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "snow-site/snow.white.kerberos.principal",
+                "local_username": "${snow-site/snow_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/snow.service.keytab",
+                "owner": {
+                  "name": "${snow-site/snow_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "snow-site/snow.white.keytab.file"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "snow-site/snow.security.authentication.spnego.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "snow-site/snow.security.authentication.spnego.kerberos.keytab"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/metainfo.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/metainfo.xml
index cbb57f6..21e685a 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/metainfo.xml
@@ -52,6 +52,11 @@
           <default>true</default>
         </theme>
       </themes>
+
+      <configuration-dependencies>
+        <config-type>snow-alert-config</config-type>
+      </configuration-dependencies>
+
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/package/alerts/alert_snow_process.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/package/alerts/alert_snow_process.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/package/alerts/alert_snow_process.py
new file mode 100644
index 0000000..2df652c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/package/alerts/alert_snow_process.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import logging
+
+from resource_management.libraries.functions.simulate_perf_cluster_alert_behaviour import simulate_perf_cluster_alert_behaviour
+
+ALERT_BEHAVIOUR_TYPE = "{{snow-alert-config/alert.behavior.type}}"
+
+ALERT_SUCCESS_PERCENTAGE = "{{snow-alert-config/alert.success.percentage}}"
+
+ALERT_TIMEOUT_RETURN_VALUE = "{{snow-alert-config/alert.timeout.return.value}}"
+ALERT_TIMEOUT_SECS = "{{snow-alert-config/alert.timeout.secs}}"
+
+ALERT_FLIP_INTERVAL_MINS = "{{snow-alert-config/alert.flip.interval.mins}}"
+
+logger = logging.getLogger('ambari_alerts')
+
+alert_behaviour_properties = {"alert_behaviour_type" : ALERT_BEHAVIOUR_TYPE, "alert_success_percentage" : ALERT_SUCCESS_PERCENTAGE,
+                              "alert_timeout_return_value" : ALERT_TIMEOUT_RETURN_VALUE, "alert_timeout_secs" : ALERT_TIMEOUT_SECS,
+                              "alert_flip_interval_mins" : ALERT_FLIP_INTERVAL_MINS}
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (ALERT_BEHAVIOUR_TYPE, ALERT_SUCCESS_PERCENTAGE, ALERT_TIMEOUT_RETURN_VALUE, ALERT_TIMEOUT_SECS,
+          ALERT_FLIP_INTERVAL_MINS)
+
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  return simulate_perf_cluster_alert_behaviour(alert_behaviour_properties, configurations)
\ No newline at end of file

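Like the other PERF stack alerts, this script only exposes get_tokens() and execute(): the agent collects the {{site/property}} tokens, substitutes the current configuration values, and hands the resulting dictionary to execute(). The sketch below mimics that flow with hand-built configuration data; the run_alert harness and the values in cluster_configs are assumptions for illustration, not the Ambari agent's actual alert runner.

import importlib

def run_alert(module_name, cluster_configs):
    # Illustrative harness: resolve the script's tokens, then execute it.
    alert = importlib.import_module(module_name)
    configurations = {}
    for token in alert.get_tokens():
        key = token.strip('{} ')   # e.g. snow-alert-config/alert.behavior.type
        config_type, prop = key.split('/', 1)
        # Alert scripts look values up by the full token string.
        configurations[token] = cluster_configs.get(config_type, {}).get(prop)
    return alert.execute(configurations=configurations)

# Hypothetical configuration snapshot for a percentage-based alert:
cluster_configs = {'snow-alert-config': {'alert.behavior.type': 'percentage',
                                         'alert.success.percentage': '100'}}
# result_code, labels = run_alert('alert_snow_process', cluster_configs)
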
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/package/scripts/snow_white.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/package/scripts/snow_white.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/package/scripts/snow_white.py
index 41bfa8a..df2b81d 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/package/scripts/snow_white.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/package/scripts/snow_white.py
@@ -33,6 +33,10 @@ class SnowWhite(Dummy):
   def __init__(self):
     super(SnowWhite, self).__init__()
     self.component_name = "SNOW_WHITE"
+    self.principal_conf_name = "snow-site"
+    self.principal_name = "snow.white.kerberos.principal"
+    self.keytab_conf_name = "snow-site"
+    self.keytab_name = "snow.white.keytab.file"
 
 if __name__ == "__main__":
   SnowWhite().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/alerts.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/alerts.json
index c4a58bb..67cf881 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/alerts.json
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/alerts.json
@@ -1,185 +1,25 @@
 {
   "MAPREDUCE2": {
-    "service": [],
     "HISTORYSERVER": [
       {
-        "name": "mapreduce_history_server_webui",
-        "label": "History Server Web UI",
-        "description": "This host-level alert is triggered if the History Server Web UI is unreachable.",
+        "name": "mapreduce_history_process",
+        "label": "History Server process",
+        "description": "Alert for history server process status",
         "interval": 1,
-        "scope": "ANY",
-        "source": {
-          "type": "WEB",
-          "uri": {
-            "http": "{{mapred-site/mapreduce.jobhistory.webapp.address}}",
-            "https": "{{mapred-site/mapreduce.jobhistory.webapp.https.address}}",
-            "https_property": "{{mapred-site/mapreduce.jobhistory.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "kerberos_keytab": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file}}",
-            "kerberos_principal": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-principal}}",
-            "connection_timeout": 5.0
-          },
-          "reporting": {
-            "ok": {
-              "text": "HTTP {0} response in {2:.3f}s"
-            },
-            "warning":{
-              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
-            },
-            "critical": {
-              "text": "Connection failed to {1} ({3})"
-            }
-          }
-        }
-      },
-      {
-        "name": "mapreduce_history_server_cpu",
-        "label": "History Server CPU Utilization",
-        "description": "This host-level alert is triggered if the percent of CPU utilization on the History Server exceeds the configured critical threshold. The threshold values are in percent.",
-        "interval": 5,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{mapred-site/mapreduce.jobhistory.webapp.address}}",
-            "kerberos_keytab": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file}}",
-            "kerberos_principal": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-principal}}",
-            "https": "{{mapred-site/mapreduce.jobhistory.webapp.https.address}}",
-            "https_property": "{{mapred-site/mapreduce.jobhistory.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "connection_timeout": 5.0
-          },
-          "reporting": {
-            "ok": {
-              "text": "{1} CPU, load {0:.1%}"
-            },
-            "warning": {
-              "text": "{1} CPU, load {0:.1%}",
-              "value": 200
-            },
-            "critical": {
-              "text": "{1} CPU, load {0:.1%}",
-              "value": 250
-            },
-            "units" : "%",
-            "type": "PERCENT"
-          },
-          "jmx": {
-            "property_list": [
-              "java.lang:type=OperatingSystem/SystemCpuLoad",
-              "java.lang:type=OperatingSystem/AvailableProcessors"
-            ],
-            "value": "{0} * 100"
-          }
-        }
-      },
-      {
-        "name": "mapreduce_history_server_rpc_latency",
-        "label": "History Server RPC Latency",
-        "description": "This host-level alert is triggered if the History Server operations RPC latency exceeds the configured critical threshold. Typically an increase in the RPC processing time increases the RPC queue length, causing the average queue wait time to increase for operations. The threshold values are in milliseconds.",
-        "interval": 5,
-        "scope": "ANY",
+        "scope": "HOST",
         "enabled": true,
         "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{mapred-site/mapreduce.jobhistory.webapp.address}}",
-            "https": "{{mapred-site/mapreduce.jobhistory.webapp.https.address}}",
-            "kerberos_keytab": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file}}",
-            "kerberos_principal": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-principal}}",
-            "https_property": "{{mapred-site/mapreduce.jobhistory.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "connection_timeout": 5.0
-          },
-          "reporting": {
-            "ok": {
-              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]"
-            },
-            "warning": {
-              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
-              "value": 3000
-            },          
-            "critical": {
-              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
-              "value": 5000
-            },
-            "units" : "ms"
-          },
-          "jmx": {
-            "property_list": [
-              "Hadoop:service=JobHistoryServer,name=RpcActivityForPort*/RpcQueueTimeAvgTime",
-              "Hadoop:service=JobHistoryServer,name=RpcActivityForPort*/RpcProcessingTimeAvgTime"
-            ],
-            "value": "{0}"
-          }
+          "type": "SCRIPT",
+          "path": "PERF/1.0/services/YARN/package/alerts/alert_history_process.py",
+          "parameters": []
         }
       }
     ]
   },
   "YARN": {
-    "service": [
-      {
-        "name": "yarn_nodemanager_webui_percent",
-        "label": "Percent NodeManagers Available",
-        "description": "This alert is triggered if the number of down NodeManagers in the cluster is greater than the configured critical threshold. It aggregates the results of NodeManager process checks.",
-        "interval": 1,
-        "scope": "SERVICE",
-        "enabled": true,
-        "source": {
-          "type": "AGGREGATE",
-          "alert_name": "yarn_nodemanager_webui",
-          "reporting": {
-            "ok": {
-              "text": "affected: [{1}], total: [{0}]"
-            },
-            "warning": {
-              "text": "affected: [{1}], total: [{0}]",
-              "value": 10
-            },
-            "critical": {
-              "text": "affected: [{1}], total: [{0}]",
-              "value": 30
-            },
-            "units" : "%",
-            "type": "PERCENT"
-          }
-        }
-      }
-    ],
+
     "NODEMANAGER": [
       {
-        "name": "yarn_nodemanager_webui",
-        "label": "NodeManager Web UI",
-        "description": "This host-level alert is triggered if the NodeManager Web UI is unreachable.",
-        "interval": 1,
-        "scope": "HOST",
-        "source": {
-          "type": "WEB",
-          "uri": {
-            "http": "{{yarn-site/yarn.nodemanager.webapp.address}}",
-            "https": "{{yarn-site/yarn.nodemanager.webapp.https.address}}",
-            "https_property": "{{yarn-site/yarn.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "default_port": 8042,
-            "kerberos_keytab": "{{yarn-site/yarn.nodemanager.webapp.spnego-keytab-file}}",
-            "kerberos_principal": "{{yarn-site/yarn.nodemanager.webapp.spnego-principal}}",
-            "connection_timeout": 5.0
-          },
-          "reporting": {
-            "ok": {
-              "text": "HTTP {0} response in {2:.3f}s"
-            },
-            "warning":{
-              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
-            },
-            "critical": {
-              "text": "Connection failed to {1} ({3})"
-            }
-          }
-        }
-      },
-      {
         "name": "yarn_nodemanager_health",
         "label": "NodeManager Health",
         "description": "This host-level alert checks the node health property available from the NodeManager component.",
@@ -188,7 +28,7 @@
         "enabled": true,
         "source": {
           "type": "SCRIPT",
-          "path": "YARN/2.1.0.2.0/package/alerts/alert_nodemanager_health.py",
+          "path": "PERF/1.0/services/YARN/package/alerts/alert_nodemanager_health.py",
           "parameters": [
             {
               "name": "connection.timeout",
@@ -205,186 +45,31 @@
     ],
     "RESOURCEMANAGER": [
       {
-        "name": "yarn_resourcemanager_webui",
-        "label": "ResourceManager Web UI",
-        "description": "This host-level alert is triggered if the ResourceManager Web UI is unreachable.",
+        "name": "yarn_resourcemanager_process",
+        "label": "ResourceManager process",
+        "description": "Alert for resourcemanager process status",
         "interval": 1,
-        "scope": "ANY",
-        "source": {
-          "type": "WEB",
-          "uri": {
-            "http": "{{yarn-site/yarn.resourcemanager.webapp.address}}",
-            "https": "{{yarn-site/yarn.resourcemanager.webapp.https.address}}",
-            "https_property": "{{yarn-site/yarn.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "kerberos_keytab": "{{yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file}}",
-            "kerberos_principal": "{{yarn-site/yarn.resourcemanager.webapp.spnego-principal}}",
-            "connection_timeout": 5.0,
-            "high_availability": {
-              "alias_key" : "{{yarn-site/yarn.resourcemanager.ha.rm-ids}}",
-              "http_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.address.{{alias}}}}",
-              "https_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.https.address.{{alias}}}}"
-            }
-          },
-          "reporting": {
-            "ok": {
-              "text": "HTTP {0} response in {2:.3f}s"
-            },
-            "warning":{
-              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
-            },
-            "critical": {
-              "text": "Connection failed to {1} ({3})"
-            }
-          }
-        }
-      },
-      {
-        "name": "yarn_resourcemanager_cpu",
-        "label": "ResourceManager CPU Utilization",
-        "description": "This host-level alert is triggered if CPU utilization of the ResourceManager exceeds certain warning and critical thresholds. It checks the ResourceManager JMX Servlet for the SystemCPULoad property. The threshold values are in percent.",
-        "interval": 5,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{yarn-site/yarn.resourcemanager.webapp.address}}",
-            "https": "{{yarn-site/yarn.resourcemanager.webapp.https.address}}",
-            "kerberos_keytab": "{{yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file}}",
-            "kerberos_principal": "{{yarn-site/yarn.resourcemanager.webapp.spnego-principal}}",
-            "https_property": "{{yarn-site/yarn.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "connection_timeout": 5.0,
-            "high_availability": {
-              "alias_key" : "{{yarn-site/yarn.resourcemanager.ha.rm-ids}}",
-              "http_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.address.{{alias}}}}",
-              "https_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.https.address.{{alias}}}}"
-            }
-          },
-          "reporting": {
-            "ok": {
-              "text": "{1} CPU, load {0:.1%}"
-            },
-            "warning": {
-              "text": "{1} CPU, load {0:.1%}",
-              "value": 200
-            },
-            "critical": {
-              "text": "{1} CPU, load {0:.1%}",
-              "value": 250
-            },
-            "units" : "%",
-            "type": "PERCENT"
-          },
-          "jmx": {
-            "property_list": [
-              "java.lang:type=OperatingSystem/SystemCpuLoad",
-              "java.lang:type=OperatingSystem/AvailableProcessors"
-            ],
-            "value": "{0} * 100"
-          }
-        }
-      },
-      {
-        "name": "yarn_resourcemanager_rpc_latency",
-        "label": "ResourceManager RPC Latency",
-        "description": "This host-level alert is triggered if the ResourceManager operations RPC latency exceeds the configured critical threshold. Typically an increase in the RPC processing time increases the RPC queue length, causing the average queue wait time to increase for ResourceManager operations. The threshold values are in milliseconds.",
-        "interval": 5,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{yarn-site/yarn.resourcemanager.webapp.address}}",
-            "https": "{{yarn-site/yarn.resourcemanager.webapp.https.address}}",
-            "kerberos_keytab": "{{yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file}}",
-            "kerberos_principal": "{{yarn-site/yarn.resourcemanager.webapp.spnego-principal}}",
-            "https_property": "{{yarn-site/yarn.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "connection_timeout": 5.0,
-            "high_availability": {
-              "alias_key" : "{{yarn-site/yarn.resourcemanager.ha.rm-ids}}",
-              "http_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.address.{{alias}}}}",
-              "https_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.https.address.{{alias}}}}"
-            }
-          },
-          "reporting": {
-            "ok": {
-              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]"
-            },
-            "warning": {
-              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
-              "value": 3000
-            },          
-            "critical": {
-              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
-              "value": 5000
-            },
-            "units" : "ms"
-          },
-          "jmx": {
-            "property_list": [
-              "Hadoop:service=ResourceManager,name=RpcActivityForPort*/RpcQueueTimeAvgTime",
-              "Hadoop:service=ResourceManager,name=RpcActivityForPort*/RpcProcessingTimeAvgTime"
-            ],
-            "value": "{0}"
-          }
-        }
-      },
-      {
-        "name": "nodemanager_health_summary",
-        "label": "NodeManager Health Summary",
-        "description": "This service-level alert is triggered if there are unhealthy NodeManagers",
-        "interval": 1,
-        "scope": "SERVICE",
+        "scope": "HOST",
         "enabled": true,
         "source": {
           "type": "SCRIPT",
-          "path": "YARN/2.1.0.2.0/package/alerts/alert_nodemanagers_summary.py",
-          "parameters": [
-            {
-              "name": "connection.timeout",
-              "display_name": "Connection Timeout",
-              "value": 5.0,
-              "type": "NUMERIC",
-              "description": "The maximum time before this alert is considered to be CRITICAL",
-              "units": "seconds",
-              "threshold": "CRITICAL"
-            }
-          ]
+          "path": "PERF/1.0/services/YARN/package/alerts/alert_resourcemanager_process.py",
+          "parameters": []
         }
       }
     ],
     "APP_TIMELINE_SERVER": [
       {
-        "name": "yarn_app_timeline_server_webui",
-        "label": "App Timeline Web UI",
-        "description": "This host-level alert is triggered if the App Timeline Server Web UI is unreachable.",
+        "name": "yarn_app_timeline_server_process",
+        "label": "App Timeline process",
+        "description": "Alert for app timeline server process status",
         "interval": 1,
-        "scope": "ANY",
+        "scope": "HOST",
+        "enabled": true,
         "source": {
-          "type": "WEB",
-          "uri": {
-            "http": "{{yarn-site/yarn.timeline-service.webapp.address}}/ws/v1/timeline",
-            "https": "{{yarn-site/yarn.timeline-service.webapp.https.address}}/ws/v1/timeline",
-            "https_property": "{{yarn-site/yarn.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "kerberos_keytab": "{{yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab}}",
-            "kerberos_principal": "{{yarn-site/yarn.timeline-service.http-authentication.kerberos.principal}}",
-            "connection_timeout": 5.0
-          },
-          "reporting": {
-            "ok": {
-              "text": "HTTP {0} response in {2:.3f}s"
-            },
-            "warning":{
-              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
-            },
-            "critical": {
-              "text": "Connection failed to {1} ({3})"
-            }
-          }
+          "type": "SCRIPT",
+          "path": "PERF/1.0/services/YARN/package/alerts/alert_timeline_process.py",
+          "parameters": []
         }
       }
     ]

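With the WEB, METRIC and AGGREGATE definitions gone, every remaining alert follows the same shape: a name, label, description, interval, scope and a SCRIPT source whose path points at a file shipped with the stack. A quick sanity check along the lines below can catch a mismatched script path early; the resources_dir location is an assumption and would need to match the local checkout layout.

import json, os

def check_script_alerts(alerts_json, resources_dir):
    # Walk service -> component -> alert definitions and verify that every
    # SCRIPT source points at an existing file under the stacks directory.
    missing = []
    with open(alerts_json) as f:
        definitions = json.load(f)
    for service, components in definitions.items():
        for component, alerts in components.items():
            for alert in alerts:
                source = alert.get('source', {})
                if source.get('type') == 'SCRIPT':
                    path = os.path.join(resources_dir, 'stacks', source['path'])
                    if not os.path.isfile(path):
                        missing.append((alert['name'], path))
    return missing

# Example (paths are assumptions about the local checkout layout):
# check_script_alerts('ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/alerts.json',
#                     'ambari-server/src/main/resources')
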
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/yarn-alert-config.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/yarn-alert-config.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/yarn-alert-config.xml
new file mode 100644
index 0000000..392eea7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/yarn-alert-config.xml
@@ -0,0 +1,80 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+-->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
+
+    <property>
+        <name>alert.behavior.type</name>
+        <value>percentage</value>
+        <description>
+            This property describes the type of alert behaviour.
+            There are three types: percentage, timeout, flip.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+
+    <property>
+        <name>alert.success.percentage</name>
+        <value>100</value>
+        <description>
+            This property takes effect only when alert.behavior.type is
+            set to "percentage". Set it to the percentage of alert checks
+            that should succeed.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+
+    <property>
+        <name>alert.timeout.return.value</name>
+        <value>false</value>
+        <description>
+            This property takes effect only when alert.behavior.type is
+            set to "timeout". Set it to the result the alert should
+            return after the timeout: false, true or none.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>alert.timeout.secs</name>
+        <value>120</value>
+        <description>
+            This property takes effect only when alert.behavior.type is
+            set to "timeout". Set it to the number of seconds the alert
+            should sleep.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+
+    <property>
+        <name>alert.flip.interval.mins</name>
+        <value>3</value>
+        <description>
+            This property takes effect only when alert.behavior.type is
+            set to "flip". Set it to the number of minutes after which
+            the alert should flip between true and false.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+
+</configuration>
\ No newline at end of file

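Taken together with the property descriptions above, the intended behaviours are: "percentage" succeeds for roughly the configured fraction of checks, "timeout" sleeps for alert.timeout.secs and then reports the configured result, and "flip" alternates the result every alert.flip.interval.mins minutes. The sketch below restates those semantics in standalone form so the intent of the properties is easy to see; it is a simplified stand-in, not the resource_management simulate_perf_cluster_alert_behaviour implementation.

import random, time

def simulated_alert_result(props, now_minutes=0):
    # Simplified stand-in for the behaviours described by yarn-alert-config.
    behaviour = props['alert.behavior.type']
    if behaviour == 'percentage':
        ok = random.uniform(0, 100) <= float(props['alert.success.percentage'])
    elif behaviour == 'timeout':
        time.sleep(float(props['alert.timeout.secs']))
        ok = props['alert.timeout.return.value'] == 'true'
    elif behaviour == 'flip':
        interval = int(props['alert.flip.interval.mins'])
        ok = (now_minutes // interval) % 2 == 0
    else:
        return ('UNKNOWN', ['Unrecognised alert.behavior.type: %s' % behaviour])
    return ('OK', ['Simulated success']) if ok else ('CRITICAL', ['Simulated failure'])

# print(simulated_alert_result({'alert.behavior.type': 'percentage',
#                               'alert.success.percentage': '100'}))
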
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/metainfo.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/metainfo.xml
index 8b18e73..bb2191b 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/metainfo.xml
@@ -122,6 +122,7 @@
           -->
 
           <configuration-dependencies>
+            <config-type>yarn-alert-config</config-type>
             <config-type>capacity-scheduler</config-type>
             <config-type>hdfs-site</config-type>
           </configuration-dependencies>
@@ -193,6 +194,7 @@
       </components>
 
       <configuration-dependencies>
+        <config-type>yarn-alert-config</config-type>
         <config-type>yarn-site</config-type>
         <config-type>yarn-env</config-type>
         <config-type>hdfs-site</config-type>
@@ -324,6 +326,7 @@
       </requiredServices>
 
       <configuration-dependencies>
+        <config-type>yarn-alert-config</config-type>
         <config-type>hdfs-site</config-type>
         <config-type>hadoop-env</config-type>
         <config-type>core-site</config-type>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/alerts/alert_history_process.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/alerts/alert_history_process.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/alerts/alert_history_process.py
new file mode 100644
index 0000000..8733679
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/alerts/alert_history_process.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import logging
+
+from resource_management.libraries.functions.simulate_perf_cluster_alert_behaviour import simulate_perf_cluster_alert_behaviour
+
+ALERT_BEHAVIOUR_TYPE = "{{yarn-alert-config/alert.behavior.type}}"
+
+ALERT_SUCCESS_PERCENTAGE = "{{yarn-alert-config/alert.success.percentage}}"
+
+ALERT_TIMEOUT_RETURN_VALUE = "{{yarn-alert-config/alert.timeout.return.value}}"
+ALERT_TIMEOUT_SECS = "{{yarn-alert-config/alert.timeout.secs}}"
+
+ALERT_FLIP_INTERVAL_MINS = "{{yarn-alert-config/alert.flip.interval.mins}}"
+
+logger = logging.getLogger('ambari_alerts')
+
+alert_behaviour_properties = {"alert_behaviour_type" : ALERT_BEHAVIOUR_TYPE, "alert_success_percentage" : ALERT_SUCCESS_PERCENTAGE,
+                              "alert_timeout_return_value" : ALERT_TIMEOUT_RETURN_VALUE, "alert_timeout_secs" : ALERT_TIMEOUT_SECS,
+                              "alert_flip_interval_mins" : ALERT_FLIP_INTERVAL_MINS}
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (ALERT_BEHAVIOUR_TYPE, ALERT_SUCCESS_PERCENTAGE, ALERT_TIMEOUT_RETURN_VALUE, ALERT_TIMEOUT_SECS,
+          ALERT_FLIP_INTERVAL_MINS)
+
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  return simulate_perf_cluster_alert_behaviour(alert_behaviour_properties, configurations)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/alerts/alert_nodemanager_health.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/alerts/alert_nodemanager_health.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/alerts/alert_nodemanager_health.py
index d2cfe26..8733679 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/alerts/alert_nodemanager_health.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/alerts/alert_nodemanager_health.py
@@ -20,33 +20,31 @@ limitations under the License.
 
 import logging
 
-RESULT_CODE_OK = 'OK'
-RESULT_CODE_CRITICAL = 'CRITICAL'
-RESULT_CODE_UNKNOWN = 'UNKNOWN'
+from resource_management.libraries.functions.simulate_perf_cluster_alert_behaviour import simulate_perf_cluster_alert_behaviour
 
-OK_MESSAGE = 'NodeManager Healthy'
+ALERT_BEHAVIOUR_TYPE = "{{yarn-alert-config/alert.behavior.type}}"
 
-NODEMANAGER_HTTP_ADDRESS_KEY = '{{yarn-site/yarn.nodemanager.webapp.address}}'
-NODEMANAGER_HTTPS_ADDRESS_KEY = '{{yarn-site/yarn.nodemanager.webapp.https.address}}'
-YARN_HTTP_POLICY_KEY = '{{yarn-site/yarn.http.policy}}'
+ALERT_SUCCESS_PERCENTAGE = "{{yarn-alert-config/alert.success.percentage}}"
 
-KERBEROS_KEYTAB = '{{yarn-site/yarn.nodemanager.webapp.spnego-keytab-file}}'
-KERBEROS_PRINCIPAL = '{{yarn-site/yarn.nodemanager.webapp.spnego-principal}}'
-SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
-SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
-EXECUTABLE_SEARCH_PATHS = '{{kerberos-env/executable_search_paths}}'
+ALERT_TIMEOUT_RETURN_VALUE = "{{yarn-alert-config/alert.timeout.return.value}}"
+ALERT_TIMEOUT_SECS = "{{yarn-alert-config/alert.timeout.secs}}"
+
+ALERT_FLIP_INTERVAL_MINS = "{{yarn-alert-config/alert.flip.interval.mins}}"
 
 logger = logging.getLogger('ambari_alerts')
 
+alert_behaviour_properties = {"alert_behaviour_type" : ALERT_BEHAVIOUR_TYPE, "alert_success_percentage" : ALERT_SUCCESS_PERCENTAGE,
+                              "alert_timeout_return_value" : ALERT_TIMEOUT_RETURN_VALUE, "alert_timeout_secs" : ALERT_TIMEOUT_SECS,
+                              "alert_flip_interval_mins" : ALERT_FLIP_INTERVAL_MINS}
 
 def get_tokens():
   """
   Returns a tuple of tokens in the format {{site/property}} that will be used
   to build the dictionary passed into execute
   """
-  return (NODEMANAGER_HTTP_ADDRESS_KEY,NODEMANAGER_HTTPS_ADDRESS_KEY, EXECUTABLE_SEARCH_PATHS,
-  YARN_HTTP_POLICY_KEY, SMOKEUSER_KEY, KERBEROS_KEYTAB, KERBEROS_PRINCIPAL, SECURITY_ENABLED_KEY)
-  
+  return (ALERT_BEHAVIOUR_TYPE, ALERT_SUCCESS_PERCENTAGE, ALERT_TIMEOUT_RETURN_VALUE, ALERT_TIMEOUT_SECS,
+          ALERT_FLIP_INTERVAL_MINS)
+
 
 def execute(configurations={}, parameters={}, host_name=None):
   """
@@ -57,11 +55,5 @@ def execute(configurations={}, parameters={}, host_name=None):
   parameters (dictionary): a mapping of script parameter key to value
   host_name (string): the name of this host where the alert is running
   """
-  result_code = RESULT_CODE_UNKNOWN
-
-  if configurations is None:
-    return (result_code, ['There were no configurations supplied to the script.'])
 
-  result_code = RESULT_CODE_OK
-  label = OK_MESSAGE
-  return (result_code, [label])
+  return simulate_perf_cluster_alert_behaviour(alert_behaviour_properties, configurations)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/alerts/alert_nodemanagers_summary.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/alerts/alert_nodemanagers_summary.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/alerts/alert_nodemanagers_summary.py
deleted file mode 100644
index 81527f6..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/alerts/alert_nodemanagers_summary.py
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import logging
-
-RESULT_CODE_OK = 'OK'
-RESULT_CODE_CRITICAL = 'CRITICAL'
-RESULT_CODE_UNKNOWN = 'UNKNOWN'
-
-OK_MESSAGE = 'All NodeManagers are healthy'
-
-NODEMANAGER_HTTP_ADDRESS_KEY = '{{yarn-site/yarn.resourcemanager.webapp.address}}'
-NODEMANAGER_HTTPS_ADDRESS_KEY = '{{yarn-site/yarn.resourcemanager.webapp.https.address}}'
-YARN_HTTP_POLICY_KEY = '{{yarn-site/yarn.http.policy}}'
-
-KERBEROS_KEYTAB = '{{yarn-site/yarn.nodemanager.webapp.spnego-keytab-file}}'
-KERBEROS_PRINCIPAL = '{{yarn-site/yarn.nodemanager.webapp.spnego-principal}}'
-SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
-SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
-EXECUTABLE_SEARCH_PATHS = '{{kerberos-env/executable_search_paths}}'
-
-logger = logging.getLogger('ambari_alerts')
-
-
-def get_tokens():
-  """
-  Returns a tuple of tokens in the format {{site/property}} that will be used
-  to build the dictionary passed into execute
-  """
-  return NODEMANAGER_HTTP_ADDRESS_KEY, NODEMANAGER_HTTPS_ADDRESS_KEY, EXECUTABLE_SEARCH_PATHS, \
-    YARN_HTTP_POLICY_KEY, SMOKEUSER_KEY, KERBEROS_KEYTAB, KERBEROS_PRINCIPAL, SECURITY_ENABLED_KEY
-
-
-def execute(configurations={}, parameters={}, host_name=None):
-  """
-  Returns a tuple containing the result code and a pre-formatted result label
-
-  Keyword arguments:
-  configurations (dictionary): a mapping of configuration key to value
-  parameters (dictionary): a mapping of script parameter key to value
-  host_name (string): the name of this host where the alert is running
-  """
-  result_code = RESULT_CODE_UNKNOWN
-
-  if configurations is None:
-    return (result_code, ['There were no configurations supplied to the script.'])
-
-  result_code = RESULT_CODE_OK
-  label = OK_MESSAGE
-
-  return (result_code, [label])

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/alerts/alert_resourcemanager_process.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/alerts/alert_resourcemanager_process.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/alerts/alert_resourcemanager_process.py
new file mode 100644
index 0000000..8733679
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/alerts/alert_resourcemanager_process.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import logging
+
+from resource_management.libraries.functions.simulate_perf_cluster_alert_behaviour import simulate_perf_cluster_alert_behaviour
+
+ALERT_BEHAVIOUR_TYPE = "{{yarn-alert-config/alert.behavior.type}}"
+
+ALERT_SUCCESS_PERCENTAGE = "{{yarn-alert-config/alert.success.percentage}}"
+
+ALERT_TIMEOUT_RETURN_VALUE = "{{yarn-alert-config/alert.timeout.return.value}}"
+ALERT_TIMEOUT_SECS = "{{yarn-alert-config/alert.timeout.secs}}"
+
+ALERT_FLIP_INTERVAL_MINS = "{{yarn-alert-config/alert.flip.interval.mins}}"
+
+logger = logging.getLogger('ambari_alerts')
+
+alert_behaviour_properties = {"alert_behaviour_type" : ALERT_BEHAVIOUR_TYPE, "alert_success_percentage" : ALERT_SUCCESS_PERCENTAGE,
+                              "alert_timeout_return_value" : ALERT_TIMEOUT_RETURN_VALUE, "alert_timeout_secs" : ALERT_TIMEOUT_SECS,
+                              "alert_flip_interval_mins" : ALERT_FLIP_INTERVAL_MINS}
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (ALERT_BEHAVIOUR_TYPE, ALERT_SUCCESS_PERCENTAGE, ALERT_TIMEOUT_RETURN_VALUE, ALERT_TIMEOUT_SECS,
+          ALERT_FLIP_INTERVAL_MINS)
+
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  return simulate_perf_cluster_alert_behaviour(alert_behaviour_properties, configurations)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/alerts/alert_timeline_process.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/alerts/alert_timeline_process.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/alerts/alert_timeline_process.py
new file mode 100644
index 0000000..8733679
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/alerts/alert_timeline_process.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import logging
+
+from resource_management.libraries.functions.simulate_perf_cluster_alert_behaviour import simulate_perf_cluster_alert_behaviour
+
+ALERT_BEHAVIOUR_TYPE = "{{yarn-alert-config/alert.behavior.type}}"
+
+ALERT_SUCCESS_PERCENTAGE = "{{yarn-alert-config/alert.success.percentage}}"
+
+ALERT_TIMEOUT_RETURN_VALUE = "{{yarn-alert-config/alert.timeout.return.value}}"
+ALERT_TIMEOUT_SECS = "{{yarn-alert-config/alert.timeout.secs}}"
+
+ALERT_FLIP_INTERVAL_MINS = "{{yarn-alert-config/alert.flip.interval.mins}}"
+
+logger = logging.getLogger('ambari_alerts')
+
+alert_behaviour_properties = {"alert_behaviour_type" : ALERT_BEHAVIOUR_TYPE, "alert_success_percentage" : ALERT_SUCCESS_PERCENTAGE,
+                              "alert_timeout_return_value" : ALERT_TIMEOUT_RETURN_VALUE, "alert_timeout_secs" : ALERT_TIMEOUT_SECS,
+                              "alert_flip_interval_mins" : ALERT_FLIP_INTERVAL_MINS}
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (ALERT_BEHAVIOUR_TYPE, ALERT_SUCCESS_PERCENTAGE, ALERT_TIMEOUT_RETURN_VALUE, ALERT_TIMEOUT_SECS,
+          ALERT_FLIP_INTERVAL_MINS)
+
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  return simulate_perf_cluster_alert_behaviour(alert_behaviour_properties, configurations)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/application_timeline_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/application_timeline_server.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/application_timeline_server.py
index 4b32de7..d179b1f 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/application_timeline_server.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/application_timeline_server.py
@@ -33,6 +33,10 @@ class ApplicationTimelineServer(Dummy):
   def __init__(self):
     super(ApplicationTimelineServer, self).__init__()
     self.component_name = "APP_TIMELINE_SERVER"
+    self.principal_conf_name = "yarn-site"
+    self.principal_name = "yarn.timeline-service.principal"
+    self.keytab_conf_name = "yarn-site"
+    self.keytab_name = "yarn.timeline-service.keytab"
 
 if __name__ == "__main__":
   ApplicationTimelineServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/historyserver.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/historyserver.py
index cba85b5..0570987 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/historyserver.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/historyserver.py
@@ -33,6 +33,10 @@ class HistoryServer(Dummy):
   def __init__(self):
     super(HistoryServer, self).__init__()
     self.component_name = "HISTORYSERVER"
+    self.principal_conf_name = "mapred-site"
+    self.principal_name = "mapreduce.jobhistory.principal"
+    self.keytab_conf_name = "mapred-site"
+    self.keytab_name = "mapreduce.jobhistory.keytab"
 
 if __name__ == "__main__":
   HistoryServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/nodemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/nodemanager.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/nodemanager.py
index 883c3ad..21db94f 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/nodemanager.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/nodemanager.py
@@ -33,6 +33,10 @@ class Nodemanager(Dummy):
   def __init__(self):
     super(Nodemanager, self).__init__()
     self.component_name = "NODEMANAGER"
+    self.principal_conf_name = "yarn-site"
+    self.principal_name = "yarn.nodemanager.principal"
+    self.keytab_conf_name = "yarn-site"
+    self.keytab_name = "yarn.nodemanager.keytab"
 
 if __name__ == "__main__":
   Nodemanager().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/resourcemanager.py
index 7f80077..5f6c535 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/resourcemanager.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/resourcemanager.py
@@ -33,6 +33,10 @@ class Resourcemanager(Dummy):
   def __init__(self):
     super(Resourcemanager, self).__init__()
     self.component_name = "RESOURCEMANAGER"
+    self.principal_conf_name = "yarn-site"
+    self.principal_name = "yarn.resourcemanager.principal"
+    self.keytab_conf_name = "yarn-site"
+    self.keytab_name = "yarn.resourcemanager.keytab"
 
   def decommission(self, env):
     print "Decommission"

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/alerts.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/alerts.json
new file mode 100644
index 0000000..4af7ae9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/alerts.json
@@ -0,0 +1,20 @@
+{
+    "ZOOKEEPER": {
+
+        "ZOOKEEPER_SERVER": [
+            {
+                "name": "zookeeper_server_process",
+                "label": "ZooKeeper Server process",
+                "description": "Alert for zk server component process status",
+                "interval": 1,
+                "scope": "HOST",
+                "enabled": true,
+                "source": {
+                    "type": "SCRIPT",
+                    "path": "PERF/1.0/services/ZOOKEEPER/package/alerts/alert_zk_server_process.py",
+                    "parameters": []
+                }
+            }
+        ]
+    }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/configuration/zk-alert-config.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/configuration/zk-alert-config.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/configuration/zk-alert-config.xml
new file mode 100644
index 0000000..392eea7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/configuration/zk-alert-config.xml
@@ -0,0 +1,80 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+-->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
+
+    <property>
+        <name>alert.behavior.type</name>
+        <value>percentage</value>
+        <description>
+            This property describes the type of alert behaviour.
+            There are three types: percentage, timeout, flip.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+
+    <property>
+        <name>alert.success.percentage</name>
+        <value>100</value>
+        <description>
+            This property takes effect only when alert.behavior.type is
+            set to "percentage". Set it to the percentage of alert checks
+            that should succeed.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+
+    <property>
+        <name>alert.timeout.return.value</name>
+        <value>false</value>
+        <description>
+            This property takes effect only when alert.behavior.type is
+            set to "timeout". Set it to the result the alert should
+            return after the timeout: false, true or none.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>alert.timeout.secs</name>
+        <value>120</value>
+        <description>
+            This property takes effect only when alert.behavior.type is
+            set to "timeout". Set it to the number of seconds the alert
+            should sleep.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+
+    <property>
+        <name>alert.flip.interval.mins</name>
+        <value>3</value>
+        <description>
+            This property takes effect only when alert.behavior.type is
+            set to "flip". Set it to the number of minutes after which
+            the alert should flip between true and false.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+
+</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/kerberos.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/kerberos.json
new file mode 100644
index 0000000..0a64ea5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/kerberos.json
@@ -0,0 +1,39 @@
+{
+  "services": [
+    {
+      "name": "ZOOKEEPER",
+      "identities": [
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "components": [
+        {
+          "name": "ZOOKEEPER_SERVER",
+          "identities": [
+            {
+              "name": "zookeeper_zk",
+              "principal": {
+                "value": "zookeeper/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "zookeeper-env/zookeeper_principal_name"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/zk.service.keytab",
+                "owner": {
+                  "name": "${zookeeper-env/zk_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "zookeeper-env/zookeeper_keytab_path"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/metainfo.xml
index 62ea041..57667bb 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/metainfo.xml
@@ -43,6 +43,10 @@
         </component>
       </components>
 
+      <configuration-dependencies>
+        <config-type>zk-alert-config</config-type>
+      </configuration-dependencies>
+
       <!-- No packages to install. -->
       <osSpecifics></osSpecifics>
     </service>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/package/alerts/alert_zk_server_process.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/package/alerts/alert_zk_server_process.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/package/alerts/alert_zk_server_process.py
new file mode 100644
index 0000000..55bbbd1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/package/alerts/alert_zk_server_process.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import logging
+
+from resource_management.libraries.functions.simulate_perf_cluster_alert_behaviour import simulate_perf_cluster_alert_behaviour
+
+ALERT_BEHAVIOUR_TYPE = "{{zk-alert-config/alert.behavior.type}}"
+
+ALERT_SUCCESS_PERCENTAGE = "{{zk-alert-config/alert.success.percentage}}"
+
+ALERT_TIMEOUT_RETURN_VALUE = "{{zk-alert-config/alert.timeout.return.value}}"
+ALERT_TIMEOUT_SECS = "{{zk-alert-config/alert.timeout.secs}}"
+
+ALERT_FLIP_INTERVAL_MINS = "{{zk-alert-config/alert.flip.interval.mins}}"
+
+logger = logging.getLogger('ambari_alerts')
+
+alert_behaviour_properties = {"alert_behaviour_type" : ALERT_BEHAVIOUR_TYPE, "alert_success_percentage" : ALERT_SUCCESS_PERCENTAGE,
+                              "alert_timeout_return_value" : ALERT_TIMEOUT_RETURN_VALUE, "alert_timeout_secs" : ALERT_TIMEOUT_SECS,
+                              "alert_flip_interval_mins" : ALERT_FLIP_INTERVAL_MINS}
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (ALERT_BEHAVIOUR_TYPE, ALERT_SUCCESS_PERCENTAGE, ALERT_TIMEOUT_RETURN_VALUE, ALERT_TIMEOUT_SECS,
+          ALERT_FLIP_INTERVAL_MINS)
+
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  return simulate_perf_cluster_alert_behaviour(alert_behaviour_properties, configurations)
\ No newline at end of file
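
As a rough, assumed sketch of how the script above is driven: the agent-side alert runner calls get_tokens(), resolves each {{site/property}} token against the current configurations, and hands the resulting dictionary to execute(). Something like the following, where the property values are invented and resource_management must be importable:

    # hypothetical driver mimicking the agent-side alert runner; not part of the commit
    import alert_zk_server_process as zk_alert

    # invented zk-alert-config values, purely for illustration
    cluster_configs = {
        "{{zk-alert-config/alert.behavior.type}}": "percentage",
        "{{zk-alert-config/alert.success.percentage}}": "90",
    }

    # build the configurations dict from the script's declared tokens
    configurations = {token: cluster_configs.get(token) for token in zk_alert.get_tokens()}

    result = zk_alert.execute(configurations=configurations)
    print(result)  # a (result code, label) tuple produced by the simulated behaviour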

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/package/scripts/zookeeper_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/package/scripts/zookeeper_server.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/package/scripts/zookeeper_server.py
index 92519ba..6ab88bb 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/package/scripts/zookeeper_server.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/package/scripts/zookeeper_server.py
@@ -33,6 +33,10 @@ class ZookeeperServer(Dummy):
   def __init__(self):
     super(ZookeeperServer, self).__init__()
     self.component_name = "ZOOKEEPER_SERVER"
+    self.principal_conf_name = "zookeeper-env"
+    self.principal_name = "zookeeper_principal_name"
+    self.keytab_conf_name = "zookeeper-env"
+    self.keytab_name = "zookeeper_keytab_path"
 
 if __name__ == "__main__":
   ZookeeperServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/stack_advisor.py b/ambari-server/src/main/resources/stacks/stack_advisor.py
index f6191f8..8148379 100644
--- a/ambari-server/src/main/resources/stacks/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/stack_advisor.py
@@ -972,6 +972,18 @@ class DefaultStackAdvisor(StackAdvisor):
       return None
     return siteConfig.get("properties")
 
+  def getServicesSiteProperties(self, services, siteName):
+    if not services:
+      return None
+
+    configurations = services.get("configurations")
+    if not configurations:
+      return None
+    siteConfig = configurations.get(siteName)
+    if siteConfig is None:
+      return None
+    return siteConfig.get("properties")
+
   def putProperty(self, config, configType, services=None):
     userConfigs = {}
     changedConfigs = []
@@ -1040,14 +1052,27 @@ class DefaultStackAdvisor(StackAdvisor):
       config[configType]["property_attributes"][key][attribute] = attributeValue if isinstance(attributeValue, list) else str(attributeValue)
     return appendPropertyAttribute
 
-
-  """
-  Returns the hosts which are running the given component.
-  """
   def getHosts(self, componentsList, componentName):
+    """
+    Returns the hosts which are running the given component.
+    """
     hostNamesList = [component["hostnames"] for component in componentsList if component["component_name"] == componentName]
     return hostNamesList[0] if len(hostNamesList) > 0 else []
 
+  def getMountPoints(self, hosts):
+    """
+    Return list of mounts available on the hosts
+
+    :type hosts dict
+    """
+    mount_points = []
+
+    for item in hosts["items"]:
+      if "disk_info" in item["Hosts"]:
+        mount_points.append(item["Hosts"]["disk_info"])
+
+    return mount_points
+
   def isSecurityEnabled(self, services):
     """
     Determines if security is enabled by testing the value of cluster-env/security enabled.
@@ -1084,3 +1109,179 @@ class DefaultStackAdvisor(StackAdvisor):
 
   def getServiceNames(self, services):
     return [service["StackServices"]["service_name"] for service in services["services"]]
+
+  def filterHostMounts(self, hosts, services):
+    """
+    Filter mounts on the hosts using agent_mounts_ignore_list, excluding any record whose mount-point
+     is mentioned in agent_mounts_ignore_list.
+
+    This function updates the hosts dictionary in place.
+
+    Example:
+
+      agent_mounts_ignore_list : "/run/secrets"
+
+      Hosts record :
+
+       "disk_info" : [
+          {
+              ...
+            "mountpoint" : "/"
+          },
+          {
+              ...
+            "mountpoint" : "/run/secrets"
+          }
+        ]
+
+      Result would be :
+
+        "disk_info" : [
+          {
+              ...
+            "mountpoint" : "/"
+          }
+        ]
+
+    :type hosts dict
+    :type services dict
+    """
+    if not services or "items" not in hosts:
+      return hosts
+
+    banned_filesystems = ["devtmpfs", "tmpfs", "vboxsf", "cdfs"]
+    banned_mount_points = ["/etc/resolv.conf", "/etc/hostname", "/boot", "/mnt", "/tmp", "/run/secrets"]
+
+    cluster_env = self.getServicesSiteProperties(services, "cluster-env")
+    ignore_list = []
+
+    if cluster_env and "agent_mounts_ignore_list" in cluster_env and cluster_env["agent_mounts_ignore_list"].strip():
+      ignore_list = [x.strip() for x in cluster_env["agent_mounts_ignore_list"].strip().split(",")]
+
+    ignore_list.extend(banned_mount_points)
+
+    for host in hosts["items"]:
+      if "Hosts" not in host and "disk_info" not in host["Hosts"]:
+        continue
+
+      host = host["Hosts"]
+      disk_info = []
+
+      for disk in host["disk_info"]:
+        if disk["mountpoint"] not in ignore_list\
+          and disk["type"].lower() not in banned_filesystems:
+          disk_info.append(disk)
+
+      host["disk_info"] = disk_info
+
+    return hosts
+
+  def __getSameHostMounts(self, hosts):
+    """
+    Return the list of mount points that are present on all hosts
+
+    :type hosts dict
+    :rtype list
+    """
+    if not hosts:
+      return None
+
+    hostMounts = self.getMountPoints(hosts)
+    mounts = []
+    for m in hostMounts:
+      host_mounts = set([item["mountpoint"] for item in m])
+      mounts = host_mounts if not mounts else mounts & host_mounts
+
+    return sorted(mounts)
+
+  def getMountPathVariations(self, initial_value, component_name, services, hosts):
+    """
+    Recommends the best fitted mounts by prefixing the path with each of them.
+
+    :return list of paths prefixed with the selected mount points. If no recommendation is
+     possible, an empty list is returned
+
+    :type initial_value str
+    :type component_name str
+    :type services dict
+    :type hosts dict
+    :rtype list
+    """
+    available_mounts = []
+
+    if not initial_value:
+      return available_mounts
+
+    mounts = self.__getSameHostMounts(hosts)
+    sep = "/"
+
+    if not mounts:
+      return available_mounts
+
+    for mount in mounts:
+      new_mount = initial_value if mount == "/" else os.path.join(mount + sep, initial_value.lstrip(sep))
+      if new_mount not in available_mounts:
+        available_mounts.append(new_mount)
+
+    # do not transform the list after it is filled, because that would change the item order
+    return available_mounts
+
+  def getMountPathVariation(self, initial_value, component_name, services, hosts):
+    """
+    Recommends the single best fitted mount by prefixing the path with it.
+
+    :return single-element list containing the best fitted path. If no recommendation is
+     possible, an empty list is returned
+
+    :type initial_value str
+    :type component_name str
+    :type services dict
+    :type hosts dict
+    :rtype list
+    """
+    try:
+      return [self.getMountPathVariations(initial_value, component_name, services, hosts)[0]]
+    except IndexError:
+      return []
+
+  def updateMountProperties(self, siteConfig, propertyDefinitions, configurations,  services, hosts):
+    """
+    Update properties according to recommendations for available mount-points
+
+    propertyDefinitions is a list of tuples: property name, component name, initial value, recommendation type
+
+     Where,
+
+       property name - name of the property
+       component name - name of the component to which this property belongs
+       initial value - initial path
+       recommendation type - either "multi" or "single". This selects the recommendation strategy: use all
+        available mounts on the host or only a single one
+
+    :type propertyDefinitions list
+    :type siteConfig str
+    :type configurations dict
+    :type services dict
+    :type hosts dict
+    """
+
+    props = self.getServicesSiteProperties(services, siteConfig)
+    put_f = self.putProperty(configurations, siteConfig, services)
+
+    for prop_item in propertyDefinitions:
+      name, component, default_value, rc_type = prop_item
+      recommendation = None
+
+      if props is None or name not in props or props[name] == default_value:
+        if rc_type == "multi":
+          recommendation = self.getMountPathVariations(default_value, component, services, hosts)
+        else:
+          recommendation = self.getMountPathVariation(default_value, component, services, hosts)
+
+      if recommendation:
+        put_f(name, ",".join(recommendation))
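
Taken together, the helpers above are intended to be called from a service advisor roughly as follows; the site name, property names and mount layout are illustrative assumptions, self stands for a DefaultStackAdvisor subclass, and configurations/services/hosts are the usual recommendation arguments:

    # hypothetical usage inside a recommend*Configurations method; not part of the commit
    hosts = self.filterHostMounts(hosts, services)  # drop tmpfs/vboxsf/etc. and ignored mount points

    # (property name, component, initial path, strategy)
    property_definitions = [
        ("dfs.datanode.data.dir", "DATANODE", "/hadoop/hdfs/data", "multi"),      # one path per common mount
        ("dfs.namenode.name.dir", "NAMENODE", "/hadoop/hdfs/namenode", "single"), # best single mount only
    ]

    # with common mounts ["/", "/grid/0"], getMountPathVariations("/hadoop/hdfs/data", ...)
    # would yield ["/hadoop/hdfs/data", "/grid/0/hadoop/hdfs/data"]
    self.updateMountProperties("hdfs-site", property_definitions, configurations, services, hosts)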

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/upgrade-pack.xsd
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade-pack.xsd b/ambari-server/src/main/resources/upgrade-pack.xsd
index 2871f05..aeec803 100644
--- a/ambari-server/src/main/resources/upgrade-pack.xsd
+++ b/ambari-server/src/main/resources/upgrade-pack.xsd
@@ -66,6 +66,11 @@
   <xs:simpleType name="config-condition-comparison-type">
     <xs:restriction base="xs:string">
       <xs:enumeration value="equals" />
+      <xs:enumeration value="not-equals" />
+      <xs:enumeration value="contains" />
+      <xs:enumeration value="not-contains" />
+      <xs:enumeration value="exists" />
+      <xs:enumeration value="not-exists" />
     </xs:restriction>
   </xs:simpleType>
   
@@ -76,7 +81,8 @@
       <xs:extension base="abstract-condition-type">  
         <xs:attribute name="type" type="xs:Name" use="required"/>
         <xs:attribute name="property" type="xs:Name" use="required"/>
-        <xs:attribute name="value" type="xs:string" use="required"/>
+        <xs:attribute name="value" type="xs:string" use="optional"/>
+        <xs:attribute name="return_value_if_config_missing" type="xs:boolean" use="optional"/>
         <xs:attribute name="comparison" type="config-condition-comparison-type" use="required"/>
       </xs:extension>
     </xs:complexContent>
@@ -392,7 +398,15 @@
         <xs:element name="type" type="upgrade-kind-type" />
         <xs:element name="prerequisite-checks" type="prerequisite-check-type" minOccurs="0" />
         <xs:element name="upgrade-path" type="upgrade-path-type" minOccurs="0" /> 
-        <xs:element name="order" type="order-type" />
+        <xs:element name="order" type="order-type">
+          <xs:unique name="unique-by-group">
+            <xs:annotation>
+              <xs:documentation>Ensures that the element "processing" does not have duplicate services</xs:documentation>
+            </xs:annotation>
+            <xs:selector xpath="group" />
+            <xs:field xpath="@name" />
+          </xs:unique>
+        </xs:element>
         <xs:element name="processing" type="processing-type">
           <xs:unique name="unique-by-service">
             <xs:annotation>


[22/25] ambari git commit: Merge from branch-2.5

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
index 9917720..fe1f338 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
@@ -17,18 +17,22 @@
  */
 package org.apache.ambari.server.state.configgroup;
 
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.DuplicateResourceException;
 import org.apache.ambari.server.controller.ConfigGroupResponse;
 import org.apache.ambari.server.controller.internal.ConfigurationResourceProvider;
+import org.apache.ambari.server.logging.LockFactory;
 import org.apache.ambari.server.orm.dao.ClusterDAO;
 import org.apache.ambari.server.orm.dao.ConfigGroupConfigMappingDAO;
 import org.apache.ambari.server.orm.dao.ConfigGroupDAO;
@@ -44,213 +48,195 @@ import org.apache.ambari.server.orm.entities.HostEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.Host;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.gson.Gson;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
 import com.google.inject.assistedinject.Assisted;
 import com.google.inject.assistedinject.AssistedInject;
 import com.google.inject.persist.Transactional;
 
 public class ConfigGroupImpl implements ConfigGroup {
   private static final Logger LOG = LoggerFactory.getLogger(ConfigGroupImpl.class);
-  private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
 
   private Cluster cluster;
-  private ConfigGroupEntity configGroupEntity;
-  private Map<Long, Host> hosts;
-  private Map<String, Config> configurations;
-  private volatile boolean isPersisted = false;
-
-  @Inject
-  private Gson gson;
-  @Inject
-  private ConfigGroupDAO configGroupDAO;
-  @Inject
-  private ConfigGroupConfigMappingDAO configGroupConfigMappingDAO;
-  @Inject
-  private ConfigGroupHostMappingDAO configGroupHostMappingDAO;
-  @Inject
-  private HostDAO hostDAO;
-  @Inject
-  private ClusterDAO clusterDAO;
-  @Inject
-  Clusters clusters;
+  private ConcurrentMap<Long, Host> m_hosts;
+  private ConcurrentMap<String, Config> m_configurations;
+  private String configGroupName;
+  private long configGroupId;
+
+  /**
+   * This lock is required to prevent inconsistencies in internal state between
+   * {@link #m_hosts} and the entities stored by the {@link ConfigGroupEntity}.
+   */
+  private final ReadWriteLock hostLock;
+
+  /**
+   * A label for {@link #hostLock} to use with the {@link LockFactory}.
+   */
+  private static final String hostLockLabel = "configurationGroupHostLock";
+
+  private final ConfigGroupDAO configGroupDAO;
+
+  private final ConfigGroupConfigMappingDAO configGroupConfigMappingDAO;
+
+  private final ConfigGroupHostMappingDAO configGroupHostMappingDAO;
+
+  private final HostDAO hostDAO;
+
+  private final ClusterDAO clusterDAO;
+
+  private final ConfigFactory configFactory;
 
   @AssistedInject
-  public ConfigGroupImpl(@Assisted("cluster") Cluster cluster,
-                         @Assisted("name") String name,
-                         @Assisted("tag") String tag,
-                         @Assisted("description") String description,
-                         @Assisted("configs") Map<String, Config> configs,
-                         @Assisted("hosts") Map<Long, Host> hosts,
-                         Injector injector) {
-    injector.injectMembers(this);
+  public ConfigGroupImpl(@Assisted("cluster") Cluster cluster, @Assisted("name") String name,
+      @Assisted("tag") String tag, @Assisted("description") String description,
+      @Assisted("configs") Map<String, Config> configurations,
+      @Assisted("hosts") Map<Long, Host> hosts, Clusters clusters, ConfigFactory configFactory,
+      ClusterDAO clusterDAO, HostDAO hostDAO, ConfigGroupDAO configGroupDAO,
+      ConfigGroupConfigMappingDAO configGroupConfigMappingDAO,
+      ConfigGroupHostMappingDAO configGroupHostMappingDAO, LockFactory lockFactory) {
+
+    this.configFactory = configFactory;
+    this.clusterDAO = clusterDAO;
+    this.hostDAO = hostDAO;
+    this.configGroupDAO = configGroupDAO;
+    this.configGroupConfigMappingDAO = configGroupConfigMappingDAO;
+    this.configGroupHostMappingDAO = configGroupHostMappingDAO;
+
+    hostLock = lockFactory.newReadWriteLock(hostLockLabel);
+
     this.cluster = cluster;
+    configGroupName = name;
 
-    configGroupEntity = new ConfigGroupEntity();
+    ConfigGroupEntity configGroupEntity = new ConfigGroupEntity();
     configGroupEntity.setClusterId(cluster.getClusterId());
     configGroupEntity.setGroupName(name);
     configGroupEntity.setTag(tag);
     configGroupEntity.setDescription(description);
 
-    if (hosts != null) {
-      this.hosts = hosts;
-    } else {
-      this.hosts = new HashMap<Long, Host>();
-    }
+    m_hosts = hosts == null ? new ConcurrentHashMap<Long, Host>()
+        : new ConcurrentHashMap<>(hosts);
 
-    if (configs != null) {
-      configurations = configs;
-    } else {
-      configurations = new HashMap<String, Config>();
-    }
+    m_configurations = configurations == null ? new ConcurrentHashMap<String, Config>()
+        : new ConcurrentHashMap<>(configurations);
+
+    // save the entity and grab the ID
+    persist(configGroupEntity);
+    configGroupId = configGroupEntity.getGroupId();
   }
 
   @AssistedInject
-  public ConfigGroupImpl(@Assisted Cluster cluster,
-                         @Assisted ConfigGroupEntity configGroupEntity,
-                         Injector injector) {
-    injector.injectMembers(this);
+  public ConfigGroupImpl(@Assisted Cluster cluster, @Assisted ConfigGroupEntity configGroupEntity,
+      Clusters clusters, ConfigFactory configFactory,
+      ClusterDAO clusterDAO, HostDAO hostDAO, ConfigGroupDAO configGroupDAO,
+      ConfigGroupConfigMappingDAO configGroupConfigMappingDAO,
+      ConfigGroupHostMappingDAO configGroupHostMappingDAO, LockFactory lockFactory) {
+
+    this.configFactory = configFactory;
+    this.clusterDAO = clusterDAO;
+    this.hostDAO = hostDAO;
+    this.configGroupDAO = configGroupDAO;
+    this.configGroupConfigMappingDAO = configGroupConfigMappingDAO;
+    this.configGroupHostMappingDAO = configGroupHostMappingDAO;
+
+    hostLock = lockFactory.newReadWriteLock(hostLockLabel);
+
     this.cluster = cluster;
+    configGroupId = configGroupEntity.getGroupId();
+    configGroupName = configGroupEntity.getGroupName();
 
-    this.configGroupEntity = configGroupEntity;
-    configurations = new HashMap<String, Config>();
-    hosts = new HashMap<Long, Host>();
+    m_configurations = new ConcurrentHashMap<String, Config>();
+    m_hosts = new ConcurrentHashMap<Long, Host>();
 
     // Populate configs
-    for (ConfigGroupConfigMappingEntity configMappingEntity : configGroupEntity
-      .getConfigGroupConfigMappingEntities()) {
-
+    for (ConfigGroupConfigMappingEntity configMappingEntity : configGroupEntity.getConfigGroupConfigMappingEntities()) {
       Config config = cluster.getConfig(configMappingEntity.getConfigType(),
         configMappingEntity.getVersionTag());
 
       if (config != null) {
-        configurations.put(config.getType(), config);
+        m_configurations.put(config.getType(), config);
       } else {
-        LOG.warn("Unable to find config mapping for config group"
-          + ", clusterName = " + cluster.getClusterName()
-          + ", type = " + configMappingEntity.getConfigType()
-          + ", tag = " + configMappingEntity.getVersionTag());
+        LOG.warn("Unable to find config mapping {}/{} for config group in cluster {}",
+            configMappingEntity.getConfigType(), configMappingEntity.getVersionTag(),
+            cluster.getClusterName());
       }
     }
 
     // Populate Hosts
-    for (ConfigGroupHostMappingEntity hostMappingEntity : configGroupEntity
-      .getConfigGroupHostMappingEntities()) {
-
+    for (ConfigGroupHostMappingEntity hostMappingEntity : configGroupEntity.getConfigGroupHostMappingEntities()) {
       try {
         Host host = clusters.getHost(hostMappingEntity.getHostname());
         HostEntity hostEntity = hostMappingEntity.getHostEntity();
         if (host != null && hostEntity != null) {
-          hosts.put(hostEntity.getHostId(), host);
+          m_hosts.put(hostEntity.getHostId(), host);
         }
       } catch (AmbariException e) {
-        String msg = "Host seems to be deleted but Config group mapping still " +
-          "exists !";
-        LOG.warn(msg);
-        LOG.debug(msg, e);
+        LOG.warn("Host seems to be deleted but Config group mapping still exists !");
+        LOG.debug("Host seems to be deleted but Config group mapping still exists !", e);
       }
     }
-
-    isPersisted = true;
   }
 
   @Override
   public Long getId() {
-    return configGroupEntity.getGroupId();
+    return configGroupId;
   }
 
   @Override
   public String getName() {
-    readWriteLock.readLock().lock();
-    try {
-      return configGroupEntity.getGroupName();
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
+    return configGroupName;
   }
 
   @Override
   public void setName(String name) {
-    readWriteLock.writeLock().lock();
-    try {
-      configGroupEntity.setGroupName(name);
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
+    ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+    configGroupEntity.setGroupName(name);
+    configGroupDAO.merge(configGroupEntity);
 
+    configGroupName = name;
   }
 
   @Override
   public String getClusterName() {
-    return configGroupEntity.getClusterEntity().getClusterName();
+    return cluster.getClusterName();
   }
 
   @Override
   public String getTag() {
-    readWriteLock.readLock().lock();
-    try {
-      return configGroupEntity.getTag();
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
+    ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+    return configGroupEntity.getTag();
   }
 
   @Override
   public void setTag(String tag) {
-    readWriteLock.writeLock().lock();
-    try {
-      configGroupEntity.setTag(tag);
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-
+    ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+    configGroupEntity.setTag(tag);
+    configGroupDAO.merge(configGroupEntity);
   }
 
   @Override
   public String getDescription() {
-    readWriteLock.readLock().lock();
-    try {
-      return configGroupEntity.getDescription();
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
+    ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+    return configGroupEntity.getDescription();
   }
 
   @Override
   public void setDescription(String description) {
-    readWriteLock.writeLock().lock();
-    try {
-      configGroupEntity.setDescription(description);
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-
+    ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+    configGroupEntity.setDescription(description);
+    configGroupDAO.merge(configGroupEntity);
   }
 
   @Override
   public Map<Long, Host> getHosts() {
-    readWriteLock.readLock().lock();
-    try {
-      return Collections.unmodifiableMap(hosts);
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
+    return Collections.unmodifiableMap(m_hosts);
   }
 
   @Override
   public Map<String, Config> getConfigurations() {
-    readWriteLock.readLock().lock();
-    try {
-      return Collections.unmodifiableMap(configurations);
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
+    return Collections.unmodifiableMap(m_configurations);
   }
 
   /**
@@ -259,13 +245,14 @@ public class ConfigGroupImpl implements ConfigGroup {
    */
   @Override
   public void setHosts(Map<Long, Host> hosts) {
-    readWriteLock.writeLock().lock();
+    hostLock.writeLock().lock();
     try {
-      this.hosts = hosts;
+      // persist entities in a transaction first, then update internal state
+      replaceHostMappings(hosts);
+      m_hosts = new ConcurrentHashMap<>(hosts);
     } finally {
-      readWriteLock.writeLock().unlock();
+      hostLock.writeLock().unlock();
     }
-
   }
 
   /**
@@ -273,115 +260,140 @@ public class ConfigGroupImpl implements ConfigGroup {
    * @param configs
    */
   @Override
-  public void setConfigurations(Map<String, Config> configs) {
-    readWriteLock.writeLock().lock();
-    try {
-      configurations = configs;
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-
+  public void setConfigurations(Map<String, Config> configurations) {
+    ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+    ClusterEntity clusterEntity = configGroupEntity.getClusterEntity();
+
+    // only update the internal state after the configurations have been
+    // persisted
+    persistConfigMapping(clusterEntity, configGroupEntity, configurations);
+    m_configurations = new ConcurrentHashMap<>(configurations);
   }
 
   @Override
-  @Transactional
   public void removeHost(Long hostId) throws AmbariException {
-    readWriteLock.writeLock().lock();
+    hostLock.writeLock().lock();
     try {
-      if (hosts.containsKey(hostId)) {
-        String hostName = hosts.get(hostId).getHostName();
-        LOG.info("Removing host from config group, hostid = " + hostId + ", hostname = " + hostName);
-        hosts.remove(hostId);
-        try {
-          ConfigGroupHostMappingEntityPK hostMappingEntityPK = new
-            ConfigGroupHostMappingEntityPK();
-          hostMappingEntityPK.setHostId(hostId);
-          hostMappingEntityPK.setConfigGroupId(configGroupEntity.getGroupId());
-          configGroupHostMappingDAO.removeByPK(hostMappingEntityPK);
-        } catch (Exception e) {
-          LOG.error("Failed to delete config group host mapping"
-            + ", clusterName = " + getClusterName()
-            + ", id = " + getId()
-            + ", hostid = " + hostId
-            + ", hostname = " + hostName, e);
-          throw new AmbariException(e.getMessage());
-        }
+      Host host = m_hosts.get(hostId);
+      if (null == host) {
+        return;
       }
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-  }
 
-  @Override
-  public void persist() {
-    readWriteLock.writeLock().lock();
-    try {
-      if (!isPersisted) {
-        persistEntities();
-        refresh();
-        cluster.refresh();
-        isPersisted = true;
-      } else {
-        saveIfPersisted();
+      String hostName = host.getHostName();
+      LOG.info("Removing host (id={}, name={}) from config group", host.getHostId(), hostName);
+
+      try {
+        // remove the entities first, then update internal state
+        removeConfigGroupHostEntity(host);
+        m_hosts.remove(hostId);
+      } catch (Exception e) {
+        LOG.error("Failed to delete config group host mapping for cluster {} and host {}",
+            cluster.getClusterName(), hostName, e);
+
+        throw new AmbariException(e.getMessage());
       }
     } finally {
-      readWriteLock.writeLock().unlock();
+      hostLock.writeLock().unlock();
     }
   }
 
   /**
+   * Removes the {@link ConfigGroupHostMappingEntity} for the specified host
+   * from this configuration group.
+   *
+   * @param host
+   *          the host to remove.
+   */
+  @Transactional
+  void removeConfigGroupHostEntity(Host host) {
+    ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+    ConfigGroupHostMappingEntityPK hostMappingEntityPK = new ConfigGroupHostMappingEntityPK();
+    hostMappingEntityPK.setHostId(host.getHostId());
+    hostMappingEntityPK.setConfigGroupId(configGroupId);
+
+    ConfigGroupHostMappingEntity configGroupHostMapping = configGroupHostMappingDAO.findByPK(
+        hostMappingEntityPK);
+
+    configGroupHostMappingDAO.remove(configGroupHostMapping);
+
+    configGroupEntity.getConfigGroupHostMappingEntities().remove(configGroupHostMapping);
+    configGroupEntity = configGroupDAO.merge(getConfigGroupEntity());
+  }
+
+  /**
+   * @param configGroupEntity
+   */
+  private void persist(ConfigGroupEntity configGroupEntity) {
+    persistEntities(configGroupEntity);
+    cluster.refresh();
+  }
+
+  /**
    * Persist Config group with host mapping and configurations
    *
    * @throws Exception
    */
   @Transactional
-  void persistEntities() {
+  void persistEntities(ConfigGroupEntity configGroupEntity) {
     ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());
     configGroupEntity.setClusterEntity(clusterEntity);
     configGroupEntity.setTimestamp(System.currentTimeMillis());
     configGroupDAO.create(configGroupEntity);
 
-    persistConfigMapping(clusterEntity);
-    persistHostMapping();
-  }
+    configGroupId = configGroupEntity.getGroupId();
 
-  // TODO: Test rollback scenario
+    persistConfigMapping(clusterEntity, configGroupEntity, m_configurations);
+    replaceHostMappings(m_hosts);
+  }
 
   /**
-   * Persist host mapping
+   * Replaces all existing host mappings with the new collection of hosts.
    *
+   * @param hosts
+   *          the new hosts
    * @throws Exception
    */
-  @Override
   @Transactional
-  public void persistHostMapping() {
-    if (isPersisted) {
-      // Delete existing mappings and create new ones
-      configGroupHostMappingDAO.removeAllByGroup(configGroupEntity.getGroupId());
-      configGroupEntity.setConfigGroupHostMappingEntities(new HashSet<ConfigGroupHostMappingEntity>());
-    }
+  void replaceHostMappings(Map<Long, Host> hosts) {
+    ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+
+    // Delete existing mappings and create new ones
+    configGroupHostMappingDAO.removeAllByGroup(configGroupEntity.getGroupId());
+    configGroupEntity.setConfigGroupHostMappingEntities(
+        new HashSet<ConfigGroupHostMappingEntity>());
 
     if (hosts != null && !hosts.isEmpty()) {
-      for (Host host : hosts.values()) {
-        HostEntity hostEntity = hostDAO.findById(host.getHostId());
-        if (hostEntity != null) {
-          ConfigGroupHostMappingEntity hostMappingEntity = new
-            ConfigGroupHostMappingEntity();
-          hostMappingEntity.setHostId(hostEntity.getHostId());
-          hostMappingEntity.setHostEntity(hostEntity);
-          hostMappingEntity.setConfigGroupEntity(configGroupEntity);
-          hostMappingEntity.setConfigGroupId(configGroupEntity.getGroupId());
-          configGroupEntity.getConfigGroupHostMappingEntities().add
-                  (hostMappingEntity);
-          configGroupHostMappingDAO.create(hostMappingEntity);
-        } else {
-          LOG.warn("Host seems to be deleted, cannot create host to config " +
-            "group mapping, host = " + host.getHostName());
-        }
+      configGroupEntity = persistHostMapping(hosts.values(), configGroupEntity);
+    }
+  }
+
+  /**
+   * Adds the collection of hosts to the configuration group.
+   *
+   * @param hosts
+   * @param configGroupEntity
+   */
+  @Transactional
+  ConfigGroupEntity persistHostMapping(Collection<Host> hosts,
+      ConfigGroupEntity configGroupEntity) {
+    for (Host host : hosts) {
+      HostEntity hostEntity = hostDAO.findById(host.getHostId());
+      if (hostEntity != null) {
+        ConfigGroupHostMappingEntity hostMappingEntity = new ConfigGroupHostMappingEntity();
+        hostMappingEntity.setHostId(hostEntity.getHostId());
+        hostMappingEntity.setHostEntity(hostEntity);
+        hostMappingEntity.setConfigGroupEntity(configGroupEntity);
+        hostMappingEntity.setConfigGroupId(configGroupEntity.getGroupId());
+        configGroupEntity.getConfigGroupHostMappingEntities().add(hostMappingEntity);
+        configGroupHostMappingDAO.create(hostMappingEntity);
+      } else {
+        LOG.warn(
+            "The host {} has been removed from the cluster and cannot be added to the configuration group {}",
+            host.getHostName(), configGroupName);
       }
     }
-    // TODO: Make sure this does not throw Nullpointer based on JPA docs
-    configGroupEntity = configGroupDAO.merge(configGroupEntity);
+
+    return configGroupDAO.merge(configGroupEntity);
   }
 
   /**
@@ -391,42 +403,31 @@ public class ConfigGroupImpl implements ConfigGroup {
    * @throws Exception
    */
   @Transactional
-  void persistConfigMapping(ClusterEntity clusterEntity) {
-    if (isPersisted) {
-      configGroupConfigMappingDAO.removeAllByGroup(configGroupEntity.getGroupId());
-      configGroupEntity.setConfigGroupConfigMappingEntities(new HashSet<ConfigGroupConfigMappingEntity>());
-    }
+  void persistConfigMapping(ClusterEntity clusterEntity,
+      ConfigGroupEntity configGroupEntity, Map<String, Config> configurations) {
+    configGroupConfigMappingDAO.removeAllByGroup(configGroupEntity.getGroupId());
+    configGroupEntity.setConfigGroupConfigMappingEntities(
+        new HashSet<ConfigGroupConfigMappingEntity>());
 
     if (configurations != null && !configurations.isEmpty()) {
-      for (Config config : configurations.values()) {
+      for (Entry<String, Config> entry : configurations.entrySet()) {
+        Config config = entry.getValue();
         ClusterConfigEntity clusterConfigEntity = clusterDAO.findConfig
           (cluster.getClusterId(), config.getType(), config.getTag());
 
         if (clusterConfigEntity == null) {
-          config.setVersion(cluster.getNextConfigVersion(config.getType()));
-          config.setStackId(cluster.getDesiredStackVersion());
-          // Create configuration
-          clusterConfigEntity = new ClusterConfigEntity();
-          clusterConfigEntity.setClusterId(clusterEntity.getClusterId());
-          clusterConfigEntity.setClusterEntity(clusterEntity);
-          clusterConfigEntity.setStack(clusterEntity.getDesiredStack());
-          clusterConfigEntity.setType(config.getType());
-          clusterConfigEntity.setVersion(config.getVersion());
-          clusterConfigEntity.setTag(config.getTag());
-          clusterConfigEntity.setData(gson.toJson(config.getProperties()));
-          if (null != config.getPropertiesAttributes()) {
-            clusterConfigEntity.setAttributes(gson.toJson(config.getPropertiesAttributes()));
-          }
-          clusterConfigEntity.setTimestamp(System.currentTimeMillis());
-          clusterDAO.createConfig(clusterConfigEntity);
-          clusterEntity.getClusterConfigEntities().add(clusterConfigEntity);
-          cluster.addConfig(config);
-          clusterDAO.merge(clusterEntity);
-          cluster.refresh();
+          config = configFactory.createNew(cluster, config.getType(), config.getTag(),
+              config.getProperties(), config.getPropertiesAttributes());
+
+          entry.setValue(config);
+
+          clusterConfigEntity = clusterDAO.findConfig(cluster.getClusterId(), config.getType(),
+              config.getTag());
         }
 
         ConfigGroupConfigMappingEntity configMappingEntity =
           new ConfigGroupConfigMappingEntity();
+
         configMappingEntity.setTimestamp(System.currentTimeMillis());
         configMappingEntity.setClusterId(clusterEntity.getClusterId());
         configMappingEntity.setClusterConfigEntity(clusterConfigEntity);
@@ -443,142 +444,84 @@ public class ConfigGroupImpl implements ConfigGroup {
     }
   }
 
-  void saveIfPersisted() {
-    if (isPersisted) {
-      save(clusterDAO.findById(cluster.getClusterId()));
-    }
-  }
-
-  @Transactional
-  void save(ClusterEntity clusterEntity) {
-    persistHostMapping();
-    persistConfigMapping(clusterEntity);
-  }
-
   @Override
+  @Transactional
   public void delete() {
-    readWriteLock.writeLock().lock();
-    try {
-      configGroupConfigMappingDAO.removeAllByGroup(configGroupEntity.getGroupId());
-      configGroupHostMappingDAO.removeAllByGroup(configGroupEntity.getGroupId());
-      configGroupDAO.removeByPK(configGroupEntity.getGroupId());
-      cluster.refresh();
-      isPersisted = false;
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
+    configGroupConfigMappingDAO.removeAllByGroup(configGroupId);
+    configGroupHostMappingDAO.removeAllByGroup(configGroupId);
+    configGroupDAO.removeByPK(configGroupId);
+    cluster.refresh();
   }
 
   @Override
   public void addHost(Host host) throws AmbariException {
-    readWriteLock.writeLock().lock();
+    hostLock.writeLock().lock();
     try {
-      if (hosts != null && !hosts.isEmpty()) {
-        for (Host h : hosts.values()) {
-          if (h.getHostName().equals(host.getHostName())) {
-            throw new DuplicateResourceException("Host " + h.getHostName() +
-              "is already associated with Config Group " +
-              configGroupEntity.getGroupName());
-          }
-        }
-        HostEntity hostEntity = hostDAO.findByName(host.getHostName());
-        if (hostEntity != null) {
-          hosts.put(hostEntity.getHostId(), host);
-        }
-      }
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-  }
+      if (m_hosts.containsKey(host.getHostId())) {
+        String message = String.format(
+            "Host %s is already associated with the configuration group %s", host.getHostName(),
+            configGroupName);
 
-  @Override
-  public void addConfiguration(Config config) throws AmbariException {
-    readWriteLock.writeLock().lock();
-    try {
-      if (configurations != null && !configurations.isEmpty()) {
-        for (Config c : configurations.values()) {
-          if (c.getType().equals(config.getType()) && c.getTag().equals
-            (config.getTag())) {
-            throw new DuplicateResourceException("Config " + config.getType() +
-              " with tag " + config.getTag() + " is already associated " +
-              "with Config Group " + configGroupEntity.getGroupName());
-          }
-        }
-        configurations.put(config.getType(), config);
+        throw new DuplicateResourceException(message);
       }
+
+      // ensure that we only update the in-memory structure if the merge was
+      // successful
+      ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+      persistHostMapping(Collections.singletonList(host), configGroupEntity);
+      m_hosts.putIfAbsent(host.getHostId(), host);
     } finally {
-      readWriteLock.writeLock().unlock();
+      hostLock.writeLock().unlock();
     }
   }
 
   @Override
   public ConfigGroupResponse convertToResponse() throws AmbariException {
-    readWriteLock.readLock().lock();
-    try {
-      Set<Map<String, Object>> hostnames = new HashSet<Map<String, Object>>();
-      for (Host host : hosts.values()) {
-        Map<String, Object> hostMap = new HashMap<String, Object>();
-        hostMap.put("host_name", host.getHostName());
-        hostnames.add(hostMap);
-      }
-
-      Set<Map<String, Object>> configObjMap = new HashSet<Map<String, Object>>();
+    Set<Map<String, Object>> hostnames = new HashSet<Map<String, Object>>();
+    for (Host host : m_hosts.values()) {
+      Map<String, Object> hostMap = new HashMap<String, Object>();
+      hostMap.put("host_name", host.getHostName());
+      hostnames.add(hostMap);
+    }
 
-      for (Config config : configurations.values()) {
-        Map<String, Object> configMap = new HashMap<String, Object>();
-        configMap.put(ConfigurationResourceProvider.CONFIGURATION_CONFIG_TYPE_PROPERTY_ID,
-            config.getType());
-        configMap.put(ConfigurationResourceProvider.CONFIGURATION_CONFIG_TAG_PROPERTY_ID,
-            config.getTag());
-        configObjMap.add(configMap);
-      }
+    Set<Map<String, Object>> configObjMap = new HashSet<Map<String, Object>>();
 
-      ConfigGroupResponse configGroupResponse = new ConfigGroupResponse(
-          configGroupEntity.getGroupId(), cluster.getClusterName(),
-          configGroupEntity.getGroupName(), configGroupEntity.getTag(),
-          configGroupEntity.getDescription(), hostnames, configObjMap);
-      return configGroupResponse;
-    } finally {
-      readWriteLock.readLock().unlock();
+    for (Config config : m_configurations.values()) {
+      Map<String, Object> configMap = new HashMap<String, Object>();
+      configMap.put(ConfigurationResourceProvider.CONFIGURATION_CONFIG_TYPE_PROPERTY_ID,
+          config.getType());
+      configMap.put(ConfigurationResourceProvider.CONFIGURATION_CONFIG_TAG_PROPERTY_ID,
+          config.getTag());
+      configObjMap.add(configMap);
     }
-  }
 
-  @Override
-  @Transactional
-  public void refresh() {
-    readWriteLock.writeLock().lock();
-    try {
-      if (isPersisted) {
-        ConfigGroupEntity groupEntity = configGroupDAO.findById
-          (configGroupEntity.getGroupId());
-        configGroupDAO.refresh(groupEntity);
-        // TODO What other entities should refresh?
-      }
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
+    ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+    ConfigGroupResponse configGroupResponse = new ConfigGroupResponse(
+        configGroupEntity.getGroupId(), cluster.getClusterName(),
+        configGroupEntity.getGroupName(), configGroupEntity.getTag(),
+        configGroupEntity.getDescription(), hostnames, configObjMap);
+    return configGroupResponse;
   }
 
-
   @Override
   public String getServiceName() {
-    readWriteLock.readLock().lock();
-    try {
-      return configGroupEntity.getServiceName();
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
+    ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+    return configGroupEntity.getServiceName();
   }
 
   @Override
   public void setServiceName(String serviceName) {
-    readWriteLock.writeLock().lock();
-    try {
-      configGroupEntity.setServiceName(serviceName);
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
+    ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+    configGroupEntity.setServiceName(serviceName);
+    configGroupDAO.merge(configGroupEntity);
+  }
 
+  /**
+   * Gets the {@link ConfigGroupEntity} by its ID from the JPA cache.
+   *
+   * @return the entity.
+   */
+  private ConfigGroupEntity getConfigGroupEntity() {
+    return configGroupDAO.findById(configGroupId);
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
index dd5e635..a444f5d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
@@ -954,7 +954,7 @@ public class HostImpl implements Host {
   }
 
   @Transactional
-  private void persistEntities(HostEntity hostEntity) {
+  void persistEntities(HostEntity hostEntity) {
     hostDAO.create(hostEntity);
     if (!hostEntity.getClusterEntities().isEmpty()) {
       for (ClusterEntity clusterEntity : hostEntity.getClusterEntities()) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/state/services/RetryUpgradeActionService.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/services/RetryUpgradeActionService.java b/ambari-server/src/main/java/org/apache/ambari/server/state/services/RetryUpgradeActionService.java
index 1ea5558..a92aa04 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/services/RetryUpgradeActionService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/services/RetryUpgradeActionService.java
@@ -190,7 +190,7 @@ public class RetryUpgradeActionService extends AbstractScheduledService {
    * @param requestId Request Id to search tasks for.
    */
   @Transactional
-  private void retryHoldingCommandsInRequest(Long requestId) {
+  void retryHoldingCommandsInRequest(Long requestId) {
     if (requestId == null) {
       return;
     }

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/state/stack/UpgradePack.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/UpgradePack.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/UpgradePack.java
index edf5c89..76f1897 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/UpgradePack.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/UpgradePack.java
@@ -52,6 +52,8 @@ import org.slf4j.LoggerFactory;
 @XmlAccessorType(XmlAccessType.FIELD)
 public class UpgradePack {
 
+  private static final String ALL_VERSIONS = "*";
+
   private static Logger LOG = LoggerFactory.getLogger(UpgradePack.class);
 
   /**
@@ -287,10 +289,15 @@ public class UpgradePack {
     if (direction.isUpgrade()) {
       list = groups;
     } else {
-      if (type == UpgradeType.ROLLING) {
-        list = getDowngradeGroupsForRolling();
-      } else if (type == UpgradeType.NON_ROLLING) {
-        list = getDowngradeGroupsForNonrolling();
+      switch (type) {
+        case NON_ROLLING:
+          list = getDowngradeGroupsForNonrolling();
+          break;
+        case HOST_ORDERED:
+        case ROLLING:
+        default:
+          list = getDowngradeGroupsForRolling();
+          break;
       }
     }
 
@@ -315,7 +322,6 @@ public class UpgradePack {
   public boolean canBeApplied(String targetVersion){
     // check that upgrade pack can be applied to selected stack
     // converting 2.2.*.* -> 2\.2(\.\d+)?(\.\d+)?(-\d+)?
-
     String regexPattern = getTarget().replaceAll("\\.", "\\\\."); // . -> \.
     regexPattern = regexPattern.replaceAll("\\\\\\.\\*", "(\\\\\\.\\\\d+)?"); // \.* -> (\.\d+)?
     regexPattern = regexPattern.concat("(-\\d+)?");
@@ -357,6 +363,12 @@ public class UpgradePack {
   private List<Grouping> getDowngradeGroupsForRolling() {
     List<Grouping> reverse = new ArrayList<Grouping>();
 
+    // !!! Testing exposed groups.size() == 1 issue.  Normally there's no precedent for
+    // a one-group upgrade pack, so take it into account anyway.
+    if (groups.size() == 1) {
+      return groups;
+    }
+
     int idx = 0;
     int iter = 0;
     Iterator<Grouping> it = groups.iterator();
@@ -453,6 +465,15 @@ public class UpgradePack {
   }
 
   /**
+   * @return {@code true} if the upgrade targets any version or stack.  Both
+   * {@link #target} and {@link #targetStack} must equal "*"
+   */
+  public boolean isAllTarget() {
+    return ALL_VERSIONS.equals(target) && ALL_VERSIONS.equals(targetStack);
+  }
+
+
+  /**
    * A service definition that holds a list of components in the 'order' element.
    */
   public static class OrderService {

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ConfigurationCondition.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ConfigurationCondition.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ConfigurationCondition.java
index 1bd88e4..d229270 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ConfigurationCondition.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ConfigurationCondition.java
@@ -52,7 +52,37 @@ public final class ConfigurationCondition extends Condition {
      * Equals comparison.
      */
     @XmlEnumValue("equals")
-    EQUALS;
+    EQUALS,
+
+    /**
+     * Not equals comparison.
+     */
+    @XmlEnumValue("not-equals")
+    NOT_EQUALS,
+
+    /**
+     * String contains.
+     */
+    @XmlEnumValue("contains")
+    CONTAINS,
+
+    /**
+     * Does not contain.
+     */
+    @XmlEnumValue("not-contains")
+    NOT_CONTAINS,
+
+    /**
+     * Exists with any value.
+     */
+    @XmlEnumValue("exists")
+    EXISTS,
+
+    /**
+     * Does not exist.
+     */
+    @XmlEnumValue("not-exists")
+    NOT_EXISTS;
   }
 
   /**
@@ -68,12 +98,18 @@ public final class ConfigurationCondition extends Condition {
   public String property;
 
   /**
-   * The value to compare against.
+   * The value to compare against; only valid if comparison type is in (=, !=, contains, !contains).
    */
   @XmlAttribute(name = "value")
   public String value;
 
   /**
+   * The value to return if comparison type is in (=, !=, contains, !contains) and the config is missing.
+   */
+  @XmlAttribute(name = "return_value_if_config_missing")
+  public boolean returnValueIfConfigMissing;
+
+  /**
    * The type of comparison to make.
    */
   @XmlAttribute(name = "comparison")
@@ -84,7 +120,7 @@ public final class ConfigurationCondition extends Condition {
    */
   @Override
   public String toString() {
-    return Objects.toStringHelper(this).add("type", type).add("property", property).add(value,
+    return Objects.toStringHelper(this).add("type", type).add("property", property).add("value",
         value).add("comparison", comparisonType).omitNullValues().toString();
   }
 
@@ -94,20 +130,40 @@ public final class ConfigurationCondition extends Condition {
   @Override
   public boolean isSatisfied(UpgradeContext upgradeContext) {
     Cluster cluster = upgradeContext.getCluster();
+
+    boolean propertyExists = false;
     Config config = cluster.getDesiredConfigByType(type);
-    if (null == config) {
-      return false;
+    Map<String, String> properties = null;
+    if (null != config) {
+      properties = config.getProperties();
+      if (properties.containsKey(property)) {
+        propertyExists = true;
+      }
+    }
+
+    if (comparisonType == ComparisonType.EXISTS) {
+      return propertyExists;
+    }
+    if (comparisonType == ComparisonType.NOT_EXISTS) {
+      return !propertyExists;
     }
 
-    Map<String, String> properties = config.getProperties();
-    if (MapUtils.isEmpty(properties)) {
-      return false;
+    // If the property doesn't exist, we cannot make any claims using =, !=, contains, !contains.
+    // Therefore, check if the Upgrade Pack provided a default return value when the config is missing.
+    if (!propertyExists) {
+      return returnValueIfConfigMissing;
     }
 
     String propertyValue = properties.get(property);
     switch (comparisonType) {
       case EQUALS:
         return StringUtils.equals(propertyValue, value);
+      case NOT_EQUALS:
+        return !StringUtils.equals(propertyValue, value);
+      case CONTAINS:
+        return StringUtils.contains(propertyValue, value);
+      case NOT_CONTAINS:
+        return !StringUtils.contains(propertyValue, value);
       default:
         return false;
     }

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
index c1655aa..5225598 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
@@ -1313,7 +1313,7 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
   }
 
   @Transactional
-  private void persistEntities(HostEntity hostEntity, HostComponentStateEntity stateEntity,
+  void persistEntities(HostEntity hostEntity, HostComponentStateEntity stateEntity,
       HostComponentDesiredStateEntity desiredStateEntity) {
     ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.findByName(
         serviceComponent.getClusterId(), serviceComponent.getServiceName(),

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
index e1f5cd2..5e887d4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
@@ -67,11 +67,10 @@ import org.apache.ambari.server.security.authorization.AuthorizationException;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
+import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.SecurityType;
-import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.configgroup.ConfigGroup;
 import org.apache.ambari.server.utils.RetryHelper;
 import org.slf4j.Logger;
@@ -91,8 +90,13 @@ public class AmbariContext {
   @Inject
   private PersistedState persistedState;
 
+  /**
+   * Used for creating read-only instances of existing {@link Config} in order
+   * to send them to the {@link ConfigGroupResourceProvider} to create
+   * {@link ConfigGroup}s.
+   */
   @Inject
-  private org.apache.ambari.server.configuration.Configuration configs;
+  ConfigFactory configFactory;
 
   private static AmbariManagementController controller;
   private static ClusterController clusterController;
@@ -458,11 +462,13 @@ public class AmbariContext {
         SortedSet<DesiredConfig> desiredConfigsOrderedByVersion = new TreeSet<>(new Comparator<DesiredConfig>() {
           @Override
           public int compare(DesiredConfig o1, DesiredConfig o2) {
-            if (o1.getVersion() < o2.getVersion())
+            if (o1.getVersion() < o2.getVersion()) {
               return -1;
+            }
 
-            if (o1.getVersion() > o2.getVersion())
+            if (o1.getVersion() > o2.getVersion()) {
               return 1;
+            }
 
             return 0;
           }
@@ -473,9 +479,9 @@ public class AmbariContext {
         int tagMatchState = 0; // 0 -> INITIAL -> tagMatchState = 1 -> TOPLOGY_RESOLVED -> tagMatchState = 2
 
         for (DesiredConfig config: desiredConfigsOrderedByVersion) {
-          if (config.getTag().equals(TopologyManager.INITIAL_CONFIG_TAG) && tagMatchState == 0)
+          if (config.getTag().equals(TopologyManager.INITIAL_CONFIG_TAG) && tagMatchState == 0) {
             tagMatchState = 1;
-          else if (config.getTag().equals(TopologyManager.TOPOLOGY_RESOLVED_TAG) && tagMatchState == 1) {
+          } else if (config.getTag().equals(TopologyManager.TOPOLOGY_RESOLVED_TAG) && tagMatchState == 1) {
             tagMatchState = 2;
             break;
           }
@@ -551,7 +557,6 @@ public class AmbariContext {
           addedHost = true;
           if (! group.getHosts().containsKey(host.getHostId())) {
             group.addHost(host);
-            group.persistHostMapping();
           }
 
         } catch (AmbariException e) {
@@ -585,9 +590,7 @@ public class AmbariContext {
     for (Map.Entry<String, Map<String, String>> entry : userProvidedGroupProperties.entrySet()) {
       String type = entry.getKey();
       String service = stack.getServiceForConfigType(type);
-      Config config = new ConfigImpl(type);
-      config.setTag(groupName);
-      config.setProperties(entry.getValue());
+      Config config = configFactory.createReadOnly(type, groupName, entry.getValue(), null);
       //todo: attributes
       Map<String, Config> serviceConfigs = groupConfigs.get(service);
       if (serviceConfigs == null) {

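For context, a minimal sketch of the factory-based pattern used in the hunk above. The property values and group name below are hypothetical, and the fourth argument is assumed to be the optional attributes map, which this patch passes as null; configFactory is the injected ConfigFactory field shown in the diff, so this fragment is illustrative rather than standalone.

    // Sketch only: replaces the old "new ConfigImpl(type); setTag(); setProperties()" sequence.
    Map<String, String> groupProperties = new HashMap<>();
    groupProperties.put("dfs.datanode.data.dir", "/grid/0/hadoop/hdfs/data"); // hypothetical override

    Config config = configFactory.createReadOnly(
        "hdfs-site",          // config type
        "my-config-group",    // tag (the config group name)
        groupProperties,      // user-provided overrides for the group
        null);                // attributes: none for config-group overrides
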
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
index d6a4bdd..d527b2d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
@@ -596,6 +596,14 @@ public class TopologyManager {
     return clusterTopologyMap.get(clusterId);
   }
 
+  /**
+   * Gets a map of components keyed by host which have operations in the
+   * {@link HostRoleStatus#PENDING} state. This could either be because hosts
+   * have not registered or because the operations are actually waiting to be
+   * queued.
+   *
+   * @return a mapping of hosts to their pending components.
+   */
   public Map<String, Collection<String>> getPendingHostComponents() {
     ensureInitialized();
     Map<String, Collection<String>> hostComponentMap = new HashMap<String, Collection<String>>();
@@ -603,7 +611,16 @@ public class TopologyManager {
     for (LogicalRequest logicalRequest : allRequests.values()) {
       Map<Long, HostRoleCommandStatusSummaryDTO> summary = logicalRequest.getStageSummaries();
       final CalculatedStatus status = CalculatedStatus.statusFromStageSummary(summary, summary.keySet());
-      if (status.getStatus().isInProgress()) {
+
+      // either use the calculated status of the stage or the fact that there
+      // are no tasks and the request has no end time to determine if the
+      // request is still in progress
+      boolean logicalRequestInProgress = false;
+      if (status.getStatus().isInProgress() || (summary.isEmpty() && logicalRequest.getEndTime() <= 0) ) {
+        logicalRequestInProgress = true;
+      }
+
+      if (logicalRequestInProgress) {
         Map<String, Collection<String>> requestTopology = logicalRequest.getProjectedTopology();
         for (Map.Entry<String, Collection<String>> entry : requestTopology.entrySet()) {
           String host = entry.getKey();

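A hedged usage sketch of the method documented above; topologyManager and LOG are assumed to be an injected TopologyManager and an SLF4J logger, and are not part of the patch.

    Map<String, Collection<String>> pending = topologyManager.getPendingHostComponents();
    for (Map.Entry<String, Collection<String>> entry : pending.entrySet()) {
      // Each key is a host; the value lists components whose operations are still
      // PENDING, either because the host has not registered or because the work
      // has not been queued yet.
      LOG.info("Host {} still has pending components: {}", entry.getKey(), entry.getValue());
    }
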
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/update/HostUpdateHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/update/HostUpdateHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/update/HostUpdateHelper.java
index 6a8057c..4c1ef5a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/update/HostUpdateHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/update/HostUpdateHelper.java
@@ -53,8 +53,8 @@ import org.apache.ambari.server.orm.entities.TopologyRequestEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.utils.EventBusSynchronizer;
 import org.apache.commons.lang.StringUtils;
@@ -234,12 +234,12 @@ public class HostUpdateHelper {
           boolean configUpdated;
 
           // going through all cluster configs and update property values
+          ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
           for (ClusterConfigEntity clusterConfigEntity : clusterConfigEntities) {
-            ConfigImpl config = new ConfigImpl(cluster, clusterConfigEntity, injector);
+            Config config = configFactory.createExisting(cluster, clusterConfigEntity);
             configUpdated = false;
 
             for (Map.Entry<String,String> property : config.getProperties().entrySet()) {
-
               updatedPropertyValue = replaceHosts(property.getValue(), currentHostNames, hostMapping);
 
               if (updatedPropertyValue != null) {
@@ -249,8 +249,9 @@ public class HostUpdateHelper {
                 configUpdated = true;
               }
             }
+
             if (configUpdated) {
-              config.persist(false);
+              config.save();
             }
           }
         }
@@ -317,6 +318,7 @@ public class HostUpdateHelper {
   * */
   public class StringComparator implements Comparator<String> {
 
+    @Override
     public int compare(String s1, String s2) {
       return s2.length() - s1.length();
     }

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
index 1038766..dbf45c3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
@@ -17,6 +17,11 @@
  */
 package org.apache.ambari.server.upgrade;
 
+import javax.persistence.EntityManager;
+import javax.xml.bind.JAXBException;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+
 import java.io.File;
 import java.io.FileReader;
 import java.io.FilenameFilter;
@@ -42,11 +47,6 @@ import java.util.TreeMap;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import javax.persistence.EntityManager;
-import javax.xml.bind.JAXBException;
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
@@ -447,11 +447,16 @@ public abstract class AbstractUpgradeCatalog implements UpgradeCatalog {
     Config hdfsSiteConfig = cluster.getDesiredConfigByType(CONFIGURATION_TYPE_HDFS_SITE);
     if (hdfsSiteConfig != null) {
       Map<String, String> properties = hdfsSiteConfig.getProperties();
+      if (properties.containsKey("dfs.internal.nameservices")) {
+        return true;
+      }
       String nameServices = properties.get(PROPERTY_DFS_NAMESERVICES);
       if (!StringUtils.isEmpty(nameServices)) {
-        String namenodes = properties.get(String.format("dfs.ha.namenodes.%s", nameServices));
-        if (!StringUtils.isEmpty(namenodes)) {
-          return (namenodes.split(",").length > 1);
+        for (String nameService : nameServices.split(",")) {
+          String namenodes = properties.get(String.format("dfs.ha.namenodes.%s", nameService));
+          if (!StringUtils.isEmpty(namenodes)) {
+            return (namenodes.split(",").length > 1);
+          }
         }
       }
     }
@@ -580,8 +585,8 @@ public abstract class AbstractUpgradeCatalog implements UpgradeCatalog {
         }
 
         if (!Maps.difference(oldConfigProperties, mergedProperties).areEqual()) {
-          LOG.info("Applying configuration with tag '{}' to " +
-            "cluster '{}'", newTag, cluster.getClusterName());
+          LOG.info("Applying configuration with tag '{}' and configType '{}' to " +
+            "cluster '{}'", newTag, configType, cluster.getClusterName());
 
           Map<String, Map<String, String>> propertiesAttributes = null;
           if (oldConfig != null) {

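To make the intent of that hunk concrete, here is a small self-contained sketch of the per-nameservice HA check. The property values are illustrative only; the real code reads them from the cluster's hdfs-site desired config, and the presence of dfs.internal.nameservices short-circuits the check to true.

    import java.util.HashMap;
    import java.util.Map;

    public class HdfsHaCheckSketch {
      public static void main(String[] args) {
        Map<String, String> properties = new HashMap<>();
        // Federated cluster: two nameservices, only ns1 is HA (two NameNodes).
        properties.put("dfs.nameservices", "ns1,ns2");
        properties.put("dfs.ha.namenodes.ns1", "nn1,nn2");

        boolean isHA = false;
        for (String nameService : properties.get("dfs.nameservices").split(",")) {
          // Look up the NameNode list per nameservice instead of using the raw,
          // comma-joined dfs.nameservices value as the old code effectively did.
          String namenodes = properties.get("dfs.ha.namenodes." + nameService);
          if (namenodes != null && namenodes.split(",").length > 1) {
            isHA = true;
            break;
          }
        }
        System.out.println("NameNode HA detected: " + isHA); // prints true, because ns1 has two NameNodes
      }
    }
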
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog211.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog211.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog211.java
index db13612..eb835ef 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog211.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog211.java
@@ -212,8 +212,8 @@ public class UpgradeCatalog211 extends AbstractUpgradeCatalog {
         statement = dbAccessor.getConnection().createStatement();
         if (statement != null) {
           String selectSQL = MessageFormat.format(
-              "SELECT cluster_id, service_name, component_name, host_id FROM {0}",
-              HOST_COMPONENT_STATE_TABLE);
+              "SELECT id, cluster_id, service_name, component_name, host_id FROM {0} ORDER BY {1} {2}",
+              HOST_COMPONENT_STATE_TABLE, "id", "DESC");
 
           resultSet = statement.executeQuery(selectSQL);
           while (resultSet.next()) {
@@ -221,13 +221,19 @@ public class UpgradeCatalog211 extends AbstractUpgradeCatalog {
             final String serviceName = resultSet.getString("service_name");
             final String componentName = resultSet.getString("component_name");
             final Long hostId = resultSet.getLong("host_id");
-
-            String updateSQL = MessageFormat.format(
-                "UPDATE {0} SET {1} = {2,number,#} WHERE cluster_id = {3} AND service_name = ''{4}'' AND component_name = ''{5}'' and host_id = {6,number,#}",
-                HOST_COMPONENT_STATE_TABLE, HOST_COMPONENT_STATE_ID_COLUMN, m_hcsId.getAndIncrement(),
-                clusterId, serviceName, componentName, hostId);
-
-            dbAccessor.executeQuery(updateSQL);
+            final Long idKey = resultSet.getLong("id");
+
+            if (idKey != 0 && m_hcsId.get() == 1) {
+              m_hcsId.set(idKey);
+              m_hcsId.getAndIncrement();
+            } else if(idKey == 0) {
+              String updateSQL = MessageFormat.format(
+                  "UPDATE {0} SET {1} = {2,number,#} WHERE cluster_id = {3} AND service_name = ''{4}'' AND component_name = ''{5}'' and host_id = {6,number,#}",
+                  HOST_COMPONENT_STATE_TABLE, HOST_COMPONENT_STATE_ID_COLUMN, m_hcsId.getAndIncrement(),
+                  clusterId, serviceName, componentName, hostId);
+
+              dbAccessor.executeQuery(updateSQL);
+            }
           }
         }
       } finally {

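A hedged, self-contained sketch of what the rewritten loop above does with the DESC-ordered result set: the first non-zero id seeds the counter, and only rows whose id is still 0 receive a newly incremented id. The sample ids are invented.

    import java.util.concurrent.atomic.AtomicLong;

    public class HostComponentIdBackfillSketch {
      public static void main(String[] args) {
        AtomicLong nextId = new AtomicLong(1);          // mirrors m_hcsId's initial value
        long[] idsOrderedDesc = {42L, 41L, 0L, 0L};     // sample rows, ordered by id DESC

        for (long idKey : idsOrderedDesc) {
          if (idKey != 0 && nextId.get() == 1) {
            // First non-zero id seen: continue numbering after the existing maximum.
            nextId.set(idKey);
            nextId.getAndIncrement();                   // next assignment will be 43
          } else if (idKey == 0) {
            long assigned = nextId.getAndIncrement();   // assigns 43, then 44
            System.out.println("would UPDATE row to id " + assigned);
          }
        }
      }
    }
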
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
index 25b6360..063c295 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
@@ -1553,7 +1553,7 @@ public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
    * @throws SQLException
    */
   @Transactional
-  private void updateServiceComponentDesiredStateTableDDL() throws SQLException {
+  void updateServiceComponentDesiredStateTableDDL() throws SQLException {
     if (dbAccessor.tableHasPrimaryKey(SERVICE_COMPONENT_DS_TABLE, ID)) {
       LOG.info("Skipping {} table Primary Key modifications since the new {} column already exists",
           SERVICE_COMPONENT_DS_TABLE, ID);
@@ -2755,7 +2755,7 @@ public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
    *  instead of cluster_name
    */
   @Transactional
-  private void updateViewInstanceTable() throws SQLException {
+  void updateViewInstanceTable() throws SQLException {
     try {
       if (Long.class.equals(dbAccessor.getColumnClass(VIEWINSTANCE_TABLE, CLUSTER_HANDLE_COLUMN))) {
         LOG.info(String.format("%s column is already numeric. Skipping an update of %s table.", CLUSTER_HANDLE_COLUMN, VIEWINSTANCE_TABLE));

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
index 1f44e28..e50b645 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
@@ -19,14 +19,6 @@ package org.apache.ambari.server.upgrade;
 
 import com.google.inject.Inject;
 import com.google.inject.Injector;
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.CommandExecutionType;
 import org.apache.ambari.server.controller.AmbariManagementController;
@@ -40,6 +32,14 @@ import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 /**
  * Upgrade catalog for version 2.5.0.
  */
@@ -137,6 +137,8 @@ public class UpgradeCatalog250 extends AbstractUpgradeCatalog {
     updateHIVEInteractiveConfigs();
     updateTEZInteractiveConfigs();
     updateHiveLlapConfigs();
+    updateTablesForZeppelinViewRemoval();
+    updateAtlasConfigs();
   }
 
   protected void updateHostVersionTable() throws SQLException {
@@ -178,6 +180,11 @@ public class UpgradeCatalog250 extends AbstractUpgradeCatalog {
     }
   }
 
+  protected void updateTablesForZeppelinViewRemoval() throws SQLException {
+    dbAccessor.executeQuery("DELETE from viewinstance WHERE view_name='ZEPPELIN{1.0.0}'", true);
+    dbAccessor.executeQuery("DELETE from viewmain WHERE view_name='ZEPPELIN{1.0.0}'", true);
+    dbAccessor.executeQuery("DELETE from viewparameter WHERE view_name='ZEPPELIN{1.0.0}'", true);
+  }
 
   protected String updateAmsEnvContent(String content) {
     if (content == null) {
@@ -374,5 +381,33 @@ public class UpgradeCatalog250 extends AbstractUpgradeCatalog {
       }
     }
   }
-}
 
+  protected void updateAtlasConfigs() throws AmbariException {
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+    Clusters clusters = ambariManagementController.getClusters();
+    if (clusters != null) {
+      Map<String, Cluster> clusterMap = clusters.getClusters();
+      if (clusterMap != null && !clusterMap.isEmpty()) {
+        for (final Cluster cluster : clusterMap.values()) {
+          updateAtlasHookConfig(cluster, "HIVE", "hive-env", "hive.atlas.hook");
+          updateAtlasHookConfig(cluster, "STORM", "storm-env", "storm.atlas.hook");
+          updateAtlasHookConfig(cluster, "FALCON", "falcon-env", "falcon.atlas.hook");
+          updateAtlasHookConfig(cluster, "SQOOP", "sqoop-env", "sqoop.atlas.hook");
+        }
+      }
+    }
+  }
+
+  protected void updateAtlasHookConfig(Cluster cluster, String serviceName, String configType, String propertyName) throws AmbariException {
+      Set<String> installedServices = cluster.getServices().keySet();
+      if (installedServices.contains("ATLAS") && installedServices.contains(serviceName)) {
+        Config configEnv = cluster.getDesiredConfigByType(configType);
+        if (configEnv != null) {
+          Map<String, String> newProperties = new HashMap<>();
+          newProperties.put(propertyName, "true");
+          boolean updateProperty = configEnv.getProperties().containsKey(propertyName);
+          updateConfigurationPropertiesForCluster(cluster, configType, newProperties, updateProperty, true);
+        }
+      }
+    }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/utils/RequestUtils.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/utils/RequestUtils.java b/ambari-server/src/main/java/org/apache/ambari/server/utils/RequestUtils.java
index 0ac782f..dbb0f11 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/utils/RequestUtils.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/utils/RequestUtils.java
@@ -50,6 +50,9 @@ public class RequestUtils {
     if (isRemoteAddressUnknown(ip)) {
       ip = request.getRemoteAddr();
     }
+    if (containsMultipleRemoteAddresses(ip)) {
+      ip = ip.substring(0, ip.indexOf(","));
+    }
     return ip;
   }
 
@@ -76,6 +79,13 @@ public class RequestUtils {
   }
 
   /**
+   * Checks if the given address string contains multiple, comma-separated IP addresses
+   */
+  private static boolean containsMultipleRemoteAddresses(String ip) {
+    return ip != null && ip.indexOf(",") > 0;
+  }
+
+  /**
    * Checks if RequestContextHolder contains a valid HTTP request
    * @return
    */

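A minimal sketch of the behavior added above, assuming the address arrives as a comma-separated proxy chain (for example from an X-Forwarded-For style header); the sample value is invented.

    public class RemoteAddressSketch {
      public static void main(String[] args) {
        String ip = "203.0.113.7, 10.0.0.5, 10.0.0.9"; // client address first, then proxies

        // Same check and trim as RequestUtils: keep only the first (client) address.
        if (ip != null && ip.indexOf(",") > 0) {
          ip = ip.substring(0, ip.indexOf(","));
        }
        System.out.println(ip); // prints 203.0.113.7
      }
    }
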
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/python/ambari_server/serverConfiguration.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/serverConfiguration.py b/ambari-server/src/main/python/ambari_server/serverConfiguration.py
index 2e4372b..2cec61d 100644
--- a/ambari-server/src/main/python/ambari_server/serverConfiguration.py
+++ b/ambari-server/src/main/python/ambari_server/serverConfiguration.py
@@ -186,7 +186,6 @@ SETUP_OR_UPGRADE_MSG = "- If this is a new setup, then run the \"ambari-server s
 DEFAULT_DB_NAME = "ambari"
 
 SECURITY_KEYS_DIR = "security.server.keys_dir"
-DASHBOARD_PATH_PROPERTY = 'dashboards.path'
 EXTENSION_PATH_PROPERTY = 'extensions.path'
 COMMON_SERVICES_PATH_PROPERTY = 'common.services.path'
 MPACKS_STAGING_PATH_PROPERTY = 'mpacks.staging.path'
@@ -398,8 +397,8 @@ class ServerConfigDefaults(object):
     self.EXTENSION_LOCATION_DEFAULT = ""
     self.COMMON_SERVICES_LOCATION_DEFAULT = ""
     self.MPACKS_STAGING_LOCATION_DEFAULT = ""
-    self.DASHBOARD_LOCATION_DEFAULT = ""
     self.SERVER_TMP_DIR_DEFAULT = ""
+    self.DASHBOARD_DIRNAME = "dashboards"
 
     self.DEFAULT_VIEWS_DIR = ""
 
@@ -470,7 +469,6 @@ class ServerConfigDefaultsWindows(ServerConfigDefaults):
     self.EXTENSION_LOCATION_DEFAULT = "resources\\extensions"
     self.COMMON_SERVICES_LOCATION_DEFAULT = "resources\\common-services"
     self.MPACKS_STAGING_LOCATION_DEFAULT = "resources\\mpacks"
-    self.DASHBOARD_LOCATION_DEFAULT = "resources\\dashboards"
     self.SERVER_TMP_DIR_DEFAULT = "data\\tmp"
 
     self.DEFAULT_VIEWS_DIR = "resources\\views"
@@ -557,7 +555,6 @@ class ServerConfigDefaultsLinux(ServerConfigDefaults):
     self.EXTENSION_LOCATION_DEFAULT = AmbariPath.get("/var/lib/ambari-server/resources/extensions")
     self.COMMON_SERVICES_LOCATION_DEFAULT = AmbariPath.get("/var/lib/ambari-server/resources/common-services")
     self.MPACKS_STAGING_LOCATION_DEFAULT = AmbariPath.get("/var/lib/ambari-server/resources/mpacks")
-    self.DASHBOARD_LOCATION_DEFAULT = AmbariPath.get("/var/lib/ambari-server/resources/dashboards")
     self.SERVER_TMP_DIR_DEFAULT = AmbariPath.get("/var/lib/ambari-server/data/tmp")
 
     self.DEFAULT_VIEWS_DIR = AmbariPath.get("/var/lib/ambari-server/resources/views")
@@ -1438,13 +1435,8 @@ def get_mpacks_staging_location(properties):
 # Dashboard location
 #
 def get_dashboard_location(properties):
-  try:
-    dashboard_location = properties[DASHBOARD_PATH_PROPERTY]
-  except KeyError:
-    dashboard_location = configDefaults.DASHBOARD_LOCATION_DEFAULT
-
-  if not dashboard_location:
-    dashboard_location = configDefaults.DASHBOARD_LOCATION_DEFAULT
+  resources_dir = get_resources_location(properties)
+  dashboard_location = os.path.join(resources_dir, configDefaults.DASHBOARD_DIRNAME)
   return dashboard_location
 
 #

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/python/ambari_server/serverUpgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/serverUpgrade.py b/ambari-server/src/main/python/ambari_server/serverUpgrade.py
index 7d7a9ab..6f17900 100644
--- a/ambari-server/src/main/python/ambari_server/serverUpgrade.py
+++ b/ambari-server/src/main/python/ambari_server/serverUpgrade.py
@@ -163,7 +163,7 @@ def run_stack_upgrade(args, stackName, stackVersion, repo_url, repo_url_os):
                                             "updateStackId",
                                             "'" + json.dumps(stackId) + "'")
   (retcode, stdout, stderr) = run_os_command(command)
-  print_info_msg("Return code from stack upgrade command, retcode = " + str(retcode))
+  print_info_msg("Return code from stack upgrade command, retcode = {0}".format(str(retcode)))
   if retcode > 0:
     print_error_msg("Error executing stack upgrade, please check the server logs.")
   return retcode
@@ -182,10 +182,9 @@ def run_metainfo_upgrade(args, keyValueMap=None):
                                               'updateMetaInfo',
                                               "'" + json.dumps(keyValueMap) + "'")
     (retcode, stdout, stderr) = run_os_command(command)
-    print_info_msg("Return code from stack upgrade command, retcode = " + str(retcode))
+    print_info_msg("Return code from stack upgrade command, retcode = {0}".format(str(retcode)))
     if retcode > 0:
-      print_error_msg("Error executing metainfo upgrade, please check the "
-                      "server logs.")
+      print_error_msg("Error executing metainfo upgrade, please check the server logs.")
 
   return retcode
 
@@ -195,7 +194,7 @@ def run_metainfo_upgrade(args, keyValueMap=None):
 #
 
 def change_objects_owner(args):
-  print 'Fixing database objects owner'
+  print_info_msg('Fixing database objects owner', True)
 
   properties = Properties()   #Dummy, args contains the dbms name and parameters already
 
@@ -228,8 +227,8 @@ def upgrade_local_repo(args):
 
     repo_file = os.path.join(stack_root, stack_version_local, "repos", "repoinfo.xml")
 
-    print_info_msg("Local repo file: " + repo_file_local)
-    print_info_msg("Repo file: " + repo_file_local)
+    print_info_msg("Local repo file: {0}".format(repo_file_local))
+    print_info_msg("Repo file: {0}".format(repo_file_local))
 
     metainfo_update_items = {}
 
@@ -267,7 +266,7 @@ def run_schema_upgrade(args):
 
   ensure_jdbc_driver_is_installed(args, get_ambari_properties())
 
-  print 'Upgrading database schema'
+  print_info_msg('Upgrading database schema', True)
 
   serverClassPath = ServerClassPath(get_ambari_properties(), args)
   class_path = serverClassPath.get_full_ambari_classpath_escaped_for_shell(validate_classpath=True)
@@ -284,19 +283,19 @@ def run_schema_upgrade(args):
   environ = generate_env(args, ambari_user, current_user)
 
   (retcode, stdout, stderr) = run_os_command(command, env=environ)
-  print_info_msg("Return code from schema upgrade command, retcode = " + str(retcode))
+  print_info_msg("Return code from schema upgrade command, retcode = {0}".format(str(retcode)), True)
   if stdout:
-    print "Console output from schema upgrade command:"
-    print stdout
-    print
-  if stderr:
-    print "Error output from schema upgrade command:"
-    print stderr
+    print_info_msg("Console output from schema upgrade command:", True)
+    print_info_msg(stdout, True)
     print
   if retcode > 0:
     print_error_msg("Error executing schema upgrade, please check the server logs.")
+    if stderr:
+      print_error_msg("Error output from schema upgrade command:")
+      print_error_msg(stderr)
+      print
   else:
-    print_info_msg('Schema upgrade completed')
+    print_info_msg('Schema upgrade completed', True)
   return retcode
 
 
@@ -335,16 +334,17 @@ def move_user_custom_actions():
     raise FatalException(1, err)
 
 def upgrade(args):
-  logger.info("Upgrade ambari-server.")
+  print_info_msg("Upgrade Ambari Server", True)
   if not is_root():
     err = configDefaults.MESSAGE_ERROR_UPGRADE_NOT_ROOT
     raise FatalException(4, err)
-  print 'Updating properties in ' + AMBARI_PROPERTIES_FILE + ' ...'
+  print_info_msg('Updating Ambari Server properties in {0} ...'.format(AMBARI_PROPERTIES_FILE), True)
   retcode = update_ambari_properties()
   if not retcode == 0:
     err = AMBARI_PROPERTIES_FILE + ' file can\'t be updated. Exiting'
     raise FatalException(retcode, err)
 
+  print_info_msg('Updating Ambari Server properties in {0} ...'.format(AMBARI_ENV_FILE), True)
   retcode = update_ambari_env()
   if not retcode == 0:
     err = AMBARI_ENV_FILE + ' file can\'t be updated. Exiting'
@@ -354,7 +354,7 @@ def upgrade(args):
   if retcode == -2:
     pass  # no changes done, let's be silent
   elif retcode == 0:
-    print 'File ' + AMBARI_KRB_JAAS_LOGIN_FILE + ' updated.'
+    print_info_msg("File {0} updated.".format(AMBARI_KRB_JAAS_LOGIN_FILE), True)
   elif not retcode == 0:
     err = AMBARI_KRB_JAAS_LOGIN_FILE + ' file can\'t be updated. Exiting'
     raise FatalException(retcode, err)

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/python/ambari_server/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/utils.py b/ambari-server/src/main/python/ambari_server/utils.py
index 62c93ae..4c8304b 100644
--- a/ambari-server/src/main/python/ambari_server/utils.py
+++ b/ambari-server/src/main/python/ambari_server/utils.py
@@ -125,17 +125,18 @@ def save_main_pid_ex(pids, pidfile, exclude_list=[], skip_daemonize=False):
   """
   pid_saved = False
   try:
-    pfile = open(pidfile, "w")
-    for item in pids:
-      if pid_exists(item["pid"]) and (item["exe"] not in exclude_list):
-        pfile.write("%s\n" % item["pid"])
-        pid_saved = True
-        logger.info("Ambari server started with PID " + str(item["pid"]))
-      if pid_exists(item["pid"]) and (item["exe"] in exclude_list) and not skip_daemonize:
-        try:
-          os.kill(int(item["pid"]), signal.SIGKILL)
-        except:
-          pass
+    if pids:
+      pfile = open(pidfile, "w")
+      for item in pids:
+        if pid_exists(item["pid"]) and (item["exe"] not in exclude_list):
+          pfile.write("%s\n" % item["pid"])
+          pid_saved = True
+          logger.info("Ambari server started with PID " + str(item["pid"]))
+        if pid_exists(item["pid"]) and (item["exe"] in exclude_list) and not skip_daemonize:
+          try:
+            os.kill(int(item["pid"]), signal.SIGKILL)
+          except:
+            pass
   except IOError as e:
     logger.error("Failed to write PID to " + pidfile + " due to " + str(e))
     pass

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/python/ambari_server_main.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server_main.py b/ambari-server/src/main/python/ambari_server_main.py
index 572de4e..6c77522 100644
--- a/ambari-server/src/main/python/ambari_server_main.py
+++ b/ambari-server/src/main/python/ambari_server_main.py
@@ -28,6 +28,7 @@ from ambari_commons.logging_utils import get_debug_mode, print_warning_msg, prin
 from ambari_commons.os_check import OSConst
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from ambari_commons.os_utils import is_root, run_os_command
+from ambari_server.ambariPath import AmbariPath
 from ambari_server.dbConfiguration import ensure_dbms_is_running, ensure_jdbc_driver_is_installed
 from ambari_server.serverConfiguration import configDefaults, find_jdk, get_ambari_properties, \
   get_conf_dir, get_is_persisted, get_is_secure, get_java_exe_path, get_original_master_key, read_ambari_user, \
@@ -120,6 +121,8 @@ AMBARI_SERVER_NOT_STARTED_MSG = "Ambari Server java process hasn't been started
 ULIMIT_OPEN_FILES_KEY = 'ulimit.open.files'
 ULIMIT_OPEN_FILES_DEFAULT = 10000
 
+AMBARI_ENV_FILE = AmbariPath.get("/var/lib/ambari-server/ambari-env.sh")
+
 @OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
 def ensure_server_security_is_configured():
   pass
@@ -178,9 +181,10 @@ def generate_child_process_param_list(ambari_user, java_exe, class_path,
     # from subprocess, we have to skip --login option of su command. That's why
     # we change dir to / (otherwise subprocess can face with 'permission denied'
     # errors while trying to list current directory
-    cmd = "{ulimit_cmd} ; {su} {ambari_user} -s {sh_shell} -c '{command}'".format(ulimit_cmd=ulimit_cmd, 
+    cmd = "{ulimit_cmd} ; {su} {ambari_user} -s {sh_shell} -c 'source {ambari_env_file} ; {command}'".format(ulimit_cmd=ulimit_cmd,
                                                                                 su=locate_file('su', '/bin'), ambari_user=ambari_user,
-                                                                                sh_shell=locate_file('sh', '/bin'), command=command)
+                                                                                sh_shell=locate_file('sh', '/bin'), command=command,
+                                                                                ambari_env_file=AMBARI_ENV_FILE)
   else:
     cmd = "{ulimit_cmd} ; {command}".format(ulimit_cmd=ulimit_cmd, command=command)
     

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
index b01ed2f..82ce31e 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
@@ -26,7 +26,16 @@ delimiter ;
 
 # USE @schema;
 
-SET default_storage_engine=INNODB;
+-- Set default_storage_engine to InnoDB
+-- storage_engine variable should be used for versions prior to MySQL 5.6
+set @version_short = substring_index(@@version, '.', 2);
+set @major = cast(substring_index(@version_short, '.', 1) as SIGNED);
+set @minor = cast(substring_index(@version_short, '.', -1) as SIGNED);
+set @engine_stmt = IF(@major >= 5 AND @minor>=6, 'SET default_storage_engine=INNODB', 'SET storage_engine=INNODB');
+prepare statement from @engine_stmt;
+execute statement;
+DEALLOCATE PREPARE statement;
+
 
 CREATE TABLE stack(
   stack_id BIGINT NOT NULL,

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
index e00c1f5..53cf002 100644
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
@@ -122,7 +122,7 @@ info_num_logs = config['configurations']['accumulo-log4j']['info_num_logs']
 # metrics2 properties
 ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', []) # is not passed when ganglia is not present
 ganglia_server_host = '' if len(ganglia_server_hosts) == 0 else ganglia_server_hosts[0]
-ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
+ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
 has_metric_collector = not len(ams_collector_hosts) == 0
 if has_metric_collector:
   if 'cluster-env' in config['configurations'] and \

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/metainfo.xml b/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/metainfo.xml
index 8e1051b..b94812b 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/metainfo.xml
@@ -34,6 +34,7 @@
           <commandScript>
             <script>scripts/infra_solr.py</script>
             <scriptType>PYTHON</scriptType>
+            <timeout>1800</timeout>
           </commandScript>
           <logs>
             <log>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/setup_infra_solr.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/setup_infra_solr.py b/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/setup_infra_solr.py
index 41cb504..5143cbb 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/setup_infra_solr.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/setup_infra_solr.py
@@ -112,4 +112,5 @@ def create_ambari_solr_znode():
   solr_cloud_util.create_znode(
     zookeeper_quorum=params.zookeeper_quorum,
     solr_znode=params.infra_solr_znode,
-    java64_home=params.java64_home)
\ No newline at end of file
+    java64_home=params.java64_home,
+    retry=30, interval=5)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml
index 6651b6c..81fa4c7 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml
@@ -82,7 +82,7 @@
   </property>
   <property>
     <name>failover_strategy_blacklisted_interval</name>
-    <value>600</value>
+    <value>300</value>
     <description>
       Metrics collector host will be blacklisted for specified number of seconds if metric monitor failed to connect to it.
     </description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
index b7008cf..76f8660 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
@@ -716,4 +716,16 @@
     </description>
     <on-ambari-upgrade add="true"/>
   </property>
+  <property>
+    <name>cluster.zookeeper.quorum</name>
+    <value>{{cluster_zookeeper_quorum_hosts}}</value>
+    <description>Comma separated list of servers in the cluster ZooKeeper Quorum.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>cluster.zookeeper.property.clientPort</name>
+    <value>{{cluster_zookeeper_clientPort}}</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
index 43a4320..740a91a 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
@@ -101,6 +101,7 @@
           <category>MASTER</category>
           <cardinality>0-1</cardinality>
           <versionAdvertised>false</versionAdvertised>
+          <customFolder>dashboards</customFolder>
           <commandScript>
             <script>scripts/metrics_grafana.py</script>
             <scriptType>PYTHON</scriptType>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
index feabdb2..d9fd9f6 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
@@ -364,6 +364,7 @@ def ams(name=None, action=None):
     Directory(params.ams_monitor_pid_dir,
               owner=params.ams_user,
               group=params.user_group,
+              cd_access="a",
               mode=0755,
               create_parents = True
     )

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_collector.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_collector.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_collector.py
index 214c1e8..99df380 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_collector.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_collector.py
@@ -38,6 +38,8 @@ class AmsCollector(Script):
   def configure(self, env, action = None):
     import params
     env.set_params(params)
+    if action == 'start' and params.embedded_mode_multiple_instances:
+      raise Fail("AMS in embedded mode cannot have more than 1 instance. Delete all but 1 instances or switch to Distributed mode ")
     hbase('master', action)
     hbase('regionserver', action)
     ams(name='collector')


[04/25] ambari git commit: Merge from branch-2.5

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-default.json b/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-default.json
new file mode 100644
index 0000000..1d5adff
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-default.json
@@ -0,0 +1,704 @@
+{
+    "localComponents": [
+        "NAMENODE", 
+        "SECONDARY_NAMENODE", 
+        "ZOOKEEPER_SERVER", 
+        "DATANODE", 
+        "HDFS_CLIENT", 
+        "ZOOKEEPER_CLIENT", 
+        "RANGER_USERSYNC", 
+        "RANGER_ADMIN", 
+        "RANGER_TAGSYNC",
+        "LOGSEARCH_SOLR",
+        "LOGSEARCH_SOLR_CLIENT"
+    ], 
+    "configuration_attributes": {
+        "ranger-hdfs-audit": {}, 
+        "ssl-client": {}, 
+        "ranger-admin-site": {}, 
+        "ranger-hdfs-policymgr-ssl": {}, 
+        "tagsync-application-properties": {}, 
+        "ranger-env": {}, 
+        "usersync-log4j": {}, 
+        "admin-properties": {}, 
+        "ranger-ugsync-site": {}, 
+        "hdfs-site": {
+            "final": {
+                "dfs.datanode.data.dir": "true", 
+                "dfs.namenode.http-address": "true", 
+                "dfs.datanode.failed.volumes.tolerated": "true", 
+                "dfs.support.append": "true", 
+                "dfs.namenode.name.dir": "true", 
+                "dfs.webhdfs.enabled": "true"
+            }
+        }, 
+        "ranger-tagsync-site": {}, 
+        "zoo.cfg": {}, 
+        "hadoop-policy": {}, 
+        "hdfs-log4j": {}, 
+        "ranger-hdfs-plugin-properties": {}, 
+        "core-site": {
+            "final": {
+                "fs.defaultFS": "true"
+            }
+        }, 
+        "hadoop-env": {}, 
+        "zookeeper-log4j": {}, 
+        "ssl-server": {}, 
+        "ranger-site": {}, 
+        "admin-log4j": {}, 
+        "tagsync-log4j": {}, 
+        "ranger-hdfs-security": {}, 
+        "usersync-properties": {}, 
+        "zookeeper-env": {},
+        "infra-solr-env": {},
+        "infra-solr-client-log4j": {},
+        "cluster-env": {}
+    }, 
+    "public_hostname": "c6401.ambari.apache.org", 
+    "commandId": "11-0", 
+    "hostname": "c6401.ambari.apache.org", 
+    "kerberosCommandParams": [], 
+    "serviceName": "RANGER", 
+    "role": "RANGER_ADMIN", 
+    "forceRefreshConfigTagsBeforeExecution": [], 
+    "requestId": 11, 
+    "agentConfigParams": {
+        "agent": {
+            "parallel_execution": 0
+        }
+    }, 
+    "clusterName": "c1", 
+    "commandType": "EXECUTION_COMMAND", 
+    "taskId": 31, 
+    "roleParams": {}, 
+    "configurationTags": {
+        "ranger-hdfs-audit": {
+            "tag": "version1466705299922"
+        }, 
+        "ssl-client": {
+            "tag": "version1"
+        }, 
+        "ranger-admin-site": {
+            "tag": "version1466705299949"
+        }, 
+        "ranger-hdfs-policymgr-ssl": {
+            "tag": "version1466705299922"
+        }, 
+        "tagsync-application-properties": {
+            "tag": "version1466705299949"
+        }, 
+        "ranger-env": {
+            "tag": "version1466705299949"
+        }, 
+        "usersync-log4j": {
+            "tag": "version1466705299949"
+        }, 
+        "admin-properties": {
+            "tag": "version1466705299949"
+        }, 
+        "ranger-ugsync-site": {
+            "tag": "version1466705299949"
+        }, 
+        "hdfs-site": {
+            "tag": "version1"
+        }, 
+        "ranger-tagsync-site": {
+            "tag": "version1466705299949"
+        }, 
+        "zoo.cfg": {
+            "tag": "version1"
+        }, 
+        "hadoop-policy": {
+            "tag": "version1"
+        }, 
+        "hdfs-log4j": {
+            "tag": "version1"
+        }, 
+        "ranger-hdfs-plugin-properties": {
+            "tag": "version1466705299922"
+        }, 
+        "core-site": {
+            "tag": "version1"
+        }, 
+        "hadoop-env": {
+            "tag": "version1"
+        }, 
+        "zookeeper-log4j": {
+            "tag": "version1"
+        }, 
+        "ssl-server": {
+            "tag": "version1"
+        }, 
+        "ranger-site": {
+            "tag": "version1466705299949"
+        }, 
+        "admin-log4j": {
+            "tag": "version1466705299949"
+        }, 
+        "tagsync-log4j": {
+            "tag": "version1466705299949"
+        }, 
+        "ranger-hdfs-security": {
+            "tag": "version1466705299922"
+        }, 
+        "usersync-properties": {
+            "tag": "version1466705299949"
+        }, 
+        "zookeeper-env": {
+            "tag": "version1"
+        },
+        "infra-solr-env": {
+            "tag": "version1467098537360"
+        },
+        "infra-solr-client-log4j": {
+            "tag": "version1467096917836"
+        },
+        "cluster-env": {
+            "tag": "version1"
+        }
+    }, 
+    "roleCommand": "START", 
+    "hostLevelParams": {
+        "agent_stack_retry_on_unavailability": "false", 
+        "stack_name": "HDP", 
+        "package_version": "2_5_0_0_*", 
+        "custom_mysql_jdbc_name": "mysql-connector-java.jar",
+        "previous_custom_mysql_jdbc_name": "mysql-connector-java-old.jar",
+        "host_sys_prepped": "false", 
+        "ambari_db_rca_username": "mapred", 
+        "current_version": "2.6.0.0-801",
+        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar", 
+        "agent_stack_retry_count": "5", 
+        "stack_version": "2.6",
+        "jdk_name": "jdk-8u60-linux-x64.tar.gz", 
+        "ambari_db_rca_driver": "org.postgresql.Driver", 
+        "java_home": "/usr/jdk64/jdk1.7.0_45", 
+        "repository_version_id": "1", 
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
+        "not_managed_hdfs_path_list": "[\"/tmp\"]", 
+        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
+        "java_version": "8", 
+        "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]", 
+        "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]", 
+        "db_name": "ambari", 
+        "group_list": "[\"ranger\",\"hadoop\",\"users\"]", 
+        "agentCacheDir": "/var/lib/ambari-agent/cache", 
+        "ambari_db_rca_password": "mapred", 
+        "jce_name": "jce_policy-8.zip", 
+        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
+        "db_driver_filename": "mysql-connector-java.jar", 
+        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]", 
+        "clientsToUpdateConfigs": "[\"*\"]"
+    }, 
+    "commandParams": {
+        "service_package_folder": "common-services/RANGER/0.4.0/package", 
+        "script": "scripts/ranger_admin.py", 
+        "hooks_folder": "HDP/2.0.6/hooks", 
+        "version": "2.6.0.0-801",
+        "max_duration_for_retries": "0", 
+        "command_retry_enabled": "false", 
+        "command_timeout": "600", 
+        "script_type": "PYTHON"
+    }, 
+    "forceRefreshConfigTags": [], 
+    "stageId": 0, 
+    "clusterHostInfo": {
+        "snamenode_host": [
+            "c6401.ambari.apache.org"
+        ], 
+        "ambari_server_use_ssl": [
+            "false"
+        ], 
+        "all_ping_ports": [
+            "8670"
+        ], 
+        "ranger_tagsync_hosts": [
+            "c6401.ambari.apache.org"
+        ], 
+        "ranger_usersync_hosts": [
+            "c6401.ambari.apache.org"
+        ], 
+        "all_hosts": [
+            "c6401.ambari.apache.org"
+        ], 
+        "slave_hosts": [
+            "c6401.ambari.apache.org"
+        ], 
+        "namenode_host": [
+            "c6401.ambari.apache.org"
+        ], 
+        "ambari_server_port": [
+            "8080"
+        ], 
+        "ranger_admin_hosts": [
+            "c6401.ambari.apache.org"
+        ], 
+        "all_racks": [
+            "/default-rack"
+        ], 
+        "all_ipv4_ips": [
+            "172.22.83.73"
+        ], 
+        "ambari_server_host": [
+            "c6401.ambari.apache.org"
+        ], 
+        "zookeeper_hosts": [
+            "c6401.ambari.apache.org"
+        ],
+        "infra_solr_hosts": [
+            "c6401.ambari.apache.org"
+        ]
+    }, 
+    "configurations": {
+        "ranger-hdfs-audit": {
+            "xasecure.audit.destination.solr.zookeepers": "NONE", 
+            "xasecure.audit.destination.solr.urls": "", 
+            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool", 
+            "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool", 
+            "xasecure.audit.destination.hdfs": "true",
+            "xasecure.audit.destination.solr": "false", 
+            "xasecure.audit.provider.summary.enabled": "false",
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
+            "xasecure.audit.is.enabled": "true"
+        }, 
+        "ssl-client": {
+            "ssl.client.truststore.reload.interval": "10000", 
+            "ssl.client.keystore.password": "bigdata", 
+            "ssl.client.truststore.type": "jks", 
+            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks", 
+            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks", 
+            "ssl.client.truststore.password": "bigdata", 
+            "ssl.client.keystore.type": "jks"
+        }, 
+        "ranger-admin-site": {
+            "ranger.admin.kerberos.cookie.domain": "",
+            "ranger.kms.service.user.hdfs": "hdfs", 
+            "ranger.spnego.kerberos.principal": "", 
+            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}", 
+            "ranger.plugins.hive.serviceuser": "hive", 
+            "ranger.lookup.kerberos.keytab": "", 
+            "ranger.plugins.kms.serviceuser": "kms", 
+            "ranger.service.https.attrib.ssl.enabled": "false", 
+            "ranger.sso.browser.useragent": "Mozilla,chrome", 
+            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01", 
+            "ranger.plugins.hbase.serviceuser": "hbase", 
+            "ranger.plugins.hdfs.serviceuser": "hdfs", 
+            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}", 
+            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net", 
+            "ranger.plugins.knox.serviceuser": "knox", 
+            "ranger.ldap.base.dn": "dc=example,dc=com", 
+            "ranger.sso.publicKey": "", 
+            "ranger.admin.kerberos.cookie.path": "/", 
+            "ranger.service.https.attrib.clientAuth": "want", 
+            "ranger.jpa.jdbc.user": "{{ranger_db_user}}", 
+            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})", 
+            "ranger.ldap.group.roleattribute": "cn", 
+            "ranger.plugins.kafka.serviceuser": "kafka", 
+            "ranger.admin.kerberos.principal": "", 
+            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
+            "ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks", 
+            "ranger.ldap.referral": "ignore", 
+            "ranger.service.http.port": "6080", 
+            "ranger.ldap.user.searchfilter": "(uid={0})", 
+            "ranger.plugins.atlas.serviceuser": "atlas", 
+            "ranger.truststore.password": "changeit", 
+            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
+            "ranger.audit.solr.password": "NONE", 
+            "ranger.audit.solr.zookeepers": "c6401.ambari.apache.org:2181/ambari-solr",
+            "ranger.lookup.kerberos.principal": "", 
+            "ranger.service.https.port": "6182", 
+            "ranger.plugins.storm.serviceuser": "storm", 
+            "ranger.externalurl": "{{ranger_external_url}}", 
+            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
+            "ranger.kms.service.user.hive": "", 
+            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
+            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}", 
+            "ranger.service.host": "{{ranger_host}}", 
+            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin", 
+            "ranger.service.https.attrib.keystore.pass": "xasecure", 
+            "ranger.unixauth.remote.login.enabled": "true", 
+            "ranger.jpa.jdbc.credential.alias": "rangeradmin", 
+            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
+            "ranger.audit.solr.username": "ranger_solr", 
+            "ranger.sso.enabled": "false", 
+            "ranger.audit.solr.urls": "", 
+            "ranger.ldap.ad.domain": "", 
+            "ranger.plugins.yarn.serviceuser": "yarn", 
+            "ranger.audit.source.type": "solr", 
+            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
+            "ranger.ldap.url": "{{ranger_ug_ldap_url}}", 
+            "ranger.authentication.method": "UNIX", 
+            "ranger.service.http.enabled": "true", 
+            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}", 
+            "ranger.ldap.ad.referral": "ignore", 
+            "ranger.ldap.ad.base.dn": "dc=example,dc=com", 
+            "ranger.jpa.jdbc.password": "_", 
+            "ranger.spnego.kerberos.keytab": "", 
+            "ranger.sso.providerurl": "", 
+            "ranger.unixauth.service.hostname": "{{ugsync_host}}", 
+            "ranger.admin.kerberos.keytab": "", 
+            "ranger.admin.kerberos.token.valid.seconds": "30", 
+            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver", 
+            "ranger.unixauth.service.port": "5151"
+        }, 
+        "ranger-hdfs-policymgr-ssl": {
+            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks", 
+            "xasecure.policymgr.clientssl.truststore.password": "changeit", 
+            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", 
+            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks", 
+            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}", 
+            "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
+        }, 
+        "tagsync-application-properties": {
+            "atlas.kafka.entities.group.id": "ranger_entities_consumer", 
+            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181", 
+            "atlas.kafka.bootstrap.servers": "localhost:6667"
+        }, 
+        "ranger-env": {
+            "ranger_solr_shards": "1", 
+            "ranger_solr_config_set": "ranger_audits", 
+            "ranger_user": "ranger", 
+            "ranger_solr_replication_factor": "1",
+            "xml_configurations_supported": "true", 
+            "ranger-atlas-plugin-enabled": "No", 
+            "ranger-hbase-plugin-enabled": "No", 
+            "ranger-yarn-plugin-enabled": "No", 
+            "bind_anonymous": "false", 
+            "ranger_admin_username": "amb_ranger_admin", 
+            "admin_password": "admin", 
+            "is_solrCloud_enabled": "true", 
+            "ranger-storm-plugin-enabled": "No", 
+            "ranger-hdfs-plugin-enabled": "No", 
+            "ranger_group": "ranger", 
+            "ranger-knox-plugin-enabled": "No", 
+            "ranger_admin_log_dir": "/var/log/ranger/admin", 
+            "ranger-kafka-plugin-enabled": "No", 
+            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306", 
+            "ranger-hive-plugin-enabled": "No", 
+            "xasecure.audit.destination.solr": "true", 
+            "ranger_pid_dir": "/var/run/ranger", 
+            "xasecure.audit.destination.hdfs": "true", 
+            "admin_username": "admin", 
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
+            "create_db_dbuser": "true", 
+            "ranger_solr_collection_name": "ranger_audits", 
+            "ranger_admin_password": "P1!q9xa96SMi5NCl", 
+            "ranger_usersync_log_dir": "/var/log/ranger/usersync"
+        }, 
+        "usersync-log4j": {
+            "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/usersync.
 log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n"
+        }, 
+        "admin-properties": {
+            "db_user": "rangeradmin01", 
+            "DB_FLAVOR": "MYSQL", 
+            "db_password": "rangeradmin01", 
+            "db_root_user": "root", 
+            "policymgr_external_url": "http://c6401.ambari.apache.org:6080", 
+            "db_name": "ranger01", 
+            "db_host": "c6401.ambari.apache.org", 
+            "db_root_password": "vagrant", 
+            "SQL_CONNECTOR_JAR": "{{driver_curl_target}}"
+        }, 
+        "ranger-ugsync-site": {
+            "ranger.usersync.ldap.binddn": "", 
+            "ranger.usersync.policymgr.username": "rangerusersync", 
+            "ranger.usersync.policymanager.mockrun": "false", 
+            "ranger.usersync.group.searchbase": "", 
+            "ranger.usersync.ldap.bindalias": "testldapalias", 
+            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks", 
+            "ranger.usersync.port": "5151", 
+            "ranger.usersync.pagedresultssize": "500", 
+            "ranger.usersync.group.memberattributename": "", 
+            "ranger.usersync.kerberos.principal": "", 
+            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder", 
+            "ranger.usersync.ldap.referral": "ignore", 
+            "ranger.usersync.group.searchfilter": "", 
+            "ranger.usersync.ldap.user.objectclass": "person", 
+            "ranger.usersync.logdir": "{{usersync_log_dir}}", 
+            "ranger.usersync.ldap.user.searchfilter": "", 
+            "ranger.usersync.ldap.groupname.caseconversion": "none", 
+            "ranger.usersync.ldap.ldapbindpassword": "", 
+            "ranger.usersync.unix.minUserId": "500", 
+            "ranger.usersync.policymanager.maxrecordsperapicall": "1000", 
+            "ranger.usersync.group.nameattribute": "", 
+            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password", 
+            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks", 
+            "ranger.usersync.user.searchenabled": "false", 
+            "ranger.usersync.group.usermapsyncenabled": "true", 
+            "ranger.usersync.ldap.bindkeystore": "", 
+            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof", 
+            "ranger.usersync.kerberos.keytab": "", 
+            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe", 
+            "ranger.usersync.group.objectclass": "", 
+            "ranger.usersync.ldap.user.searchscope": "sub", 
+            "ranger.usersync.unix.password.file": "/etc/passwd", 
+            "ranger.usersync.ldap.user.nameattribute": "", 
+            "ranger.usersync.pagedresultsenabled": "true", 
+            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}", 
+            "ranger.usersync.group.search.first.enabled": "false", 
+            "ranger.usersync.group.searchenabled": "false", 
+            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder", 
+            "ranger.usersync.ssl": "true", 
+            "ranger.usersync.ldap.url": "", 
+            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org", 
+            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
+            "ranger.usersync.ldap.user.searchbase": "", 
+            "ranger.usersync.ldap.username.caseconversion": "none", 
+            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
+            "ranger.usersync.keystore.password": "UnIx529p", 
+            "ranger.usersync.unix.group.file": "/etc/group", 
+            "ranger.usersync.filesource.file": "/tmp/usergroup.txt", 
+            "ranger.usersync.group.searchscope": "", 
+            "ranger.usersync.truststore.password": "changeit", 
+            "ranger.usersync.enabled": "true", 
+            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000", 
+            "ranger.usersync.filesource.text.delimiter": ","
+        }, 
+        "hdfs-site": {
+            "dfs.namenode.checkpoint.period": "21600", 
+            "dfs.namenode.avoid.write.stale.datanode": "true", 
+            "dfs.namenode.startup.delay.block.deletion.sec": "3600", 
+            "dfs.namenode.checkpoint.txns": "1000000", 
+            "dfs.content-summary.limit": "5000", 
+            "dfs.support.append": "true", 
+            "dfs.datanode.address": "0.0.0.0:50010", 
+            "dfs.cluster.administrators": " hdfs", 
+            "dfs.namenode.audit.log.async": "true", 
+            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
+            "dfs.namenode.safemode.threshold-pct": "1", 
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
+            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020", 
+            "dfs.permissions.enabled": "true", 
+            "dfs.client.read.shortcircuit": "true", 
+            "dfs.https.port": "50470", 
+            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470", 
+            "nfs.file.dump.dir": "/tmp/.hdfs-nfs", 
+            "dfs.blocksize": "134217728", 
+            "dfs.blockreport.initialDelay": "120", 
+            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode", 
+            "dfs.namenode.fslock.fair": "false", 
+            "dfs.datanode.max.transfer.threads": "4096", 
+            "dfs.heartbeat.interval": "3", 
+            "dfs.replication": "3", 
+            "dfs.namenode.handler.count": "50", 
+            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary", 
+            "fs.permissions.umask-mode": "022", 
+            "dfs.namenode.stale.datanode.interval": "30000", 
+            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
+            "dfs.datanode.failed.volumes.tolerated": "0", 
+            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data", 
+            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070", 
+            "dfs.webhdfs.enabled": "true", 
+            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding", 
+            "dfs.namenode.accesstime.precision": "0", 
+            "dfs.datanode.https.address": "0.0.0.0:50475", 
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
+            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090", 
+            "nfs.exports.allowed.hosts": "* rw", 
+            "dfs.datanode.http.address": "0.0.0.0:50075", 
+            "dfs.datanode.du.reserved": "33011188224", 
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
+            "dfs.http.policy": "HTTP_ONLY", 
+            "dfs.block.access.token.enable": "true", 
+            "dfs.client.retry.policy.enabled": "false", 
+            "dfs.namenode.name.dir.restore": "true", 
+            "dfs.permissions.superusergroup": "hdfs", 
+            "dfs.journalnode.https-address": "0.0.0.0:8481", 
+            "dfs.journalnode.http-address": "0.0.0.0:8480", 
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
+            "dfs.namenode.avoid.read.stale.datanode": "true", 
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
+            "dfs.datanode.data.dir.perm": "750", 
+            "dfs.encryption.key.provider.uri": "", 
+            "dfs.replication.max": "50", 
+            "dfs.namenode.name.dir": "/grid/0/hadoop/hdfs/namenode"
+        }, 
+        "ranger-tagsync-site": {
+            "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks", 
+            "ranger.tagsync.source.atlasrest.username": "", 
+            "ranger.tagsync.logdir": "/var/log/ranger/tagsync", 
+            "ranger.tagsync.source.atlasrest.download.interval.millis": "", 
+            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks", 
+            "ranger.tagsync.source.file.check.interval.millis": "", 
+            "ranger.tagsync.source.atlasrest.endpoint": "", 
+            "ranger.tagsync.dest.ranger.username": "rangertagsync", 
+            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}", 
+            "ranger.tagsync.kerberos.principal": "", 
+            "ranger.tagsync.kerberos.keytab": "", 
+            "ranger.tagsync.source.atlas": "false", 
+            "ranger.tagsync.source.atlasrest": "false", 
+            "ranger.tagsync.source.file": "false", 
+            "ranger.tagsync.source.file.filename": ""
+        }, 
+        "zoo.cfg": {
+            "clientPort": "2181", 
+            "autopurge.purgeInterval": "24", 
+            "syncLimit": "5", 
+            "dataDir": "/grid/0/hadoop/zookeeper", 
+            "initLimit": "10", 
+            "tickTime": "2000", 
+            "autopurge.snapRetainCount": "30"
+        }, 
+        "hadoop-policy": {
+            "security.job.client.protocol.acl": "*", 
+            "security.job.task.protocol.acl": "*", 
+            "security.datanode.protocol.acl": "*", 
+            "security.namenode.protocol.acl": "*", 
+            "security.client.datanode.protocol.acl": "*", 
+            "security.inter.tracker.protocol.acl": "*", 
+            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop", 
+            "security.client.protocol.acl": "*", 
+            "security.refresh.policy.protocol.acl": "hadoop", 
+            "security.admin.operations.protocol.acl": "hadoop", 
+            "security.inter.datanode.protocol.acl": "*"
+        }, 
+        "hdfs-log4j": {
+            "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
 gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
 .console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
 er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
 false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# NameNode metrics logging.\n# The default is to retain two namenode-metrics.log files up to 64MB each.\n#\nnamenode.metrics.logger=INFO,NullAppender\nlog4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}\nlog4j.additivity.NameNodeMetricsLog=false\nlog4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log\nlog4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n\nlog4j.appender.NNMETRICSRFA.MaxBackupIndex=1\nlog4j.appender.NNMETRICSRFA.MaxFileSize=64MB\n\n#\n# mapred audit logging\n#\nmapred
 .audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging leve
 ls\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateCh
 ange=WARN"
+        }, 
+        "ranger-hdfs-plugin-properties": {
+            "hadoop.rpc.protection": "authentication", 
+            "ranger-hdfs-plugin-enabled": "No", 
+            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
+            "policy_user": "ambari-qa", 
+            "common.name.for.certificate": "", 
+            "REPOSITORY_CONFIG_PASSWORD": "hadoop"
+        }, 
+        "core-site": {
+            "hadoop.proxyuser.root.hosts": "c6401.ambari.apache.org", 
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
+            "fs.trash.interval": "360", 
+            "ha.failover-controller.active-standby-elector.zk.op.retries": "120", 
+            "hadoop.http.authentication.simple.anonymous.allowed": "true", 
+            "hadoop.security.authentication": "simple", 
+            "hadoop.proxyuser.root.groups": "*", 
+            "ipc.client.connection.maxidletime": "30000", 
+            "hadoop.security.key.provider.path": "", 
+            "mapreduce.jobtracker.webinterface.trusted": "false", 
+            "hadoop.security.authorization": "false", 
+            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py", 
+            "ipc.server.tcpnodelay": "true", 
+            "ipc.client.connect.max.retries": "50", 
+            "hadoop.security.auth_to_local": "DEFAULT", 
+            "io.file.buffer.size": "131072", 
+            "hadoop.proxyuser.hdfs.hosts": "*", 
+            "hadoop.proxyuser.hdfs.groups": "*", 
+            "ipc.client.idlethreshold": "8000", 
+            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020", 
+            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec"
+        }, 
+        "hadoop-env": {
+            "keyserver_port": "", 
+            "proxyuser_group": "users", 
+            "hdfs_user_nproc_limit": "65536", 
+            "hdfs_log_dir_prefix": "/var/log/hadoop", 
+            "hdfs_user_nofile_limit": "128000", 
+            "hdfs_user": "hdfs", 
+            "keyserver_host": " ", 
+            "namenode_opt_maxnewsize": "128m", 
+            "namenode_opt_maxpermsize": "256m", 
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
 NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
 N_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
 ing priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
 ECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}", 
+            "namenode_heapsize": "1024m", 
+            "namenode_opt_newsize": "128m", 
+            "nfsgateway_heapsize": "1024", 
+            "dtnode_heapsize": "1024m", 
+            "hadoop_root_logger": "INFO,RFA", 
+            "hadoop_heapsize": "1024", 
+            "hadoop_pid_dir_prefix": "/var/run/hadoop", 
+            "namenode_opt_permsize": "128m", 
+            "hdfs_tmp_dir": "/tmp"
+        }, 
+        "zookeeper-log4j": {
+            "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4
 j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLIN
 GFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n"
+        }, 
+        "ssl-server": {
+            "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks", 
+            "ssl.server.keystore.keypassword": "bigdata", 
+            "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks", 
+            "ssl.server.keystore.password": "bigdata", 
+            "ssl.server.truststore.password": "bigdata", 
+            "ssl.server.truststore.type": "jks", 
+            "ssl.server.keystore.type": "jks", 
+            "ssl.server.truststore.reload.interval": "10000"
+        }, 
+        "ranger-site": {}, 
+        "admin-log4j": {
+            "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = warn,xa_log_appender\n\n\n# xa_logger\nlog4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.xa_log_app
 ender.file=${logdir}/xa_portal.log\nlog4j.appender.xa_log_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.xa_log_appender.append=true\nlog4j.appender.xa_log_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.xa_log_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n# xa_log_appender : category and additivity\nlog4j.category.org.springframework=warn,xa_log_appender\nlog4j.additivity.org.springframework=false\n\nlog4j.category.org.apache.ranger=info,xa_log_appender\nlog4j.additivity.org.apache.ranger=false\n\nlog4j.category.xa=info,xa_log_appender\nlog4j.additivity.xa=false\n\n# perf_logger\nlog4j.appender.perf_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.perf_appender.file=${logdir}/ranger_admin_perf.log\nlog4j.appender.perf_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.perf_appender.append=true\nlog4j.appender.perf_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.perf_appender.layout.ConversionPattern=%d [%t] 
 %m%n\n\n\n# sql_appender\nlog4j.appender.sql_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.sql_appender.file=${logdir}/xa_portal_sql.log\nlog4j.appender.sql_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.sql_appender.append=true\nlog4j.appender.sql_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.sql_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n\n# sql_appender : category and additivity\nlog4j.category.org.hibernate.SQL=warn,sql_appender\nlog4j.additivity.org.hibernate.SQL=false\n\nlog4j.category.jdbc.sqlonly=fatal,sql_appender\nlog4j.additivity.jdbc.sqlonly=false\n\nlog4j.category.jdbc.sqltiming=warn,sql_appender\nlog4j.additivity.jdbc.sqltiming=false\n\nlog4j.category.jdbc.audit=fatal,sql_appender\nlog4j.additivity.jdbc.audit=false\n\nlog4j.category.jdbc.resultset=fatal,sql_appender\nlog4j.additivity.jdbc.resultset=false\n\nlog4j.category.jdbc.connection=fatal,sql_appender\nlog4j.additivity.jdbc.connection=false"
+        }, 
+        "tagsync-log4j": {
+            "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/tagsync
 .log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n"
+        }, 
+        "ranger-hdfs-security": {
+            "ranger.plugin.hdfs.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient", 
+            "ranger.plugin.hdfs.service.name": "{{repo_name}}", 
+            "ranger.plugin.hdfs.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache", 
+            "ranger.plugin.hdfs.policy.pollIntervalMs": "30000", 
+            "ranger.plugin.hdfs.policy.rest.url": "{{policymgr_mgr_url}}", 
+            "ranger.plugin.hdfs.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl.xml", 
+            "xasecure.add-hadoop-authorization": "true"
+        }, 
+        "usersync-properties": {}, 
+        "zookeeper-env": {
+            "zk_log_dir": "/var/log/zookeeper", 
+            "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}", 
+            "zk_server_heapsize": "1024m", 
+            "zk_pid_dir": "/var/run/zookeeper", 
+            "zk_user": "zookeeper"
+        },
+        "infra-solr-env": {
+            "infra_solr_datadir": "/opt/logsearch_solr/data",
+            "infra_solr_keystore_location": "/etc/security/serverKeys/logsearch.keyStore.jks",
+            "infra_solr_kerberos_name_rules": "DEFAULT",
+            "infra_solr_user": "infra-solr",
+            "infra_solr_maxmem": "1024",
+            "content": "#!/bin/bash\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# By default the script will use JAVA_HOME to determine which java\n# to use, but you can set a specific path for Solr to use without\n# affecting other Java applica
 tions on your server/workstation.\nSOLR_JAVA_HOME={{java64_home}}\n\n# Increase Java Min/Max Heap as needed to support your indexing / query needs\nSOLR_JAVA_MEM=\"-Xms{{logsearch_solr_min_mem}}m -Xmx{{logsearch_solr_max_mem}}m\"\n\n# Enable verbose GC logging\nGC_LOG_OPTS=\"-verbose:gc -XX:+PrintHeapAtGC -XX:+PrintGCDetails \\\n-XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+PrintTenuringDistribution -XX:+PrintGCApplicationStoppedTime\"\n\n# These GC settings have shown to work well for a number of common Solr workloads\nGC_TUNE=\"-XX:NewRatio=3 \\\n-XX:SurvivorRatio=4 \\\n-XX:TargetSurvivorRatio=90 \\\n-XX:MaxTenuringThreshold=8 \\\n-XX:+UseConcMarkSweepGC \\\n-XX:+UseParNewGC \\\n-XX:ConcGCThreads=4 -XX:ParallelGCThreads=4 \\\n-XX:+CMSScavengeBeforeRemark \\\n-XX:PretenureSizeThreshold=64m \\\n-XX:+UseCMSInitiatingOccupancyOnly \\\n-XX:CMSInitiatingOccupancyFraction=50 \\\n-XX:CMSMaxAbortablePrecleanTime=6000 \\\n-XX:+CMSParallelRemarkEnabled \\\n-XX:+ParallelRefProcEnabled\"\
 n\n# Set the ZooKeeper connection string if using an external ZooKeeper ensemble\n# e.g. host1:2181,host2:2181/chroot\n# Leave empty if not using SolrCloud\nZK_HOST=\"{{zookeeper_quorum}}{{logsearch_solr_znode}}\"\n\n# Set the ZooKeeper client timeout (for SolrCloud mode)\nZK_CLIENT_TIMEOUT=\"60000\"\n\n# By default the start script uses \"localhost\"; override the hostname here\n# for production SolrCloud environments to control the hostname exposed to cluster state\n#SOLR_HOST=\"192.168.1.1\"\n\n# By default the start script uses UTC; override the timezone if needed\n#SOLR_TIMEZONE=\"UTC\"\n\n# Set to true to activate the JMX RMI connector to allow remote JMX client applications\n# to monitor the JVM hosting Solr; set to \"false\" to disable that behavior\n# (false is recommended in production environments)\nENABLE_REMOTE_JMX_OPTS=\"true\"\n\n# The script will use SOLR_PORT+10000 for the RMI_PORT or you can set it here\nRMI_PORT={{logsearch_solr_jmx_port}}\n\n# Anything you add to
  the SOLR_OPTS variable will be included in the java\n# start command line as-is, in ADDITION to other options. If you specify the\n# -a option on start script, those options will be appended as well. Examples:\n#SOLR_OPTS=\"$SOLR_OPTS -Dsolr.autoSoftCommit.maxTime=3000\"\n#SOLR_OPTS=\"$SOLR_OPTS -Dsolr.autoCommit.maxTime=60000\"\n#SOLR_OPTS=\"$SOLR_OPTS -Dsolr.clustering.enabled=true\"\n\n# Location where the bin/solr script will save PID files for running instances\n# If not set, the script will create PID files in $SOLR_TIP/bin\nSOLR_PID_DIR={{logsearch_solr_piddir}}\n\n# Path to a directory where Solr creates index files, the specified directory\n# must contain a solr.xml; by default, Solr will use server/solr\nSOLR_HOME={{logsearch_solr_datadir}}\n\n# Solr provides a default Log4J configuration properties file in server/resources\n# however, you may want to customize the log settings and file appender location\n# so you can point the script to use a different log4j.properties f
 ile\nLOG4J_PROPS={{logsearch_solr_conf}}/log4j.properties\n\n# Location where Solr should write logs to; should agree with the file appender\n# settings in server/resources/log4j.properties\nSOLR_LOGS_DIR={{logsearch_solr_log_dir}}\n\n# Sets the port Solr binds to, default is 8983\nSOLR_PORT={{logsearch_solr_port}}\n\n# Be sure to update the paths to the correct keystore for your environment\n{% if logsearch_solr_ssl_enabled %}\nSOLR_SSL_KEY_STORE={{logsearch_solr_keystore_location}}\nSOLR_SSL_KEY_STORE_PASSWORD={{logsearch_solr_keystore_password}}\nSOLR_SSL_TRUST_STORE={{logsearch_solr_keystore_location}}\nSOLR_SSL_TRUST_STORE_PASSWORD={{logsearch_solr_keystore_password}}\nSOLR_SSL_NEED_CLIENT_AUTH=false\nSOLR_SSL_WANT_CLIENT_AUTH=false\n{% endif %}\n\n# Uncomment to set a specific SSL port (-Djetty.ssl.port=N); if not set\n# and you are using SSL, then the start script will use SOLR_PORT for the SSL port\n#SOLR_SSL_PORT=\n\n{% if security_enabled -%}\nSOLR_HOST=`hostname -f`\nSOLR
 _JAAS_FILE={{logsearch_solr_jaas_file}}\nSOLR_KERB_KEYTAB={{logsearch_solr_web_kerberos_keytab}}\nSOLR_KERB_PRINCIPAL={{logsearch_solr_web_kerberos_principal}}\nSOLR_KERB_NAME_RULES={{logsearch_solr_kerberos_name_rules}}\n\nSOLR_AUTHENTICATION_CLIENT_CONFIGURER=\"org.apache.solr.client.solrj.impl.Krb5HttpClientConfigurer\"\nSOLR_AUTHENTICATION_OPTS=\" -DauthenticationPlugin=org.apache.solr.security.KerberosPlugin -Djava.security.auth.login.config=$SOLR_JAAS_FILE -Dsolr.kerberos.principal=${SOLR_KERB_PRINCIPAL} -Dsolr.kerberos.keytab=${SOLR_KERB_KEYTAB} -Dsolr.kerberos.cookie.domain=${SOLR_HOST} -Dsolr.kerberos.name.rules=${SOLR_KERB_NAME_RULES}\"\n{% endif %}", 
+            "infra_solr_pid_dir": "/var/run/ambari-infra-solr",
+            "infra_solr_truststore_password": "bigdata",
+            "infra_solr_truststore_type": "jks",
+            "infra_solr_keystore_type": "jks",
+            "infra_solr_log_dir": "/var/log/ambari-infra-solr",
+            "infra_solr_web_kerberos_keytab": "/etc/security/keytabs/spnego.service.keytab",
+            "infra_solr_ssl_enabled": "false",
+            "infra_solr_client_log_dir": "/var/log/ambari-infra-solr-client",
+            "infra_solr_web_kerberos_principal": "HTTP/_HOST@EXAMPLE.COM",
+            "infra_solr_znode": "/infra-solr",
+            "infra_solr_keystore_password": "bigdata",
+            "infra_solr_port": "8886",
+            "infra_solr_kerberos_principal": "infra-solr/_HOST@EXAMPLE.COM",
+            "infra_solr_jmx_port": "18886",
+            "infra_solr_truststore_location": "/etc/security/serverKeys/logsearch.trustStore.jks",
+            "infra_solr_minmem": "512",
+            "infra_solr_kerberos_keytab": "/etc/security/keytabs/infra-solr.service.keytab"
+        },
+        "infra-solr-client-log4j": {
+            "infra_solr_client_log_dir" : "/var/log/ambari-infra-solr-client",
+            "content": "# Copyright 2011 The Apache Software Foundation\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nlog4j.rootLogger=INFO,file,stdout,stderr\n\nlog4j.appender.file=org.apache.log4j.RollingFileAppender\nlog4j.appender
 .file.File={{logsearch_solr_client_log}}\nlog4j.appender.file.MaxFileSize=80MB\nlog4j.appender.file.MaxBackupIndex=60\nlog4j.appender.file.layout=org.apache.log4j.PatternLayout\nlog4j.appender.file.layout.ConversionPattern=%d{DATE} %5p [%t] %c{1}:%L - %m%n\n\nlog4j.appender.stdout=org.apache.log4j.ConsoleAppender\nlog4j.appender.stdout.Threshold=INFO\nlog4j.appender.stdout.Target=System.out\nlog4j.appender.stdout.layout=org.apache.log4j.PatternLayout\nlog4j.appender.stdout.layout.ConversionPattern=%m%n\n\nlog4j.appender.stderr=org.apache.log4j.ConsoleAppender\nlog4j.appender.stderr.Threshold=ERROR\nlog4j.appender.stderr.Target=System.err\nlog4j.appender.stderr.layout=org.apache.log4j.PatternLayout\nlog4j.appender.stderr.layout.ConversionPattern=%m%n"
+        },
+        "cluster-env": {
+            "security_enabled": "false", 
+            "override_uid": "true", 
+            "fetch_nonlocal_groups": "true", 
+            "one_dir_per_partition": "true", 
+            "repo_ubuntu_template": "{{package_type}} {{base_url}} {{components}}", 
+            "ignore_groupsusers_create": "false", 
+            "alerts_repeat_tolerance": "1", 
+            "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab", 
+            "kerberos_domain": "EXAMPLE.COM", 
+            "manage_dirs_on_root": "true",
+            "recovery_lifetime_max_count": "1024", 
+            "recovery_type": "AUTO_START", 
+            "ignore_bad_mounts": "false", 
+            "recovery_window_in_minutes": "60", 
+            "user_group": "hadoop", 
+            "stack_tools": "{\n  \"stack_selector\": [\"hdp-select\", \"/usr/bin/hdp-select\", \"hdp-select\"],\n  \"conf_selector\": [\"conf-select\", \"/usr/bin/conf-select\", \"conf-select\"]\n}", 
+            "recovery_retry_interval": "5", 
+            "stack_features": "{\n  \"stack_features\": [\n    {\n      \"name\": \"snappy\",\n      \"description\": \"Snappy compressor/decompressor support\",\n      \"min_version\": \"2.0.0.0\",\n      \"max_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"lzo\",\n      \"description\": \"LZO libraries support\",\n      \"min_version\": \"2.2.1.0\"\n    },\n    {\n      \"name\": \"express_upgrade\",\n      \"description\": \"Express upgrade support\",\n      \"min_version\": \"2.1.0.0\"\n    },\n    {\n      \"name\": \"rolling_upgrade\",\n      \"description\": \"Rolling upgrade support\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"config_versioning\",\n      \"description\": \"Configurable versions support\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"datanode_non_root\",\n      \"description\": \"DataNode running as non-root support (AMBARI-7615)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\
 ": \"remove_ranger_hdfs_plugin_env\",\n      \"description\": \"HDFS removes Ranger env files (AMBARI-14299)\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"ranger\",\n      \"description\": \"Ranger Service support\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"ranger_tagsync_component\",\n      \"description\": \"Ranger Tagsync component support (AMBARI-14383)\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"phoenix\",\n      \"description\": \"Phoenix Service support\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"nfs\",\n      \"description\": \"NFS support\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"tez_for_spark\",\n      \"description\": \"Tez dependency for Spark\",\n      \"min_version\": \"2.2.0.0\",\n      \"max_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"timeline_state_store\",\n      \"description\": \"Yarn application timeline-se
 rvice supports state store property (AMBARI-11442)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"copy_tarball_to_hdfs\",\n      \"description\": \"Copy tarball to HDFS support (AMBARI-12113)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"spark_16plus\",\n      \"description\": \"Spark 1.6+\",\n      \"min_version\": \"2.4.0.0\"\n    },\n    {\n      \"name\": \"spark_thriftserver\",\n      \"description\": \"Spark Thrift Server\",\n      \"min_version\": \"2.3.2.0\"\n    },\n    {\n      \"name\": \"storm_kerberos\",\n      \"description\": \"Storm Kerberos support (AMBARI-7570)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"storm_ams\",\n      \"description\": \"Storm AMS integration (AMBARI-10710)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"create_kafka_broker_id\",\n      \"description\": \"Ambari should create Kafka Broker Id (AMBARI-12678)\",\n      \"min_version\": \"2.2
 .0.0\",\n      \"max_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"kafka_listeners\",\n      \"description\": \"Kafka listeners (AMBARI-10984)\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"kafka_kerberos\",\n      \"description\": \"Kafka Kerberos support (AMBARI-10984)\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"pig_on_tez\",\n      \"description\": \"Pig on Tez support (AMBARI-7863)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"ranger_usersync_non_root\",\n      \"description\": \"Ranger Usersync as non-root user (AMBARI-10416)\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"ranger_audit_db_support\",\n      \"description\": \"Ranger Audit to DB support\",\n      \"min_version\": \"2.2.0.0\",\n      \"max_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"accumulo_kerberos_user_auth\",\n      \"description\": \"Accumulo Kerberos User Auth (AMBARI-10163)\",\n
       \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"knox_versioned_data_dir\",\n      \"description\": \"Use versioned data dir for Knox (AMBARI-13164)\",\n      \"min_version\": \"2.3.2.0\"\n    },\n    {\n      \"name\": \"knox_sso_topology\",\n      \"description\": \"Knox SSO Topology support (AMBARI-13975)\",\n      \"min_version\": \"2.3.8.0\"\n    },\n    {\n      \"name\": \"atlas_rolling_upgrade\",\n      \"description\": \"Rolling upgrade support for Atlas\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"oozie_admin_user\",\n      \"description\": \"Oozie install user as an Oozie admin user (AMBARI-7976)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"oozie_create_hive_tez_configs\",\n      \"description\": \"Oozie create configs for Ambari Hive and Tez deployments (AMBARI-8074)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"oozie_setup_shared_lib\",\n      \"description\": \"Oozie 
 setup tools used to shared Oozie lib to HDFS (AMBARI-7240)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"oozie_host_kerberos\",\n      \"description\": \"Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)\",\n      \"min_version\": \"2.0.0.0\",\n      \"max_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"falcon_extensions\",\n      \"description\": \"Falcon Extension\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"hive_metastore_upgrade_schema\",\n      \"description\": \"Hive metastore upgrade schema support (AMBARI-11176)\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"hive_server_interactive\",\n      \"description\": \"Hive server interactive support (AMBARI-15573)\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"hive_webhcat_specific_configs\",\n      \"description\": \"Hive webhcat specific configurations support (AMBARI-12364)\",\n      \"min_ver
 sion\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"hive_purge_table\",\n      \"description\": \"Hive purge table support (AMBARI-12260)\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"hive_server2_kerberized_env\",\n      \"description\": \"Hive server2 working on kerberized environment (AMBARI-13749)\",\n      \"min_version\": \"2.2.3.0\",\n      \"max_version\": \"2.2.5.0\"\n    },\n    {\n      \"name\": \"hive_env_heapsize\",\n      \"description\": \"Hive heapsize property defined in hive-env (AMBARI-12801)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"ranger_kms_hsm_support\",\n      \"description\": \"Ranger KMS HSM support (AMBARI-15752)\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"ranger_log4j_support\",\n      \"description\": \"Ranger supporting log-4j properties (AMBARI-15681)\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"ranger_kerberos_support\",\n      \"desc
 ription\": \"Ranger Kerberos support\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"hive_metastore_site_support\",\n      \"description\": \"Hive Metastore site support\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"ranger_usersync_password_jceks\",\n      \"description\": \"Saving Ranger Usersync credentials in jceks\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"ranger_install_infra_client\",\n      \"description\": \"LogSearch Service support\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"hbase_home_directory\",\n      \"description\": \"Hbase home directory in HDFS needed for HBASE backup\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"spark_livy\",\n      \"description\": \"Livy as slave component of spark\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"atlas_ranger_plugin_support\",\n      \"description\": \"Atlas Ranger plug
 in support\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"ranger_pid_support\",\n      \"description\": \"Ranger Service support pid generation AMBARI-16756\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"ranger_kms_pid_support\",\n      \"description\": \"Ranger KMS Service support pid generation\",\n      \"min_version\": \"2.5.0.0\"\n    }\n  ]\n}",
+            "recovery_enabled": "true", 
+            "recovery_max_count": "6", 
+            "stack_root": "/usr/hdp", 
+            "repo_suse_rhel_template": "[{{repo_id}}]\nname={{repo_id}}\n{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n\npath=/\nenabled=1\ngpgcheck=0", 
+            "managed_hdfs_resource_property_names": "", 
+            "smokeuser": "ambari-qa"
+        }
+    }
+}
\ No newline at end of file