Posted to commits@ambari.apache.org by sm...@apache.org on 2016/12/09 21:56:53 UTC

[01/51] [abbrv] ambari git commit: Merge branch 'branch-feature-AMBARI-18456' into trunk

Repository: ambari
Updated Branches:
  refs/heads/branch-feature-AMBARI-18901 bb8309ed0 -> 354280307


http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/test/java/org/apache/ambari/server/state/host/HostTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/host/HostTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/host/HostTest.java
index 596f381..5c8d174 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/host/HostTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/host/HostTest.java
@@ -384,7 +384,7 @@ public class HostTest {
     clusters.mapHostToCluster("h1", "c1");
 
     ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
-    Config config = configFactory.createNew(c1, "global",
+    Config config = configFactory.createNew(c1, "global", "v1",
         new HashMap<String,String>() {{ put("a", "b"); put("x", "y"); }}, new HashMap<String, Map<String,String>>());
 
     try {
@@ -396,16 +396,14 @@ public class HostTest {
     }
 
 
-    config.setTag("v1");
     host.addDesiredConfig(c1.getClusterId(), true, "_test", config);
 
     Map<String, DesiredConfig> map = host.getDesiredConfigs(c1.getClusterId());
     Assert.assertTrue("Expect desired config to contain global", map.containsKey("global"));
     Assert.assertEquals("Expect global user to be '_test'", "_test", map.get("global").getUser());
 
-    config = configFactory.createNew(c1, "global",
+    config = configFactory.createNew(c1, "global", "v2",
         new HashMap<String,String>() {{ put("c", "d"); }}, new HashMap<String, Map<String,String>>());
-    config.setTag("v2");
     host.addDesiredConfig(c1.getClusterId(), true, "_test1", config);
 
     map = host.getDesiredConfigs(c1.getClusterId());

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
index 77e5142..5987af3 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
@@ -221,11 +221,8 @@ public class ServiceComponentHostTest {
 
     Cluster c = clusters.getCluster(clusterName);
     if (c.getConfig("time", String.valueOf(timestamp)) == null) {
-      Config config = configFactory.createNew (c, "time",
+      Config config = configFactory.createNew (c, "time", String.valueOf(timestamp),
           new HashMap<String, String>(), new HashMap<String, Map<String,String>>());
-      config.setTag(String.valueOf(timestamp));
-      c.addConfig(config);
-      config.persist();
     }
 
     switch (eventType) {
@@ -564,7 +561,6 @@ public class ServiceComponentHostTest {
     final ConfigGroup configGroup = configGroupFactory.createNew(cluster,
       "cg1", "t1", "", new HashMap<String, Config>(), new HashMap<Long, Host>());
 
-    configGroup.persist();
     cluster.addConfigGroup(configGroup);
 
     Map<String, Map<String,String>> actual =
@@ -815,17 +811,14 @@ public class ServiceComponentHostTest {
     final Host host = clusters.getHostsForCluster(clusterName).get(hostName);
     Assert.assertNotNull(host);
 
-    final Config c = configFactory.createNew(cluster, "hdfs-site",
+    final Config c = configFactory.createNew(cluster, "hdfs-site", "version3",
         new HashMap<String, String>() {{ put("dfs.journalnode.http-address", "http://goo"); }},
         new HashMap<String, Map<String,String>>());
-    c.setTag("version3");
-    c.persist();
-    cluster.addConfig(c);
+
     host.addDesiredConfig(cluster.getClusterId(), true, "user", c);
     ConfigGroup configGroup = configGroupFactory.createNew(cluster, "g1",
       "t1", "", new HashMap<String, Config>() {{ put("hdfs-site", c); }},
       new HashMap<Long, Host>() {{ put(hostEntity.getHostId(), host); }});
-    configGroup.persist();
     cluster.addConfigGroup(configGroup);
 
     // HDP-x/HDFS/hdfs-site updated host to changed property
@@ -876,16 +869,12 @@ public class ServiceComponentHostTest {
 
     sch1.updateActualConfigs(actual);
 
-    final Config c1 = configFactory.createNew(cluster, "core-site",
+    final Config c1 = configFactory.createNew(cluster, "core-site", "version2",
       new HashMap<String, String>() {{ put("fs.trash.interval", "400"); }},
       new HashMap<String, Map<String,String>>());
-    c1.setTag("version2");
-    c1.persist();
-    cluster.addConfig(c1);
     configGroup = configGroupFactory.createNew(cluster, "g2",
       "t2", "", new HashMap<String, Config>() {{ put("core-site", c1); }},
       new HashMap<Long, Host>() {{ put(hostEntity.getHostId(), host); }});
-    configGroup.persist();
     cluster.addConfigGroup(configGroup);
 
     Assert.assertTrue(sch1.convertToResponse(null).isStaleConfig());
@@ -1039,10 +1028,7 @@ public class ServiceComponentHostTest {
    * @param values the values for the config
    */
   private void makeConfig(Cluster cluster, String type, String tag, Map<String, String> values, Map<String, Map<String, String>> attributes) {
-    Config config = configFactory.createNew(cluster, type, values, attributes);
-    config.setTag(tag);
-    config.persist();
-    cluster.addConfig(config);
+    Config config = configFactory.createNew(cluster, type, tag, values, attributes);
     cluster.addDesiredConfig("user", Collections.singleton(config));
   }
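
The hunks above all follow the same migration merged from branch-feature-AMBARI-18456: the config tag is now passed directly to ConfigFactory.createNew(), and the dropped setTag()/persist()/addConfig() calls suggest the factory now persists and registers the new config itself. A minimal before/after sketch, assuming an existing Cluster, an injected org.apache.ambari.server.state.ConfigFactory, and illustrative props/attrs maps (not the actual test values):

    // Old call sites: create the config, then tag, persist and register it by hand.
    //   Config config = configFactory.createNew(cluster, "global", props, attrs);
    //   config.setTag("v1");
    //   config.persist();
    //   cluster.addConfig(config);

    // New call sites: the tag is the third createNew() argument and no
    // follow-up calls appear to be needed.
    Config config = configFactory.createNew(cluster, "global", "v1", props, attrs);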
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/test/java/org/apache/ambari/server/topology/AmbariContextTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/AmbariContextTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/AmbariContextTest.java
index 82526e7..fac5185 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/AmbariContextTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/AmbariContextTest.java
@@ -59,6 +59,7 @@ import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.Host;
@@ -66,13 +67,14 @@ import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.configgroup.ConfigGroup;
 import org.easymock.Capture;
+import org.easymock.EasyMock;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
+import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.ImmutableList;
 
 /**
  * AmbariContext unit tests
@@ -110,6 +112,7 @@ public class AmbariContextTest {
   private static final ConfigGroup configGroup2 = createMock(ConfigGroup.class);
   private static final Host host1 = createNiceMock(Host.class);
   private static final Host host2 = createNiceMock(Host.class);
+  private static final ConfigFactory configFactory = createNiceMock(ConfigFactory.class);
 
   private static final Collection<String> blueprintServices = new HashSet<String>();
   private static final Map<String, Service> clusterServices = new HashMap<String, Service>();
@@ -164,6 +167,9 @@ public class AmbariContextTest {
     type1Props.put("prop3", "val3");
     group1Configuration = new Configuration(group1Properties, null, bpConfiguration);
 
+    Map<String, String> group1ResolvedProperties = new HashMap<String, String>(bpType1Props);
+    group1ResolvedProperties.putAll(type1Props);
+
     // config type -> service mapping
     Map<String, String> configTypeServiceMapping = new HashMap<String, String>();
     configTypeServiceMapping.put("type1", "service1");
@@ -172,6 +178,28 @@ public class AmbariContextTest {
     configGroups.put(1L, configGroup1);
     configGroups.put(2L, configGroup2);
 
+    // config factory mock
+    Config type1Group1 = createNiceMock(Config.class);
+    expect(type1Group1.getType()).andReturn("type1").anyTimes();
+    expect(type1Group1.getTag()).andReturn("group1").anyTimes();
+    expect(type1Group1.getProperties()).andReturn(group1ResolvedProperties).anyTimes();
+    expect(configFactory.createReadOnly(EasyMock.eq("type1"), EasyMock.eq("group1"),
+        EasyMock.<Map<String, String>> anyObject(),
+        EasyMock.<Map<String, Map<String, String>>> anyObject())).andReturn(type1Group1).anyTimes();
+    replay(type1Group1);
+
+    Config type1Service1 = createNiceMock(Config.class);
+    expect(type1Service1.getType()).andReturn("type1").anyTimes();
+    expect(type1Service1.getTag()).andReturn("service1").anyTimes();
+    expect(type1Service1.getProperties()).andReturn(type1Props).anyTimes();
+    expect(configFactory.createReadOnly(EasyMock.eq("type1"), EasyMock.eq("service1"),
+        EasyMock.<Map<String, String>> anyObject(),
+        EasyMock.<Map<String, Map<String, String>>> anyObject())).andReturn(
+            type1Service1).anyTimes();
+    replay(type1Service1);
+
+    context.configFactory = configFactory;
+
     blueprintServices.add("service1");
     blueprintServices.add("service2");
 
@@ -222,17 +250,17 @@ public class AmbariContextTest {
   public void tearDown() throws Exception {
     verify(controller, clusterController, hostResourceProvider, serviceResourceProvider, componentResourceProvider,
         hostComponentResourceProvider, configGroupResourceProvider, topology, blueprint, stack, clusters,
-        cluster, group1Info, configHelper, configGroup1, configGroup2, host1, host2);
+        cluster, group1Info, configHelper, configGroup1, configGroup2, host1, host2, configFactory);
 
     reset(controller, clusterController, hostResourceProvider, serviceResourceProvider, componentResourceProvider,
         hostComponentResourceProvider, configGroupResourceProvider, topology, blueprint, stack, clusters,
-        cluster, group1Info, configHelper, configGroup1, configGroup2, host1, host2);
+        cluster, group1Info, configHelper, configGroup1, configGroup2, host1, host2, configFactory);
   }
 
   private void replayAll() {
     replay(controller, clusterController, hostResourceProvider, serviceResourceProvider, componentResourceProvider,
         hostComponentResourceProvider, configGroupResourceProvider, topology, blueprint, stack, clusters,
-        cluster, group1Info, configHelper, configGroup1, configGroup2, host1, host2);
+        cluster, group1Info, configHelper, configGroup1, configGroup2, host1, host2, configFactory);
   }
 
   @Test
@@ -330,6 +358,7 @@ public class AmbariContextTest {
     expect(clusterController.ensureResourceProvider(Resource.Type.ConfigGroup)).andReturn(configGroupResourceProvider).once();
     //todo: for now not using return value so just returning null
     expect(configGroupResourceProvider.createResources(capture(configGroupRequestCapture))).andReturn(null).once();
+
     // replay all mocks
     replayAll();
 
@@ -416,7 +445,6 @@ public class AmbariContextTest {
 
     expect(configGroup1.getHosts()).andReturn(Collections.singletonMap(2L, host2)).once();
     configGroup1.addHost(host1);
-    configGroup1.persistHostMapping();
 
     // replay all mocks
     replayAll();

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/test/java/org/apache/ambari/server/update/HostUpdateHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/update/HostUpdateHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/update/HostUpdateHelperTest.java
index f9dd5d1..3bb6c0a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/update/HostUpdateHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/update/HostUpdateHelperTest.java
@@ -49,6 +49,8 @@ import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigFactory;
+import org.apache.ambari.server.state.ConfigImpl;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.stack.OsFamily;
 import org.apache.ambari.server.utils.CollectionPresentationUtils;
@@ -62,6 +64,7 @@ import com.google.gson.JsonPrimitive;
 import com.google.inject.AbstractModule;
 import com.google.inject.Guice;
 import com.google.inject.Injector;
+import com.google.inject.assistedinject.FactoryModuleBuilder;
 
 import junit.framework.Assert;
 
@@ -212,16 +215,12 @@ public class HostUpdateHelperTest {
     Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
     Cluster mockCluster = easyMockSupport.createNiceMock(Cluster.class);
     ClusterEntity mockClusterEntity1 = easyMockSupport.createNiceMock(ClusterEntity.class);
-    ClusterEntity mockClusterEntity2 = easyMockSupport.createNiceMock(ClusterEntity.class);
     ClusterConfigEntity mockClusterConfigEntity1 = easyMockSupport.createNiceMock(ClusterConfigEntity.class);
     ClusterConfigEntity mockClusterConfigEntity2 = easyMockSupport.createNiceMock(ClusterConfigEntity.class);
-    ClusterConfigEntity mockClusterConfigEntity3 = easyMockSupport.createNiceMock(ClusterConfigEntity.class);
-    ClusterConfigEntity mockClusterConfigEntity4 = easyMockSupport.createNiceMock(ClusterConfigEntity.class);
     StackEntity mockStackEntity = easyMockSupport.createNiceMock(StackEntity.class);
     Map<String, Map<String, String>> clusterHostsToChange = new HashMap<>();
     Map<String, String> hosts = new HashMap<>();
     List<ClusterConfigEntity> clusterConfigEntities1 = new ArrayList<>();
-    List<ClusterConfigEntity> clusterConfigEntities2 = new ArrayList<>();
 
     final Injector mockInjector = Guice.createInjector(new AbstractModule() {
       @Override
@@ -231,6 +230,8 @@ public class HostUpdateHelperTest {
         bind(EntityManager.class).toInstance(entityManager);
         bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
         bind(ClusterDAO.class).toInstance(mockClusterDAO);
+
+        install(new FactoryModuleBuilder().implement(Config.class, ConfigImpl.class).build(ConfigFactory.class));
       }
     });
 
@@ -242,49 +243,42 @@ public class HostUpdateHelperTest {
     clusterConfigEntities1.add(mockClusterConfigEntity1);
     clusterConfigEntities1.add(mockClusterConfigEntity2);
 
-    clusterConfigEntities2.add(mockClusterConfigEntity3);
-    clusterConfigEntities2.add(mockClusterConfigEntity4);
-
     clusterHostsToChange.put("cl1", hosts);
 
-    expect(mockClusterDAO.findByName("cl1")).andReturn(mockClusterEntity1).once();
-    expect(mockClusterDAO.findById(1L)).andReturn(mockClusterEntity2).atLeastOnce();
+    expect(mockClusterDAO.findByName("cl1")).andReturn(mockClusterEntity1).atLeastOnce();
 
     expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
 
     expect(mockClusters.getCluster("cl1")).andReturn(mockCluster).once();
-    expect(mockCluster.getClusterId()).andReturn(1L).atLeastOnce();
+    expect(mockCluster.getClusterId()).andReturn(1L).anyTimes();
 
     expect(mockClusterEntity1.getClusterConfigEntities()).andReturn(clusterConfigEntities1).atLeastOnce();
-    expect(mockClusterEntity2.getClusterConfigEntities()).andReturn(clusterConfigEntities2).atLeastOnce();
 
-    expect(mockClusterConfigEntity1.getStack()).andReturn(mockStackEntity).once();
+    expect(mockClusterConfigEntity1.getClusterId()).andReturn(1L).atLeastOnce();
+    expect(mockClusterConfigEntity1.getConfigId()).andReturn(1L).atLeastOnce();
+    expect(mockClusterConfigEntity1.getStack()).andReturn(mockStackEntity).atLeastOnce();
     expect(mockClusterConfigEntity1.getData()).andReturn("{\"testProperty1\" : \"testValue_host1\", " +
             "\"testProperty2\" : \"testValue_host5\", \"testProperty3\" : \"testValue_host11\", " +
             "\"testProperty4\" : \"testValue_host55\"}").atLeastOnce();
     expect(mockClusterConfigEntity1.getTag()).andReturn("testTag1").atLeastOnce();
     expect(mockClusterConfigEntity1.getType()).andReturn("testType1").atLeastOnce();
     expect(mockClusterConfigEntity1.getVersion()).andReturn(1L).atLeastOnce();
+    expect(mockClusterDAO.findConfig(1L)).andReturn(mockClusterConfigEntity1).atLeastOnce();
 
-    expect(mockClusterConfigEntity2.getStack()).andReturn(mockStackEntity).once();
+    expect(mockClusterConfigEntity2.getClusterId()).andReturn(1L).atLeastOnce();
+    expect(mockClusterConfigEntity2.getConfigId()).andReturn(2L).anyTimes();
+    expect(mockClusterConfigEntity2.getStack()).andReturn(mockStackEntity).atLeastOnce();
     expect(mockClusterConfigEntity2.getData()).andReturn("{\"testProperty5\" : \"test_host1_test_host5_test_host11_test_host55\"}").atLeastOnce();
     expect(mockClusterConfigEntity2.getTag()).andReturn("testTag2").atLeastOnce();
     expect(mockClusterConfigEntity2.getType()).andReturn("testType2").atLeastOnce();
     expect(mockClusterConfigEntity2.getVersion()).andReturn(2L).atLeastOnce();
-
-    expect(mockClusterConfigEntity3.getTag()).andReturn("testTag1").atLeastOnce();
-    expect(mockClusterConfigEntity3.getType()).andReturn("testType1").atLeastOnce();
-    expect(mockClusterConfigEntity3.getVersion()).andReturn(1L).atLeastOnce();
-
-    expect(mockClusterConfigEntity4.getTag()).andReturn("testTag2").atLeastOnce();
-    expect(mockClusterConfigEntity4.getType()).andReturn("testType2").atLeastOnce();
-    expect(mockClusterConfigEntity4.getVersion()).andReturn(2L).atLeastOnce();
+    expect(mockClusterDAO.findConfig(2L)).andReturn(mockClusterConfigEntity2).atLeastOnce();
 
     Capture<String> dataCapture = EasyMock.newCapture();
-    mockClusterConfigEntity3.setData(EasyMock.capture(dataCapture));
+    mockClusterConfigEntity1.setData(EasyMock.capture(dataCapture));
     expectLastCall();
 
-    mockClusterConfigEntity4.setData("{\"testProperty5\":\"test_host5_test_host1_test_host55_test_host11\"}");
+    mockClusterConfigEntity2.setData("{\"testProperty5\":\"test_host5_test_host1_test_host55_test_host11\"}");
     expectLastCall();
 
     HostUpdateHelper hostUpdateHelper = new HostUpdateHelper(null, null, mockInjector);

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java b/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
index 29f40fb..5c77831 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
@@ -65,6 +65,9 @@ import org.apache.ambari.server.security.encryption.CredentialStoreService;
 import org.apache.ambari.server.stack.StackManagerFactory;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigFactory;
+import org.apache.ambari.server.state.ConfigImpl;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.HostComponentAdminState;
 import org.apache.ambari.server.state.Service;
@@ -126,6 +129,7 @@ public class StageUtilsTest extends EasyMockSupport {
         bind(HostRoleCommandDAO.class).toInstance(createNiceMock(HostRoleCommandDAO.class));
 
         install(new FactoryModuleBuilder().build(ExecutionCommandWrapperFactory.class));
+        install(new FactoryModuleBuilder().implement(Config.class, ConfigImpl.class).build(ConfigFactory.class));
       }
     });
 


[29/51] [abbrv] ambari git commit: AMBARI-19137. HDP 3.0 TP - move ZK, HDFS, YARN/MR into new common-services version (alejandro)

Posted by sm...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/YARN_widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/YARN_widgets.json b/ambari-server/src/main/resources/common-services/YARN/3.0.0/YARN_widgets.json
new file mode 100644
index 0000000..782f21d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/YARN_widgets.json
@@ -0,0 +1,670 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_yarn_dashboard",
+      "display_name": "Standard YARN Dashboard",
+      "section_name": "YARN_SUMMARY",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "Memory Utilization",
+          "description": "Percentage of total memory allocated to containers running in the cluster.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AllocatedMB",
+              "metric_path": "metrics/yarn/Queue/root/AllocatedMB",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AvailableMB",
+              "metric_path": "metrics/yarn/Queue/root/AvailableMB",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            }
+          ],
+          "values": [
+            {
+              "name": "Memory Utilization",
+              "value": "${(yarn.QueueMetrics.Queue=root.AllocatedMB / (yarn.QueueMetrics.Queue=root.AllocatedMB + yarn.QueueMetrics.Queue=root.AvailableMB)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "CPU Utilization",
+          "description": "Percentage of total virtual cores allocated to containers running in the cluster.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AllocatedVCores",
+              "metric_path": "metrics/yarn/Queue/root/AllocatedVCores",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AvailableVCores",
+              "metric_path": "metrics/yarn/Queue/root/AvailableVCores",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            }
+          ],
+          "values": [
+            {
+              "name": "Total Allocatable CPU Utilized across NodeManager",
+              "value": "${(yarn.QueueMetrics.Queue=root.AllocatedVCores / (yarn.QueueMetrics.Queue=root.AllocatedVCores + yarn.QueueMetrics.Queue=root.AvailableVCores)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Bad Local Disks",
+          "description": "Number of unhealthy local disks across all NodeManagers.",
+          "widget_type": "NUMBER",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.BadLocalDirs",
+              "metric_path": "metrics/yarn/BadLocalDirs",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.BadLogDirs",
+              "metric_path": "metrics/yarn/BadLogDirs",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Number of unhealthy local disks for NodeManager",
+              "value": "${yarn.NodeManagerMetrics.BadLocalDirs + yarn.NodeManagerMetrics.BadLogDirs}"
+            }
+          ],
+          "properties": {
+            "display_unit": ""
+          }
+        },
+        {
+          "widget_name": "Container Failures",
+          "description": "Percentage of all containers failing in the cluster.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersFailed._rate",
+              "metric_path": "metrics/yarn/ContainersFailed._rate",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersCompleted._rate",
+              "metric_path": "metrics/yarn/ContainersCompleted._rate",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersLaunched._rate",
+              "metric_path": "metrics/yarn/ContainersLaunched._rate",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersIniting._sum",
+              "metric_path": "metrics/yarn/ContainersIniting._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersKilled._rate",
+              "metric_path": "metrics/yarn/ContainersKilled._rate",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersRunning._sum",
+              "metric_path": "metrics/yarn/ContainersRunning._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Container Failures",
+              "value": "${(yarn.NodeManagerMetrics.ContainersFailed._rate/(yarn.NodeManagerMetrics.ContainersFailed._rate + yarn.NodeManagerMetrics.ContainersCompleted._rate + yarn.NodeManagerMetrics.ContainersLaunched._rate + yarn.NodeManagerMetrics.ContainersIniting._sum + yarn.NodeManagerMetrics.ContainersKilled._rate + yarn.NodeManagerMetrics.ContainersRunning._sum)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "App Failures",
+          "description": "Percentage of all launched applications failing in the cluster.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsFailed._rate",
+              "metric_path": "metrics/yarn/Queue/root/AppsFailed._rate",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsKilled._rate",
+              "metric_path": "metrics/yarn/Queue/root/AppsKilled._rate",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsPending",
+              "metric_path": "metrics/yarn/Queue/root/AppsPending",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsRunning",
+              "metric_path": "metrics/yarn/Queue/root/AppsRunning",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsSubmitted._rate",
+              "metric_path": "metrics/yarn/Queue/root/AppsSubmitted._rate",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsCompleted._rate",
+              "metric_path": "metrics/yarn/Queue/root/AppsCompleted._rate",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            }
+          ],
+          "values": [
+            {
+              "name": "App Failures",
+              "value": "${(yarn.QueueMetrics.Queue=root.AppsFailed._rate/(yarn.QueueMetrics.Queue=root.AppsFailed._rate + yarn.QueueMetrics.Queue=root.AppsKilled._rate + yarn.QueueMetrics.Queue=root.AppsPending + yarn.QueueMetrics.Queue=root.AppsRunning + yarn.QueueMetrics.Queue=root.AppsSubmitted._rate + yarn.QueueMetrics.Queue=root.AppsCompleted._rate)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Pending Apps",
+          "description": "Count of applications waiting for cluster resources to become available.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsPending",
+              "metric_path": "metrics/yarn/Queue/root/AppsPending",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            }
+          ],
+          "values": [
+            {
+              "name": "Pending Apps",
+              "value": "${yarn.QueueMetrics.Queue=root.AppsPending}"
+            }
+          ],
+          "properties": {
+            "display_unit": "Apps",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Cluster Memory",
+          "description": "Percentage of memory used across all NodeManager hosts.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "mem_total._sum",
+              "metric_path": "metrics/memory/mem_total._avg",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "mem_free._sum",
+              "metric_path": "metrics/memory/mem_free._avg",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Memory utilization",
+              "value": "${((mem_total._sum - mem_free._sum)/mem_total._sum) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Cluster Disk",
+          "description": "Sum of disk throughput for all NodeManager hosts.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "read_bps._sum",
+              "metric_path": "metrics/disk/read_bps._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "write_bps._sum",
+              "metric_path": "metrics/disk/write_bps._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Read throughput",
+              "value": "${read_bps._sum/1048576}"
+            },
+            {
+              "name": "Write throughput",
+              "value": "${write_bps._sum/1048576}"
+            }
+          ],
+          "properties": {
+            "display_unit": "Mbps",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Cluster Network",
+          "description": "Average of Network utilized across all NodeManager hosts.",
+          "default_section_name": "YARN_SUMMARY",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "pkts_in._avg",
+              "metric_path": "metrics/network/pkts_in._avg",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "pkts_out._avg",
+              "metric_path": "metrics/network/pkts_out._avg",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Packets In",
+              "value": "${pkts_in._avg}"
+            },
+            {
+              "name": "Packets Out",
+              "value": "${pkts_out._avg}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Cluster CPU",
+          "description": "Percentage of CPU utilized across all NodeManager hosts.",
+          "default_section_name": "YARN_SUMMARY",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "cpu_system._sum",
+              "metric_path": "metrics/cpu/cpu_system._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "cpu_user._sum",
+              "metric_path": "metrics/cpu/cpu_user._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "cpu_nice._sum",
+              "metric_path": "metrics/cpu/cpu_nice._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "cpu_idle._sum",
+              "metric_path": "metrics/cpu/cpu_idle._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "cpu_wio._sum",
+              "metric_path": "metrics/cpu/cpu_wio._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "CPU utilization",
+              "value": "${((cpu_system._sum + cpu_user._sum + cpu_nice._sum)/(cpu_system._sum + cpu_user._sum + cpu_nice._sum + cpu_idle._sum + cpu_wio._sum)) * 100}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1",
+            "display_unit": "%"
+          }
+        }
+      ]
+    },
+    {
+      "layout_name": "default_yarn_heatmap",
+      "display_name": "YARN Heatmaps",
+      "section_name": "YARN_HEATMAPS",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "YARN local disk space utilization per NodeManager",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc",
+              "metric_path": "metrics/yarn/GoodLocalDirsDiskUtilizationPerc",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc",
+              "metric_path": "metrics/yarn/GoodLogDirsDiskUtilizationPerc",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "YARN local disk space utilization per NodeManager",
+              "value": "${(yarn.NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc + yarn.NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc)/2}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Total Allocatable RAM Utilized per NodeManager",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.AllocatedGB",
+              "metric_path": "metrics/yarn/AllocatedGB",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.AvailableGB",
+              "metric_path": "metrics/yarn/AvailableGB",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Total Allocatable RAM Utilized per NodeManager",
+              "value": "${(yarn.NodeManagerMetrics.AllocatedGB/(yarn.NodeManagerMetrics.AvailableGB + yarn.NodeManagerMetrics.AllocatedGB)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Total Allocatable CPU Utilized per NodeManager",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.AllocatedVCores",
+              "metric_path": "metrics/yarn/AllocatedVCores",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.AvailableVCores",
+              "metric_path": "metrics/yarn/AvailableVCores",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Total Allocatable CPU Utilized per NodeManager",
+              "value": "${(yarn.NodeManagerMetrics.AllocatedVCores/(yarn.NodeManagerMetrics.AllocatedVCores + yarn.NodeManagerMetrics.AvailableVCores)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Container Failures",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersFailed._rate",
+              "metric_path": "metrics/yarn/ContainersFailed._rate",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersCompleted._rate",
+              "metric_path": "metrics/yarn/ContainersCompleted._rate",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersLaunched._rate",
+              "metric_path": "metrics/yarn/ContainersLaunched._rate",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersIniting",
+              "metric_path": "metrics/yarn/ContainersIniting",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersKilled._rate",
+              "metric_path": "metrics/yarn/ContainersKilled._rate",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersRunning",
+              "metric_path": "metrics/yarn/ContainersRunning",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Container Failures",
+              "value": "${(yarn.NodeManagerMetrics.ContainersFailed._rate/(yarn.NodeManagerMetrics.ContainersFailed._rate + yarn.NodeManagerMetrics.ContainersCompleted._rate + yarn.NodeManagerMetrics.ContainersLaunched._rate + yarn.NodeManagerMetrics.ContainersIniting + yarn.NodeManagerMetrics.ContainersKilled._rate + yarn.NodeManagerMetrics.ContainersRunning)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "NodeManager GC Time",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis",
+              "metric_path": "metrics/jvm/gcTimeMillis",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "NodeManager Garbage Collection Time",
+              "value": "${Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis}"
+            }
+          ],
+          "properties": {
+            "display_unit": "ms",
+            "max_limit": "10000"
+          }
+        },
+        {
+          "widget_name": "NodeManager JVM Heap Memory Used",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM",
+              "metric_path": "metrics/jvm/memHeapUsedM",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "NodeManager JVM Heap Memory Used",
+              "value": "${Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "512"
+          }
+        },
+        {
+          "widget_name": "Allocated Containers",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.AllocatedContainers",
+              "metric_path": "metrics/yarn/AllocatedContainers",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Allocated Containers",
+              "value": "${yarn.NodeManagerMetrics.AllocatedContainers}"
+            }
+          ],
+          "properties": {
+            "display_unit": "",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "NodeManager RAM Utilized",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.AllocatedGB",
+              "metric_path": "metrics/yarn/AllocatedGB",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "NodeManager RAM Utilized",
+              "value": "${yarn.NodeManagerMetrics.AllocatedGB}"
+            }
+          ],
+          "properties": {
+            "display_unit": "",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "NodeManager CPU Utilized",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.AllocatedVCores",
+              "metric_path": "metrics/yarn/AllocatedVCores",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "NodeManager CPU Utilized",
+              "value": "${yarn.NodeManagerMetrics.AllocatedVCores}"
+            }
+          ],
+          "properties": {
+            "display_unit": "",
+            "max_limit": "100"
+          }
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/alerts.json b/ambari-server/src/main/resources/common-services/YARN/3.0.0/alerts.json
new file mode 100644
index 0000000..c4a58bb
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/alerts.json
@@ -0,0 +1,392 @@
+{
+  "MAPREDUCE2": {
+    "service": [],
+    "HISTORYSERVER": [
+      {
+        "name": "mapreduce_history_server_webui",
+        "label": "History Server Web UI",
+        "description": "This host-level alert is triggered if the History Server Web UI is unreachable.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "WEB",
+          "uri": {
+            "http": "{{mapred-site/mapreduce.jobhistory.webapp.address}}",
+            "https": "{{mapred-site/mapreduce.jobhistory.webapp.https.address}}",
+            "https_property": "{{mapred-site/mapreduce.jobhistory.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "kerberos_keytab": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file}}",
+            "kerberos_principal": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-principal}}",
+            "connection_timeout": 5.0
+          },
+          "reporting": {
+            "ok": {
+              "text": "HTTP {0} response in {2:.3f}s"
+            },
+            "warning":{
+              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
+            },
+            "critical": {
+              "text": "Connection failed to {1} ({3})"
+            }
+          }
+        }
+      },
+      {
+        "name": "mapreduce_history_server_cpu",
+        "label": "History Server CPU Utilization",
+        "description": "This host-level alert is triggered if the percent of CPU utilization on the History Server exceeds the configured critical threshold. The threshold values are in percent.",
+        "interval": 5,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{mapred-site/mapreduce.jobhistory.webapp.address}}",
+            "kerberos_keytab": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file}}",
+            "kerberos_principal": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-principal}}",
+            "https": "{{mapred-site/mapreduce.jobhistory.webapp.https.address}}",
+            "https_property": "{{mapred-site/mapreduce.jobhistory.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "connection_timeout": 5.0
+          },
+          "reporting": {
+            "ok": {
+              "text": "{1} CPU, load {0:.1%}"
+            },
+            "warning": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 200
+            },
+            "critical": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 250
+            },
+            "units" : "%",
+            "type": "PERCENT"
+          },
+          "jmx": {
+            "property_list": [
+              "java.lang:type=OperatingSystem/SystemCpuLoad",
+              "java.lang:type=OperatingSystem/AvailableProcessors"
+            ],
+            "value": "{0} * 100"
+          }
+        }
+      },
+      {
+        "name": "mapreduce_history_server_rpc_latency",
+        "label": "History Server RPC Latency",
+        "description": "This host-level alert is triggered if the History Server operations RPC latency exceeds the configured critical threshold. Typically an increase in the RPC processing time increases the RPC queue length, causing the average queue wait time to increase for operations. The threshold values are in milliseconds.",
+        "interval": 5,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{mapred-site/mapreduce.jobhistory.webapp.address}}",
+            "https": "{{mapred-site/mapreduce.jobhistory.webapp.https.address}}",
+            "kerberos_keytab": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file}}",
+            "kerberos_principal": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-principal}}",
+            "https_property": "{{mapred-site/mapreduce.jobhistory.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "connection_timeout": 5.0
+          },
+          "reporting": {
+            "ok": {
+              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]"
+            },
+            "warning": {
+              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
+              "value": 3000
+            },          
+            "critical": {
+              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
+              "value": 5000
+            },
+            "units" : "ms"
+          },
+          "jmx": {
+            "property_list": [
+              "Hadoop:service=JobHistoryServer,name=RpcActivityForPort*/RpcQueueTimeAvgTime",
+              "Hadoop:service=JobHistoryServer,name=RpcActivityForPort*/RpcProcessingTimeAvgTime"
+            ],
+            "value": "{0}"
+          }
+        }
+      }
+    ]
+  },
+  "YARN": {
+    "service": [
+      {
+        "name": "yarn_nodemanager_webui_percent",
+        "label": "Percent NodeManagers Available",
+        "description": "This alert is triggered if the number of down NodeManagers in the cluster is greater than the configured critical threshold. It aggregates the results of NodeManager process checks.",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "AGGREGATE",
+          "alert_name": "yarn_nodemanager_webui",
+          "reporting": {
+            "ok": {
+              "text": "affected: [{1}], total: [{0}]"
+            },
+            "warning": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 10
+            },
+            "critical": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 30
+            },
+            "units" : "%",
+            "type": "PERCENT"
+          }
+        }
+      }
+    ],
+    "NODEMANAGER": [
+      {
+        "name": "yarn_nodemanager_webui",
+        "label": "NodeManager Web UI",
+        "description": "This host-level alert is triggered if the NodeManager Web UI is unreachable.",
+        "interval": 1,
+        "scope": "HOST",
+        "source": {
+          "type": "WEB",
+          "uri": {
+            "http": "{{yarn-site/yarn.nodemanager.webapp.address}}",
+            "https": "{{yarn-site/yarn.nodemanager.webapp.https.address}}",
+            "https_property": "{{yarn-site/yarn.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "default_port": 8042,
+            "kerberos_keytab": "{{yarn-site/yarn.nodemanager.webapp.spnego-keytab-file}}",
+            "kerberos_principal": "{{yarn-site/yarn.nodemanager.webapp.spnego-principal}}",
+            "connection_timeout": 5.0
+          },
+          "reporting": {
+            "ok": {
+              "text": "HTTP {0} response in {2:.3f}s"
+            },
+            "warning":{
+              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
+            },
+            "critical": {
+              "text": "Connection failed to {1} ({3})"
+            }
+          }
+        }
+      },
+      {
+        "name": "yarn_nodemanager_health",
+        "label": "NodeManager Health",
+        "description": "This host-level alert checks the node health property available from the NodeManager component.",
+        "interval": 1,
+        "scope": "HOST",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "YARN/2.1.0.2.0/package/alerts/alert_nodemanager_health.py",
+          "parameters": [
+            {
+              "name": "connection.timeout",
+              "display_name": "Connection Timeout",
+              "value": 5.0,
+              "type": "NUMERIC",
+              "description": "The maximum time before this alert is considered to be CRITICAL",
+              "units": "seconds",
+              "threshold": "CRITICAL"
+            }
+          ]
+        }
+      }
+    ],
+    "RESOURCEMANAGER": [
+      {
+        "name": "yarn_resourcemanager_webui",
+        "label": "ResourceManager Web UI",
+        "description": "This host-level alert is triggered if the ResourceManager Web UI is unreachable.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "WEB",
+          "uri": {
+            "http": "{{yarn-site/yarn.resourcemanager.webapp.address}}",
+            "https": "{{yarn-site/yarn.resourcemanager.webapp.https.address}}",
+            "https_property": "{{yarn-site/yarn.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "kerberos_keytab": "{{yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file}}",
+            "kerberos_principal": "{{yarn-site/yarn.resourcemanager.webapp.spnego-principal}}",
+            "connection_timeout": 5.0,
+            "high_availability": {
+              "alias_key" : "{{yarn-site/yarn.resourcemanager.ha.rm-ids}}",
+              "http_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.address.{{alias}}}}",
+              "https_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.https.address.{{alias}}}}"
+            }
+          },
+          "reporting": {
+            "ok": {
+              "text": "HTTP {0} response in {2:.3f}s"
+            },
+            "warning":{
+              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
+            },
+            "critical": {
+              "text": "Connection failed to {1} ({3})"
+            }
+          }
+        }
+      },
+      {
+        "name": "yarn_resourcemanager_cpu",
+        "label": "ResourceManager CPU Utilization",
+        "description": "This host-level alert is triggered if CPU utilization of the ResourceManager exceeds certain warning and critical thresholds. It checks the ResourceManager JMX Servlet for the SystemCPULoad property. The threshold values are in percent.",
+        "interval": 5,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{yarn-site/yarn.resourcemanager.webapp.address}}",
+            "https": "{{yarn-site/yarn.resourcemanager.webapp.https.address}}",
+            "kerberos_keytab": "{{yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file}}",
+            "kerberos_principal": "{{yarn-site/yarn.resourcemanager.webapp.spnego-principal}}",
+            "https_property": "{{yarn-site/yarn.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "connection_timeout": 5.0,
+            "high_availability": {
+              "alias_key" : "{{yarn-site/yarn.resourcemanager.ha.rm-ids}}",
+              "http_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.address.{{alias}}}}",
+              "https_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.https.address.{{alias}}}}"
+            }
+          },
+          "reporting": {
+            "ok": {
+              "text": "{1} CPU, load {0:.1%}"
+            },
+            "warning": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 200
+            },
+            "critical": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 250
+            },
+            "units" : "%",
+            "type": "PERCENT"
+          },
+          "jmx": {
+            "property_list": [
+              "java.lang:type=OperatingSystem/SystemCpuLoad",
+              "java.lang:type=OperatingSystem/AvailableProcessors"
+            ],
+            "value": "{0} * 100"
+          }
+        }
+      },
+      {
+        "name": "yarn_resourcemanager_rpc_latency",
+        "label": "ResourceManager RPC Latency",
+        "description": "This host-level alert is triggered if the ResourceManager operations RPC latency exceeds the configured critical threshold. Typically an increase in the RPC processing time increases the RPC queue length, causing the average queue wait time to increase for ResourceManager operations. The threshold values are in milliseconds.",
+        "interval": 5,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{yarn-site/yarn.resourcemanager.webapp.address}}",
+            "https": "{{yarn-site/yarn.resourcemanager.webapp.https.address}}",
+            "kerberos_keytab": "{{yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file}}",
+            "kerberos_principal": "{{yarn-site/yarn.resourcemanager.webapp.spnego-principal}}",
+            "https_property": "{{yarn-site/yarn.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "connection_timeout": 5.0,
+            "high_availability": {
+              "alias_key" : "{{yarn-site/yarn.resourcemanager.ha.rm-ids}}",
+              "http_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.address.{{alias}}}}",
+              "https_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.https.address.{{alias}}}}"
+            }
+          },
+          "reporting": {
+            "ok": {
+              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]"
+            },
+            "warning": {
+              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
+              "value": 3000
+            },          
+            "critical": {
+              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
+              "value": 5000
+            },
+            "units" : "ms"
+          },
+          "jmx": {
+            "property_list": [
+              "Hadoop:service=ResourceManager,name=RpcActivityForPort*/RpcQueueTimeAvgTime",
+              "Hadoop:service=ResourceManager,name=RpcActivityForPort*/RpcProcessingTimeAvgTime"
+            ],
+            "value": "{0}"
+          }
+        }
+      },
+      {
+        "name": "nodemanager_health_summary",
+        "label": "NodeManager Health Summary",
+        "description": "This service-level alert is triggered if there are unhealthy NodeManagers",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "YARN/2.1.0.2.0/package/alerts/alert_nodemanagers_summary.py",
+          "parameters": [
+            {
+              "name": "connection.timeout",
+              "display_name": "Connection Timeout",
+              "value": 5.0,
+              "type": "NUMERIC",
+              "description": "The maximum time before this alert is considered to be CRITICAL",
+              "units": "seconds",
+              "threshold": "CRITICAL"
+            }
+          ]
+        }
+      }
+    ],
+    "APP_TIMELINE_SERVER": [
+      {
+        "name": "yarn_app_timeline_server_webui",
+        "label": "App Timeline Web UI",
+        "description": "This host-level alert is triggered if the App Timeline Server Web UI is unreachable.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "WEB",
+          "uri": {
+            "http": "{{yarn-site/yarn.timeline-service.webapp.address}}/ws/v1/timeline",
+            "https": "{{yarn-site/yarn.timeline-service.webapp.https.address}}/ws/v1/timeline",
+            "https_property": "{{yarn-site/yarn.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "kerberos_keytab": "{{yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab}}",
+            "kerberos_principal": "{{yarn-site/yarn.timeline-service.http-authentication.kerberos.principal}}",
+            "connection_timeout": 5.0
+          },
+          "reporting": {
+            "ok": {
+              "text": "HTTP {0} response in {2:.3f}s"
+            },
+            "warning":{
+              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
+            },
+            "critical": {
+              "text": "Connection failed to {1} ({3})"
+            }
+          }
+        }
+      }
+    ]
+  }
+}
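
For reference, here is a minimal Python sketch (illustrative only, not the Ambari alert framework) of how a METRIC definition such as yarn_resourcemanager_cpu above can be evaluated: the two JMX attributes named in "property_list" are read from the OperatingSystem bean, the "{0} * 100" expression scales SystemCpuLoad to a percentage, and the result is compared against the warning/critical values. The evaluate_cpu_alert helper and the sample bean values are assumptions made for the sketch.

def evaluate_cpu_alert(jmx_bean, definition):
    # Mirrors the definition above: "property_list" names the JMX attributes,
    # "value": "{0} * 100" scales SystemCpuLoad, "reporting" holds thresholds.
    source = definition["source"]
    attrs = [p.split("/")[1] for p in source["jmx"]["property_list"]]
    cpu_load, processors = jmx_bean[attrs[0]], jmx_bean[attrs[1]]
    load_pct = cpu_load * 100  # the "{0} * 100" value expression
    reporting = source["reporting"]
    if load_pct >= reporting["critical"]["value"]:
        state = "CRITICAL"
    elif load_pct >= reporting["warning"]["value"]:
        state = "WARNING"
    else:
        state = "OK"
    return state, reporting[state.lower()]["text"].format(cpu_load, processors)

definition = {"source": {
    "jmx": {"property_list": ["java.lang:type=OperatingSystem/SystemCpuLoad",
                              "java.lang:type=OperatingSystem/AvailableProcessors"],
            "value": "{0} * 100"},
    "reporting": {"ok": {"text": "{1} CPU, load {0:.1%}"},
                  "warning": {"text": "{1} CPU, load {0:.1%}", "value": 200},
                  "critical": {"text": "{1} CPU, load {0:.1%}", "value": 250}}}}

print(evaluate_cpu_alert({"SystemCpuLoad": 0.35, "AvailableProcessors": 8}, definition))
# -> ('OK', '8 CPU, load 35.0%')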

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration-mapred/mapred-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration-mapred/mapred-env.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration-mapred/mapred-env.xml
new file mode 100644
index 0000000..2ac0bff
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration-mapred/mapred-env.xml
@@ -0,0 +1,104 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <property>
+    <name>mapred_log_dir_prefix</name>
+    <value>/var/log/hadoop-mapreduce</value>
+    <display-name>Mapreduce Log Dir Prefix</display-name>
+    <description>Mapreduce Log Dir Prefix</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapred_pid_dir_prefix</name>
+    <value>/var/run/hadoop-mapreduce</value>
+    <display-name>Mapreduce PID Dir Prefix</display-name>
+    <description>Mapreduce PID Dir Prefix</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapred_user</name>
+    <display-name>Mapreduce User</display-name>
+    <value>mapred</value>
+    <property-type>USER</property-type>
+    <description>Mapreduce User</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>jobhistory_heapsize</name>
+    <display-name>History Server heap size</display-name>
+    <value>900</value>
+    <description>Value for JobHistoryServer heap_size variable in hadoop-env.sh</description>
+    <value-attributes>
+      <unit>MB</unit>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapred_user_nofile_limit</name>
+    <value>32768</value>
+    <description>Max open files limit setting for MAPREDUCE user.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapred_user_nproc_limit</name>
+    <value>65536</value>
+    <description>Max number of processes limit setting for MAPREDUCE user.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>mapred-env template</display-name>
+    <description>This is the Jinja template for the mapred-env.sh file</description>
+    <value>
+      # export JAVA_HOME=/home/y/libexec/jdk1.6.0/
+
+      export HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}
+
+      export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA
+
+      #export HADOOP_JOB_HISTORYSERVER_OPTS=
+      #export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.
+      #export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.
+      #export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.
+      #export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default
+      #export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.
+    </value>
+    <value-attributes>
+      <type>content</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
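
The content property above is a Jinja template that ends up in mapred-env.sh, with values such as jobhistory_heapsize filled in from this same config type. Below is a minimal jinja2 sketch of that substitution; the real rendering goes through the YARN stack scripts and their params, so the two-line template here is just an excerpt.

from jinja2 import Template

template = ("export HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}\n"
            "export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA\n")

# Render with the default heap size from this file (900 MB).
print(Template(template).render(jobhistory_heapsize="900"))
# export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=900
# export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA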

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration-mapred/mapred-logsearch-conf.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration-mapred/mapred-logsearch-conf.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration-mapred/mapred-logsearch-conf.xml
new file mode 100644
index 0000000..3c0abbf
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration-mapred/mapred-logsearch-conf.xml
@@ -0,0 +1,80 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <property>
+    <name>service_name</name>
+    <display-name>Service name</display-name>
+    <description>Service name for Logsearch Portal (label)</description>
+    <value>MapReduce</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>component_mappings</name>
+    <display-name>Component mapping</display-name>
+    <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
+    <value>HISTORYSERVER:mapred_historyserver</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>Logfeeder Config</display-name>
+    <description>Metadata Jinja template for Logfeeder which contains grok patterns for reading service-specific logs.</description>
+    <value>
+{
+  "input":[
+    {
+      "type":"mapred_historyserver",
+      "rowtype":"service",
+      "path":"{{default('/configurations/mapred-env/mapred_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/mapred-env/mapred_user', 'mapred')}}/mapred-{{default('configurations/mapred-env/mapred_user', 'mapred')}}-historyserver*.log"
+    }
+   ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "mapred_historyserver"
+          ]
+         }
+       },
+      "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\(%{JAVAFILE:file}:%{JAVAMETHOD:method}\\(%{INT:line_number}\\)\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+         }
+       }
+     }
+   ]
+ }
+    </value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
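
The message_pattern above is a grok expression for the historyserver's log4j layout (%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n). Below is a rough plain-regex approximation in Python for sanity-checking that a sample line matches before deploying the Logfeeder config; the sub-patterns are simplified stand-ins for the grok primitives, and the sample line is made up.

import re

# Named groups mirror the grok field names used in the definition above.
LOG_LINE = re.compile(
    r"^(?P<logtime>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})\s+"
    r"(?P<level>[A-Z]+)\s+"
    r"(?P<logger_name>[\w.$]+)\s+"
    r"\((?P<file>[\w.]+):(?P<method>[\w<>]+)\((?P<line_number>\d+)\)\)\s+-\s+"
    r"(?P<log_message>.*)", re.DOTALL)

sample = ("2016-12-08 13:58:49,123 INFO  hs.JobHistory (JobHistory.java:init(89)) "
          "- JobHistory Init")
print(LOG_LINE.match(sample).groupdict())
# {'logtime': '2016-12-08 13:58:49,123', 'level': 'INFO', ...}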

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration-mapred/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration-mapred/mapred-site.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration-mapred/mapred-site.xml
new file mode 100644
index 0000000..e51107a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration-mapred/mapred-site.xml
@@ -0,0 +1,540 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<!-- Put site-specific property overrides in this file. -->
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
+  <!-- i/o properties -->
+  <property>
+    <name>mapreduce.task.io.sort.mb</name>
+    <value>358</value>
+    <description>
+      The total amount of buffer memory to use while sorting files, in megabytes.
+      By default, gives each merge stream 1MB, which should minimize seeks.
+    </description>
+    <display-name>Sort Allocation Memory</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>2047</maximum>
+      <unit>MB</unit>
+      <increment-step>1</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>mapred-site</type>
+        <name>mapreduce.map.memory.mb</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.map.sort.spill.percent</name>
+    <value>0.7</value>
+    <description>
+      The soft limit in the serialization buffer. Once reached, a thread will
+      begin to spill the contents to disk in the background. Note that
+      collection will not block if this threshold is exceeded while a spill
+      is already in progress, so spills may be larger than this threshold when
+      it is set to less than 0.5.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.task.io.sort.factor</name>
+    <value>100</value>
+    <description>
+      The number of streams to merge at once while sorting files.
+      This determines the number of open file handles.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- map/reduce properties -->
+  <property>
+    <name>mapreduce.cluster.administrators</name>
+    <value> hadoop</value>
+    <description>
+      Administrators for MapReduce applications.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.reduce.shuffle.parallelcopies</name>
+    <value>30</value>
+    <description>
+      The default number of parallel transfers run by reduce during
+      the copy(shuffle) phase.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.map.speculative</name>
+    <value>false</value>
+    <description>
+      If true, then multiple instances of some map tasks
+      may be executed in parallel.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.reduce.speculative</name>
+    <value>false</value>
+    <description>
+      If true, then multiple instances of some reduce tasks may be
+      executed in parallel.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.job.reduce.slowstart.completedmaps</name>
+    <value>0.05</value>
+    <description>
+      Fraction of the number of maps in the job which should be complete before
+      reduces are scheduled for the job.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.job.counters.max</name>
+    <value>130</value>
+    <description>
+      Limit on the number of counters allowed per job.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.reduce.shuffle.merge.percent</name>
+    <value>0.66</value>
+    <description>
+      The usage threshold at which an in-memory merge will be
+      initiated, expressed as a percentage of the total memory allocated to
+      storing in-memory map outputs, as defined by
+      mapreduce.reduce.shuffle.input.buffer.percent.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.reduce.shuffle.input.buffer.percent</name>
+    <value>0.7</value>
+    <description>
+      The percentage of memory to be allocated from the maximum heap
+      size to storing map outputs during the shuffle.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.output.fileoutputformat.compress.type</name>
+    <value>BLOCK</value>
+    <description>
+      If the job outputs are to be compressed as SequenceFiles, how should
+      they be compressed? Should be one of NONE, RECORD or BLOCK.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.reduce.input.buffer.percent</name>
+    <value>0.0</value>
+    <description>
+      The percentage of memory, relative to the maximum heap size, to
+      retain map outputs during the reduce. When the shuffle is concluded, any
+      remaining map outputs in memory must consume less than this threshold before
+      the reduce can begin.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- copied from kryptonite configuration -->
+  <property>
+    <name>mapreduce.map.output.compress</name>
+    <value>false</value>
+    <description>
+      Should the outputs of the maps be compressed before being sent across the network? Uses SequenceFile compression.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.task.timeout</name>
+    <value>300000</value>
+    <description>
+      The number of milliseconds before a task will be
+      terminated if it neither reads an input, writes an output, nor
+      updates its status string.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.map.memory.mb</name>
+    <value>512</value>
+    <description>Virtual memory for single Map task</description>
+    <display-name>Map Memory</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>512</minimum>
+      <maximum>5120</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.scheduler.maximum-allocation-mb</name>
+      </property>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.scheduler.minimum-allocation-mb</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.reduce.memory.mb</name>
+    <value>1024</value>
+    <description>Virtual memory for single Reduce task</description>
+    <display-name>Reduce Memory</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>512</minimum>
+      <maximum>5120</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.scheduler.maximum-allocation-mb</name>
+      </property>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.scheduler.minimum-allocation-mb</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.shuffle.port</name>
+    <value>13562</value>
+    <description>
+      Default port that the ShuffleHandler will run on.
+      ShuffleHandler is a service run at the NodeManager to facilitate
+      transfers of intermediate Map outputs to requesting Reducers.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.jobhistory.intermediate-done-dir</name>
+    <value>/mr-history/tmp</value>
+    <description>
+      Directory where history files are written by MapReduce jobs.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.jobhistory.done-dir</name>
+    <value>/mr-history/done</value>
+    <description>
+      Directory where history files are managed by the MR JobHistory Server.
+    </description>
+    <property-type>NOT_MANAGED_HDFS_PATH</property-type>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.jobhistory.address</name>
+    <value>localhost:10020</value>
+    <description>Enter your JobHistoryServer hostname.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>mapreduce.jobhistory.webapp.address</name>
+    <value>localhost:19888</value>
+    <description>Enter your JobHistoryServer hostname.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>mapreduce.framework.name</name>
+    <value>yarn</value>
+    <description>
+      The runtime framework for executing MapReduce jobs. Can be one of local,
+      classic or yarn.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.app.mapreduce.am.staging-dir</name>
+    <value>/user</value>
+    <description>
+      The staging dir used while submitting jobs.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.app.mapreduce.am.resource.mb</name>
+    <value>512</value>
+    <description>The amount of memory the MR AppMaster needs.</description>
+    <display-name>AppMaster Memory</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>512</minimum>
+      <maximum>5120</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.scheduler.maximum-allocation-mb</name>
+      </property>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.scheduler.minimum-allocation-mb</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.app.mapreduce.am.command-opts</name>
+    <value>-Xmx410m</value>
+    <description>
+      Java opts for the MR App Master processes.
+      The following symbol, if present, will be interpolated: @taskid@ is replaced
+      by current TaskID. Any other occurrences of '@' will go unchanged.
+      For example, to enable verbose gc logging to a file named for the taskid in
+      /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
+      -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
+
+      Usage of -Djava.library.path can cause programs to no longer function if
+      hadoop native libraries are used. These values should instead be set as part
+      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
+      mapreduce.reduce.env config settings.
+    </description>
+    <display-name>MR AppMaster Java Heap Size</display-name>
+    <depends-on>
+      <property>
+        <type>mapred-site</type>
+        <name>yarn.app.mapreduce.am.resource.mb</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.app.mapreduce.am.admin-command-opts</name>
+    <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
+    <description>
+      Java opts for the MR App Master processes for admin purposes.
+      It will appear before the opts set by yarn.app.mapreduce.am.command-opts and
+      thus its options can be overridden by the user.
+
+      Usage of -Djava.library.path can cause programs to no longer function if
+      hadoop native libraries are used. These values should instead be set as part
+      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
+      mapreduce.reduce.env config settings.
+    </description>
+    <display-name>MR AppMaster Java Heap Size</display-name>
+    <depends-on>
+      <property>
+        <type>mapred-site</type>
+        <name>yarn.app.mapreduce.am.resource.mb</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.app.mapreduce.am.log.level</name>
+    <value>INFO</value>
+    <description>MR App Master process log level.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.admin.map.child.java.opts</name>
+    <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
+    <description>This property stores Java options for map tasks.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.admin.reduce.child.java.opts</name>
+    <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
+    <description>This property stores Java options for reduce tasks.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.application.classpath</name>
+    <value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value>
+    <description>
+      CLASSPATH for MR applications. A comma-separated list of CLASSPATH
+      entries.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.am.max-attempts</name>
+    <value>2</value>
+    <description>
+      The maximum number of application attempts. It is an
+      application-specific setting. It should not be larger than the global number
+      set by the ResourceManager; otherwise, it will be overridden. The default is
+      set to 2, to allow at least one retry for the AM.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.map.java.opts</name>
+    <value>-Xmx410m</value>
+    <description>
+      Larger heap-size for child jvms of maps.
+    </description>
+    <display-name>MR Map Java Heap Size</display-name>
+    <depends-on>
+      <property>
+        <type>mapred-site</type>
+        <name>mapreduce.map.memory.mb</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.reduce.java.opts</name>
+    <value>-Xmx756m</value>
+    <description>
+      Larger heap-size for child jvms of reduces.
+    </description>
+    <display-name>MR Reduce Java Heap Size</display-name>
+    <depends-on>
+      <property>
+        <type>mapred-site</type>
+        <name>mapreduce.reduce.memory.mb</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.map.log.level</name>
+    <value>INFO</value>
+    <description>
+      The logging level for the map task. The allowed levels are:
+      OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.reduce.log.level</name>
+    <value>INFO</value>
+    <description>
+      The logging level for the reduce task. The allowed levels are:
+      OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.admin.user.env</name>
+    <value>LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/Linux-amd64-64</value>
+    <description>
+      Additional execution environment entries for map and reduce task processes.
+      This is not an additive property. You must preserve the original value if
+      you want your map and reduce tasks to have access to native libraries (compression, etc.).
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.output.fileoutputformat.compress</name>
+    <value>false</value>
+    <description>
+      Should the job outputs be compressed?
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.jobhistory.http.policy</name>
+    <value>HTTP_ONLY</value>
+    <description>
+      This configures the HTTP endpoint for the JobHistoryServer web UI.
+      The following values are supported: HTTP_ONLY (service is provided only
+      on HTTP) and HTTPS_ONLY (service is provided only on HTTPS).
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.job.queuename</name>
+    <value>default</value>
+    <description>
+      Queue to which a job is submitted.
+    </description>
+    <depends-on>
+      <property>
+        <type>capacity-scheduler</type>
+        <name>yarn.scheduler.capacity.root.queues</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- These configs were inherited from HDP 2.2 -->
+  <property>
+    <name>mapreduce.reduce.shuffle.fetch.retry.enabled</name>
+    <value>1</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.reduce.shuffle.fetch.retry.interval-ms</name>
+    <value>1000</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.reduce.shuffle.fetch.retry.timeout-ms</name>
+    <value>30000</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.job.emit-timeline-data</name>
+    <value>false</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.jobhistory.bind-host</name>
+    <value>0.0.0.0</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- These configs were inherited from HDP 2.3 -->
+  <property>
+    <name>mapreduce.jobhistory.recovery.enable</name>
+    <value>true</value>
+    <description>Enable the history server to store server state and recover
+      server state upon startup.  If enabled then
+      mapreduce.jobhistory.recovery.store.class must be specified.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.jobhistory.recovery.store.class</name>
+    <value>org.apache.hadoop.mapreduce.v2.hs.HistoryServerLeveldbStateStoreService</value>
+    <description>The HistoryServerStateStoreService class to store history server
+      state for recovery.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.jobhistory.recovery.store.leveldb.path</name>
+    <value>/hadoop/mapreduce/jhs</value>
+    <description>The URI where history server state will be stored if HistoryServerLeveldbStateStoreService
+      is configured as the recovery storage class.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
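
The memory settings above come in pairs: a YARN container size (mapreduce.map.memory.mb, mapreduce.reduce.memory.mb, yarn.app.mapreduce.am.resource.mb) and a child JVM heap (the -Xmx in the matching java.opts / command-opts values). Here is a small sketch of the usual sizing heuristic, reserving roughly 20% of the container for non-heap overhead; the 0.8 factor is an assumption in the same ballpark as the shipped 512 MB / -Xmx410m defaults, not a value taken from this patch.

def heap_opts(container_mb, heap_fraction=0.8):
    # Leave roughly 20% of the container for non-heap overhead (metaspace,
    # native buffers, thread stacks).
    return "-Xmx%dm" % int(container_mb * heap_fraction)

print(heap_opts(512))   # -Xmx409m, close to the shipped -Xmx410m default
print(heap_opts(1024))  # -Xmx819m for the 1024 MB reduce container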

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/capacity-scheduler.xml
new file mode 100644
index 0000000..320a629
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/capacity-scheduler.xml
@@ -0,0 +1,183 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <property>
+    <name>yarn.scheduler.capacity.maximum-applications</name>
+    <value>10000</value>
+    <description>
+      Maximum number of applications that can be pending and running.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
+    <value>0.2</value>
+    <description>
+      Maximum percent of resources in the cluster which can be used to run
+      application masters, i.e. it controls the number of concurrently running
+      applications.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.root.queues</name>
+    <value>default</value>
+    <description>
+      The queues at this level (root is the root queue).
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.root.capacity</name>
+    <value>100</value>
+    <description>
+      The total capacity as a percentage out of 100 for this queue.
+      If it has child queues, then this includes their capacity as well.
+      The child queues' capacities should add up to their parent queue's capacity
+      or less.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.root.default.capacity</name>
+    <value>100</value>
+    <description>Default queue target capacity.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
+    <value>1</value>
+    <description>
+      Default queue user limit, a percentage from 0.0 to 1.0.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
+    <value>100</value>
+    <description>
+      The maximum capacity of the default queue. 
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.root.default.state</name>
+    <value>RUNNING</value>
+    <description>
+      The state of the default queue. State can be one of RUNNING or STOPPED.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
+    <value>*</value>
+    <description>
+      The ACL of who can submit jobs to the default queue.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.root.default.acl_administer_jobs</name>
+    <value>*</value>
+    <description>
+      The ACL of who can administer jobs on the default queue.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.root.acl_administer_queue</name>
+    <value>*</value>
+    <description>
+      The ACL for who can administer this queue i.e. change sub-queue 
+      allocations.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.node-locality-delay</name>
+    <value>40</value>
+    <description>
+      Number of missed scheduling opportunities after which the CapacityScheduler
+      attempts to schedule rack-local containers.
+      Typically this should be set to the number of nodes in the cluster. By default it is set
+      to approximately the number of nodes in one rack, which is 40.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.default.minimum-user-limit-percent</name>
+    <value>100</value>
+    <description>
+      Default minimum queue resource limit depends on the number of users who have submitted applications.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- These configs were inherited from HDP 2.2 -->
+  <property>
+    <name>yarn.scheduler.capacity.resource-calculator</name>
+    <description>
+      The ResourceCalculator implementation to be used to compare Resources in the scheduler.
+      The default, org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator, only uses
+      Memory, while DominantResourceCalculator uses Dominant-resource to compare multi-dimensional
+      resources such as Memory, CPU, etc. A Java ResourceCalculator class name is expected.
+    </description>
+    <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
+    <display-name>CPU Scheduling</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>org.apache.hadoop.yarn.util.resource.DominantResourceCalculator</value>
+          <label>Enabled</label>
+        </entry>
+        <entry>
+          <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
+          <label>Disabled</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.root.accessible-node-labels</name>
+    <value>*</value>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- In HDP 2.3, yarn.scheduler.capacity.root.default-node-label-expression was deleted -->
+
+  <!-- These configs were inherited from HDP 2.5 -->
+  <property>
+    <name>capacity-scheduler</name>
+    <description>Enter key=value (one per line) for all properties of capacity-scheduler.xml</description>
+    <depends-on>
+      <property>
+        <type>hive-interactive-env</type>
+        <name>enable_hive_interactive</name>
+      </property>
+      <property>
+        <type>hive-interactive-env</type>
+        <name>llap_queue_capacity</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
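
The trailing capacity-scheduler property accepts the whole scheduler configuration as key=value lines. Below is a small hypothetical helper (not part of Ambari) that parses such a blob and checks the constraint stated above: the child queues' capacities should add up to the root queue's capacity or less.

def root_capacity_ok(blob):
    # Parse "key=value (one per line)" into a dict, then sum the capacities
    # of the queues listed under yarn.scheduler.capacity.root.queues.
    props = dict(line.split("=", 1) for line in blob.strip().splitlines() if "=" in line)
    queues = props["yarn.scheduler.capacity.root.queues"].split(",")
    total = sum(float(props["yarn.scheduler.capacity.root.%s.capacity" % q]) for q in queues)
    return total <= float(props.get("yarn.scheduler.capacity.root.capacity", 100))

example = """
yarn.scheduler.capacity.root.queues=default
yarn.scheduler.capacity.root.capacity=100
yarn.scheduler.capacity.root.default.capacity=100
"""
print(root_capacity_ok(example))  # True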

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/ranger-yarn-audit.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/ranger-yarn-audit.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/ranger-yarn-audit.xml
new file mode 100644
index 0000000..a6b1baa
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/ranger-yarn-audit.xml
@@ -0,0 +1,177 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>xasecure.audit.is.enabled</name>
+    <value>true</value>
+    <description>Is Audit enabled?</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db</name>
+    <value>false</value>
+    <display-name>Audit to DB</display-name>
+    <description>Is Audit to DB enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.db</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.jdbc.url</name>
+    <value>{{audit_jdbc_url}}</value>
+    <description>Audit DB JDBC URL</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.user</name>
+    <value>{{xa_audit_db_user}}</value>
+    <description>Audit DB JDBC User</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.password</name>
+    <value>crypted</value>
+    <property-type>PASSWORD</property-type>
+    <description>Audit DB JDBC Password</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.jdbc.driver</name>
+    <value>{{jdbc_driver}}</value>
+    <description>Audit DB JDBC Driver</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.credential.provider.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>Credential file store</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.batch.filespool.dir</name>
+    <value>/var/log/hadoop/yarn/audit/db/spool</value>
+    <description>/var/log/hadoop/yarn/audit/db/spool</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs</name>
+    <value>true</value>
+    <display-name>Audit to HDFS</display-name>
+    <description>Is Audit to HDFS enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.hdfs</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs.dir</name>
+    <value>hdfs://NAMENODE_HOSTNAME:8020/ranger/audit</value>
+    <description>HDFS folder to write audit to; make sure the service user has the required permissions</description>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.hdfs.dir</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
+    <value>/var/log/hadoop/yarn/audit/hdfs/spool</value>
+    <description>/var/log/hadoop/yarn/audit/hdfs/spool</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr</name>
+    <value>false</value>
+    <display-name>Audit to SOLR</display-name>
+    <description>Is Solr audit enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.solr</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.urls</name>
+    <value/>
+    <description>Solr URL</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.urls</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.zookeepers</name>
+    <value>NONE</value>
+    <description>Solr Zookeeper string</description>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.zookeepers</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
+    <value>/var/log/hadoop/yarn/audit/solr/spool</value>
+    <description>/var/log/hadoop/yarn/audit/solr/spool</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.provider.summary.enabled</name>
+    <value>false</value>
+    <display-name>Audit provider summary enabled</display-name>
+    <description>Enable Summary audit?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/ranger-yarn-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/ranger-yarn-plugin-properties.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/ranger-yarn-plugin-properties.xml
new file mode 100644
index 0000000..97867cc
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/ranger-yarn-plugin-properties.xml
@@ -0,0 +1,82 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <property>
+    <name>policy_user</name>
+    <value>ambari-qa</value>
+    <display-name>Policy user for YARN</display-name>
+    <description>This user must be a system user and must also be present in the Ranger admin portal</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hadoop.rpc.protection</name>
+    <value/>
+    <description>Used for repository creation on ranger admin</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>common.name.for.certificate</name>
+    <value/>
+    <description>Common name for the certificate; this value should match what is specified in the repo within Ranger admin</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger-yarn-plugin-enabled</name>
+    <value>No</value>
+    <display-name>Enable Ranger for YARN</display-name>
+    <description>Enable Ranger YARN plugin?</description>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>ranger-yarn-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_USERNAME</name>
+    <value>yarn</value>
+    <display-name>Ranger repository config user</display-name>
+    <description>Used for repository creation on ranger admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_PASSWORD</name>
+    <value>yarn</value>
+    <display-name>Ranger repository config password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Used for repository creation on ranger admin</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>


[12/51] [abbrv] ambari git commit: AMBARI-18737 Perf: Allow Kerberizing the PERF stack (dsen)

Posted by sm...@apache.org.
AMBARI-18737 Perf: Allow Kerberizing the PERF stack (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/338c2c5b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/338c2c5b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/338c2c5b

Branch: refs/heads/branch-feature-AMBARI-18901
Commit: 338c2c5be000fb6a681bc5c82f8c4cb20c192500
Parents: 9cc66e4
Author: Dmytro Sen <ds...@apache.org>
Authored: Thu Dec 8 13:55:33 2016 +0200
Committer: Dmytro Sen <ds...@apache.org>
Committed: Thu Dec 8 13:58:49 2016 +0200

----------------------------------------------------------------------
 .../libraries/script/dummy.py                   |  11 +
 .../PERF/1.0/configuration/cluster-env.xml      |  27 +-
 .../resources/stacks/PERF/1.0/kerberos.json     |  78 ++++
 .../GRUMPY/configuration/grumpy-site.xml        |  12 +
 .../PERF/1.0/services/GRUMPY/kerberos.json      |  78 ++++
 .../services/GRUMPY/package/scripts/dwarf.py    |   4 +
 .../services/HAPPY/configuration/happy-site.xml |  12 +
 .../PERF/1.0/services/HAPPY/kerberos.json       |  78 ++++
 .../1.0/services/HAPPY/package/scripts/dwarf.py |   4 +
 .../HBASE/package/scripts/hbase_master.py       |   4 +
 .../HBASE/package/scripts/hbase_regionserver.py |   4 +
 .../package/scripts/phoenix_queryserver.py      |   4 +
 .../services/HDFS/package/scripts/datanode.py   |   4 +
 .../HDFS/package/scripts/journalnode.py         |   4 +
 .../services/HDFS/package/scripts/namenode.py   |   4 +
 .../services/HDFS/package/scripts/nfsgateway.py |   4 +
 .../services/HDFS/package/scripts/snamenode.py  |   4 +
 .../KERBEROS/configuration/kerberos-env.xml     | 380 +++++++++++++++
 .../KERBEROS/configuration/krb5-conf.xml        | 109 +++++
 .../PERF/1.0/services/KERBEROS/kerberos.json    |  17 +
 .../PERF/1.0/services/KERBEROS/metainfo.xml     | 123 +++++
 .../KERBEROS/package/scripts/kerberos_client.py |  80 ++++
 .../KERBEROS/package/scripts/kerberos_common.py | 468 +++++++++++++++++++
 .../services/KERBEROS/package/scripts/params.py | 200 ++++++++
 .../KERBEROS/package/scripts/service_check.py   |  30 ++
 .../KERBEROS/package/scripts/status_params.py   |  32 ++
 .../services/KERBEROS/package/scripts/utils.py  | 105 +++++
 .../KERBEROS/package/templates/krb5_conf.j2     |  54 +++
 .../SLEEPY/configuration/sleepy-site.xml        |  12 +
 .../PERF/1.0/services/SLEEPY/kerberos.json      |  78 ++++
 .../services/SLEEPY/package/scripts/dwarf.py    |   4 +
 .../services/SNOW/configuration/snow-site.xml   |  12 +
 .../stacks/PERF/1.0/services/SNOW/kerberos.json |  78 ++++
 .../services/SNOW/package/scripts/snow_white.py |   4 +
 .../scripts/application_timeline_server.py      |   4 +
 .../YARN/package/scripts/historyserver.py       |   4 +
 .../YARN/package/scripts/nodemanager.py         |   4 +
 .../YARN/package/scripts/resourcemanager.py     |   4 +
 .../PERF/1.0/services/ZOOKEEPER/kerberos.json   |  39 ++
 .../package/scripts/zookeeper_server.py         |   4 +
 40 files changed, 2180 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-common/src/main/python/resource_management/libraries/script/dummy.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/dummy.py b/ambari-common/src/main/python/resource_management/libraries/script/dummy.py
index 2a48de3..3dcece5 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/dummy.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/dummy.py
@@ -31,6 +31,9 @@ from ambari_commons.constants import AMBARI_SUDO_BINARY
 from resource_management.core.exceptions import ComponentIsNotRunning
 
 
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+
 class Dummy(Script):
   """
  Dummy component to be used for performance testing since it doesn't actually run a service.
@@ -75,6 +78,14 @@ class Dummy(Script):
     print "Start"
     self.prepare()
 
+    if self.config['configurations']['cluster-env']['security_enabled']:
+      print "Executing kinit... "
+      kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+      principal_replaced = self.config['configurations'][self.principal_conf_name][self.principal_name].replace("_HOST", self.host_name)
+      keytab_path_replaced = self.config['configurations'][self.keytab_conf_name][self.keytab_name].replace("_HOST", self.host_name)
+      Execute("%s -kt %s %s" % (kinit_path_local, keytab_path_replaced, principal_replaced),
+              user="root")
+
     if not os.path.isfile(self.pid_file):
       print "Creating pid file: %s" % self.pid_file
 

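The security block added to Dummy.start() above pulls the component's principal and keytab names from the command configurations, substitutes _HOST with the agent's host name, and shells out to kinit. A minimal standalone sketch of that substitution (the config values, host name, and default kinit path below are made-up examples, not taken from this patch; the per-component scripts later in this commit supply the real principal_conf_name/keytab_conf_name attributes):

# Minimal sketch, not part of the patch: mirrors the _HOST substitution in Dummy.start().
config = {
    "configurations": {
        "cluster-env": {"security_enabled": "true"},
        "hdfs-site": {
            "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM",
            "dfs.datanode.keytab.file": "/home/_HOST/etc/security/keytabs/dn.service.keytab",
        },
    },
}

def build_kinit_command(conf, host_name, kinit_path="/usr/bin/kinit",
                        principal_conf_name="hdfs-site",
                        principal_name="dfs.datanode.kerberos.principal",
                        keytab_conf_name="hdfs-site",
                        keytab_name="dfs.datanode.keytab.file"):
    # Resolve principal and keytab from the command configurations, then replace _HOST.
    principal = conf["configurations"][principal_conf_name][principal_name].replace("_HOST", host_name)
    keytab = conf["configurations"][keytab_conf_name][keytab_name].replace("_HOST", host_name)
    return "%s -kt %s %s" % (kinit_path, keytab, principal)

print(build_kinit_command(config, "perf-1-1.example.com"))
# /usr/bin/kinit -kt /home/perf-1-1.example.com/etc/security/keytabs/dn.service.keytab dn/perf-1-1.example.com@EXAMPLE.COM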
http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
index 7d6dac4..7df00ee 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
@@ -71,5 +71,30 @@
     <description>Security</description>
     <on-ambari-upgrade add="false"/>
   </property>
-
+  <property>
+    <name>user_group</name>
+    <display-name>Hadoop Group</display-name>
+    <value>hadoop</value>
+    <property-type>GROUP</property-type>
+    <description>Hadoop user group.</description>
+    <value-attributes>
+      <type>user</type>
+      <visible>false</visible>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>smokeuser</name>
+    <display-name>Smoke User</display-name>
+    <value>ambari-qa</value>
+    <property-type>USER</property-type>
+    <description>User executing service checks</description>
+    <value-attributes>
+      <type>user</type>
+      <visible>false</visible>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/kerberos.json b/ambari-server/src/main/resources/stacks/PERF/1.0/kerberos.json
new file mode 100644
index 0000000..a2a667b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/kerberos.json
@@ -0,0 +1,78 @@
+{
+  "properties": {
+    "realm": "${kerberos-env/realm}",
+    "keytab_dir": "/home/_HOST/etc/security/keytabs",
+    "additional_realms": ""
+  },
+  "identities": [
+    {
+      "name": "spnego",
+      "principal": {
+        "value": "HTTP/_HOST@${realm}",
+        "type": "service"
+      },
+      "keytab": {
+        "file": "${keytab_dir}/spnego.service.keytab",
+        "owner": {
+          "name": "root",
+          "access": "r"
+        },
+        "group": {
+          "name": "${cluster-env/user_group}",
+          "access": "r"
+        }
+      }
+    },
+    {
+      "name": "smokeuser",
+      "principal": {
+        "value": "${cluster-env/smokeuser}-${cluster_name|toLower()}@${realm}",
+        "type": "user",
+        "configuration": "cluster-env/smokeuser_principal_name",
+        "local_username": "${cluster-env/smokeuser}"
+      },
+      "keytab": {
+        "file": "${keytab_dir}/smokeuser.headless.keytab",
+        "owner": {
+          "name": "${cluster-env/smokeuser}",
+          "access": "r"
+        },
+        "group": {
+          "name": "${cluster-env/user_group}",
+          "access": "r"
+        },
+        "configuration": "cluster-env/smokeuser_keytab"
+      }
+    }
+  ],
+  "services": [
+    {
+      "name": "AMBARI",
+      "components": [
+        {
+          "name": "AMBARI_SERVER",
+          "identities": [
+            {
+              "name": "ambari-server",
+              "principal": {
+                "value": "ambari-server-${cluster_name|toLower()}@${realm}",
+                "type": "user",
+                "configuration": "cluster-env/ambari_principal_name"
+              },
+              "keytab": {
+                "file": "/etc/security/keytabs/ambari.server.keytab",
+                "owner": {
+                  "access": "r"
+                }
+              }
+            },
+            {
+              "name" : "ambari-server_spnego",
+              "reference" : "/spnego"
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}

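The descriptor above uses Ambari's Kerberos variable-replacement syntax: ${name} resolves against the descriptor's "properties" block, ${config-type/property} resolves against cluster configurations, and pipe functions such as |toLower() transform the result. A toy resolver for the two plain forms, illustration only and not Ambari's implementation (pipe functions are deliberately left untouched):

import re

def resolve(value, properties, configurations):
    # Replace ${name} and ${config-type/property}; leave anything with a pipe function alone.
    def repl(match):
        name = match.group(1)
        if '/' in name:
            config_type, prop = name.split('/', 1)
            return configurations.get(config_type, {}).get(prop, match.group(0))
        return properties.get(name, match.group(0))
    return re.sub(r'\$\{([^}|]+)\}', repl, value)

props = {"realm": "EXAMPLE.COM", "keytab_dir": "/home/_HOST/etc/security/keytabs"}
configs = {"cluster-env": {"smokeuser": "ambari-qa", "user_group": "hadoop"}}

print(resolve("${cluster-env/smokeuser}@${realm}", props, configs))
# ambari-qa@EXAMPLE.COM
print(resolve("${keytab_dir}/smokeuser.headless.keytab", props, configs))
# /home/_HOST/etc/security/keytabs/smokeuser.headless.keytab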
http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/GRUMPY/configuration/grumpy-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/GRUMPY/configuration/grumpy-site.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/GRUMPY/configuration/grumpy-site.xml
index 7952789..3cf8ea1 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/GRUMPY/configuration/grumpy-site.xml
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/GRUMPY/configuration/grumpy-site.xml
@@ -33,4 +33,16 @@
     </value-attributes>
     <on-ambari-upgrade add="false"/>
   </property>
+  <property>
+    <name>grumpy_user</name>
+    <display-name>grumpy User</display-name>
+    <value>grumpy</value>
+    <property-type>USER</property-type>
+    <description>grumpy Username.</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/GRUMPY/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/GRUMPY/kerberos.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/GRUMPY/kerberos.json
new file mode 100644
index 0000000..3bf5fbd
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/GRUMPY/kerberos.json
@@ -0,0 +1,78 @@
+{
+  "services": [
+    {
+      "name": "GRUMPY",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "grumpy",
+          "principal": {
+            "value": "${grumpy-site/grumpy_user}-${cluster_name|toLower()}@${realm}",
+            "type" : "user",
+            "configuration": "grumpy-site/grumpy_principal_name",
+            "local_username": "${grumpy-site/grumpy_user}"
+          },
+          "keytab": {
+            "file": "${keytab_dir}/grumpy.headless.keytab",
+            "owner": {
+              "name": "${grumpy-site/grumpy_user}",
+              "access": "r"
+            },
+            "group": {
+              "name": "${cluster-env/user_group}",
+              "access": "r"
+            },
+            "configuration": "grumpy-site/grumpy_user_keytab"
+          }
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+      ],
+      "components": [
+        {
+          "name": "GRUMPY",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "grumpy_grumpy",
+              "principal": {
+                "value": "grumpy/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "grumpy-site/grumpy.grumpy.kerberos.principal",
+                "local_username": "${grumpy-site/grumpy_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/grumpy.service.keytab",
+                "owner": {
+                  "name": "${grumpy-site/grumpy_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "grumpy-site/grumpy.grumpy.keytab.file"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "grumpy-site/grumpy.security.authentication.spnego.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "grumpy-site/grumpy.security.authentication.spnego.kerberos.keytab"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/GRUMPY/package/scripts/dwarf.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/GRUMPY/package/scripts/dwarf.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/GRUMPY/package/scripts/dwarf.py
index cf4206c..de2f4b4 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/GRUMPY/package/scripts/dwarf.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/GRUMPY/package/scripts/dwarf.py
@@ -33,6 +33,10 @@ class Grumpy(Dummy):
   def __init__(self):
     super(Grumpy, self).__init__()
     self.component_name = "GRUMPY"
+    self.principal_conf_name = "grumpy-site"
+    self.principal_name = "grumpy.grumpy.kerberos.principal"
+    self.keytab_conf_name = "grumpy-site"
+    self.keytab_name = "grumpy.grumpy.keytab.file"
 
 if __name__ == "__main__":
   Grumpy().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/configuration/happy-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/configuration/happy-site.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/configuration/happy-site.xml
index 67762a5..693046e 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/configuration/happy-site.xml
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/configuration/happy-site.xml
@@ -33,4 +33,16 @@
     </value-attributes>
     <on-ambari-upgrade add="false"/>
   </property>
+  <property>
+    <name>happy_user</name>
+    <display-name>happy User</display-name>
+    <value>happy</value>
+    <property-type>USER</property-type>
+    <description>happy Username.</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/kerberos.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/kerberos.json
new file mode 100644
index 0000000..ae84eaf
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/kerberos.json
@@ -0,0 +1,78 @@
+{
+  "services": [
+    {
+      "name": "HAPPY",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "happy",
+          "principal": {
+            "value": "${happy-site/happy_user}-${cluster_name|toLower()}@${realm}",
+            "type" : "user",
+            "configuration": "happy-site/happy_principal_name",
+            "local_username": "${happy-site/happy_user}"
+          },
+          "keytab": {
+            "file": "${keytab_dir}/happy.headless.keytab",
+            "owner": {
+              "name": "${happy-site/happy_user}",
+              "access": "r"
+            },
+            "group": {
+              "name": "${cluster-env/user_group}",
+              "access": "r"
+            },
+            "configuration": "happy-site/happy_user_keytab"
+          }
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+      ],
+      "components": [
+        {
+          "name": "HAPPY",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "happy_happy",
+              "principal": {
+                "value": "happy/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "happy-site/happy.happy.kerberos.principal",
+                "local_username": "${happy-site/happy_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/happy.service.keytab",
+                "owner": {
+                  "name": "${happy-site/happy_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "happy-site/happy.happy.keytab.file"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "happy-site/happy.security.authentication.spnego.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "happy-site/happy.security.authentication.spnego.kerberos.keytab"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/package/scripts/dwarf.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/package/scripts/dwarf.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/package/scripts/dwarf.py
index b86c4c7..dc8198d 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/package/scripts/dwarf.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HAPPY/package/scripts/dwarf.py
@@ -33,6 +33,10 @@ class Happy(Dummy):
   def __init__(self):
     super(Happy, self).__init__()
     self.component_name = "HAPPY"
+    self.principal_conf_name = "happy-site"
+    self.principal_name = "happy.happy.kerberos.principal"
+    self.keytab_conf_name = "happy-site"
+    self.keytab_name = "happy.happy.keytab.file"
 
 if __name__ == "__main__":
   Happy().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_master.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_master.py
index 7ea42ab..3761fcf 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_master.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_master.py
@@ -33,6 +33,10 @@ class HBaseMaster(Dummy):
   def __init__(self):
     super(HBaseMaster, self).__init__()
     self.component_name = "HBASE_MASTER"
+    self.principal_conf_name = "hbase-site"
+    self.principal_name = "hbase.master.kerberos.principal"
+    self.keytab_conf_name = "hbase-site"
+    self.keytab_name = "hbase.master.keytab.file"
 
   def decommission(self, env):
     print "Decommission"

http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_regionserver.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_regionserver.py
index a866715..101c36f 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_regionserver.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_regionserver.py
@@ -33,6 +33,10 @@ class HBaseRegionServer(Dummy):
   def __init__(self):
     super(HBaseRegionServer, self).__init__()
     self.component_name = "HBASE_REGIONSERVER"
+    self.principal_conf_name = "hbase-site"
+    self.principal_name = "hbase.regionserver.kerberos.principal"
+    self.keytab_conf_name = "hbase-site"
+    self.keytab_name = "hbase.regionserver.keytab.file"
 
   def decommission(self, env):
     print "Decommission"

http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/phoenix_queryserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/phoenix_queryserver.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/phoenix_queryserver.py
index 9cf3cec..76a49d9 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/phoenix_queryserver.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/phoenix_queryserver.py
@@ -33,6 +33,10 @@ class PhoenixQueryServer(Dummy):
   def __init__(self):
     super(PhoenixQueryServer, self).__init__()
     self.component_name = "PHOENIX_QUERY_SERVER"
+    self.principal_conf_name = "hbase-site"
+    self.principal_name = "phoenix.queryserver.kerberos.principal"
+    self.keytab_conf_name = "hbase-site"
+    self.keytab_name = "phoenix.queryserver.keytab.file"
 
 if __name__ == "__main__":
   PhoenixQueryServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/datanode.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/datanode.py
index 36edc31..6fc338b 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/datanode.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/datanode.py
@@ -33,6 +33,10 @@ class DataNode(Dummy):
   def __init__(self):
     super(DataNode, self).__init__()
     self.component_name = "DATANODE"
+    self.principal_conf_name = "hdfs-site"
+    self.principal_name = "dfs.datanode.kerberos.principal"
+    self.keytab_conf_name = "hdfs-site"
+    self.keytab_name = "dfs.datanode.keytab.file"
 
 if __name__ == "__main__":
   DataNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/journalnode.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/journalnode.py
index 1ad13b7..96be630 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/journalnode.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/journalnode.py
@@ -33,6 +33,10 @@ class JournalNode(Dummy):
   def __init__(self):
     super(JournalNode, self).__init__()
     self.component_name = "JOURNALNODE"
+    self.principal_conf_name = "hdfs-site"
+    self.principal_name = "dfs.journalnode.kerberos.principal"
+    self.keytab_conf_name = "hdfs-site"
+    self.keytab_name = "dfs.journalnode.keytab.file"
 
 if __name__ == "__main__":
   JournalNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/namenode.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/namenode.py
index ded09cb..c3488e8 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/namenode.py
@@ -34,6 +34,10 @@ class NameNode(Dummy):
   def __init__(self):
     super(NameNode, self).__init__()
     self.component_name = "NAMENODE"
+    self.principal_conf_name = "hdfs-site"
+    self.principal_name = "dfs.namenode.kerberos.principal"
+    self.keytab_conf_name = "hdfs-site"
+    self.keytab_name = "dfs.namenode.keytab.file"
 
   def rebalancehdfs(self, env):
     print "Rebalance HDFS"

http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/nfsgateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/nfsgateway.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/nfsgateway.py
index ab9855d..b750522 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/nfsgateway.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/nfsgateway.py
@@ -33,6 +33,10 @@ class NFSGateway(Dummy):
   def __init__(self):
     super(NFSGateway, self).__init__()
     self.component_name = "NFS_GATEWAY"
+    self.principal_conf_name = "hdfs-site"
+    self.principal_name = "nfs.kerberos.principal"
+    self.keytab_conf_name = "hdfs-site"
+    self.keytab_name = "nfs.keytab.file"
 
 if __name__ == "__main__":
   NFSGateway().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/snamenode.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/snamenode.py
index 8815aa3..91ce7da 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/snamenode.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/snamenode.py
@@ -33,6 +33,10 @@ class SNameNode(Dummy):
   def __init__(self):
     super(SNameNode, self).__init__()
     self.component_name = "SECONDARY_NAMENODE"
+    self.principal_conf_name = "hdfs-site"
+    self.principal_name = "dfs.secondary.namenode.kerberos.principal"
+    self.keytab_conf_name = "hdfs-site"
+    self.keytab_name = "dfs.secondary.namenode.keytab.file"
 
 if __name__ == "__main__":
   SNameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/configuration/kerberos-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/configuration/kerberos-env.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/configuration/kerberos-env.xml
new file mode 100644
index 0000000..7016437
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/configuration/kerberos-env.xml
@@ -0,0 +1,380 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <property require-input="true">
+    <name>kdc_type</name>
+    <description>
+      The type of KDC being used. Either mit-kdc, ipa, or active-directory
+    </description>
+    <value>mit-kdc</value>
+    <display-name>KDC type</display-name>
+    <value-attributes>
+      <type>componentHost</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>manage_identities</name>
+    <description>
+      Indicates whether the Ambari user and service Kerberos identities (principals and keytab files)
+      should be managed (created, deleted, updated, etc...) by Ambari or managed manually.
+    </description>
+    <value>true</value>
+    <display-name>Manage Kerberos Identities</display-name>
+    <value-attributes>
+      <visible>false</visible>
+      <overridable>false</overridable>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>manage_auth_to_local</name>
+    <description>
+      Indicates whether the hadoop auth_to_local rules should be managed by Ambari or managed manually.
+    </description>
+    <value>true</value>
+    <display-name>Manage Hadoop auth_to_local rules</display-name>
+    <value-attributes>
+      <visible>true</visible>
+      <overridable>false</overridable>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>install_packages</name>
+    <display-name>Install OS-specific Kerberos client package(s)</display-name>
+    <description>
+      Indicates whether Ambari should install the Kerberos client package(s) or not. If not, it is
+      expected that Kerberos utility programs (such as kadmin, kinit, klist, and kdestroy) are
+      compatible with MIT Kerberos 5 version 1.10.3 in command line options and behaviors.
+    </description>
+    <value>true</value>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ldap_url</name>
+    <display-name>LDAP url</display-name>
+    <description>
+      The URL to the Active Directory LDAP Interface
+      Example: ldaps://ad.example.com:636
+    </description>
+    <value/>
+    <value-attributes>
+      <visible>false</visible>
+      <overridable>false</overridable>
+      <type>ldap_url</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>container_dn</name>
+    <display-name>Container DN</display-name>
+    <description>
+      The distinguished name (DN) of the container used to store service principals
+    </description>
+    <value-attributes>
+      <visible>false</visible>
+      <overridable>false</overridable>
+    </value-attributes>
+    <value/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>encryption_types</name>
+    <display-name>Encryption Types</display-name>
+    <description>
+      The supported list of session key encryption types that should be returned by the KDC.
+    </description>
+    <value>aes des3-cbc-sha1 rc4 des-cbc-md5</value>
+    <value-attributes>
+      <type>multiLine</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property require-input="true">
+    <name>realm</name>
+    <description>
+      The default realm to use when creating service principals
+    </description>
+    <display-name>Realm name</display-name>
+    <value/>
+    <value-attributes>
+      <type>host</type>
+      <editable-only-at-install>true</editable-only-at-install>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>kdc_hosts</name>
+    <description>
+      A comma-delimited list of IP addresses or FQDNs declaring the KDC hosts.
+      Optionally a port number may be included in each entry by separating each host and port by a
+      colon (:). Example:  kdc1.example.com:88, kdc2.example.com:88
+    </description>
+    <display-name>KDC hosts</display-name>
+    <value/>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>admin_server_host</name>
+    <display-name>Kadmin host</display-name>
+    <description>
+      The IP address or FQDN for the KDC Kerberos administrative host. Optionally a port number may be included.
+    </description>
+    <value/>
+    <value-attributes>
+      <type>host</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>executable_search_paths</name>
+    <display-name>Executable Search Paths</display-name>
+    <description>
+      A comma-delimited list of search paths to use to find Kerberos utilities like kadmin, kinit and ipa.
+    </description>
+    <value>/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin</value>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>multiLine</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>password_length</name>
+    <display-name>Password Length</display-name>
+    <description>
+      The required length for generated passwords.
+    </description>
+    <value>20</value>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>password_min_lowercase_letters</name>
+    <display-name>Password Minimum # Lowercase Letters</display-name>
+    <description>
+      The minimum number of lowercase letters (a-z) required in generated passwords
+    </description>
+    <value>1</value>
+    <value-attributes>
+      <type>int</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>password_min_uppercase_letters</name>
+    <display-name>Password Minimum # Uppercase Letters</display-name>
+    <description>
+      The minimum number of uppercase letters (A-Z) required in generated passwords
+    </description>
+    <value>1</value>
+    <value-attributes>
+      <type>int</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>password_min_digits</name>
+    <display-name>Password Minimum # Digits</display-name>
+    <description>
+      The minimum number of digits (0-9) required in generated passwords
+    </description>
+    <value>1</value>
+    <value-attributes>
+      <type>int</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>password_min_punctuation</name>
+    <display-name>Password Minimum # Punctuation Characters</display-name>
+    <description>
+      The minimum number of punctuation characters (?.!$%^*()-_+=~) required in generated passwords
+    </description>
+    <value>1</value>
+    <value-attributes>
+      <type>int</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>password_min_whitespace</name>
+    <display-name>Password Minimum # Whitespace Characters</display-name>
+    <description>
+      The minimum number of whitespace characters required in generated passwords
+    </description>
+    <value>0</value>
+    <value-attributes>
+      <type>int</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>service_check_principal_name</name>
+    <display-name>Test Kerberos Principal</display-name>
+    <description>
+      The principal name to use when executing the Kerberos service check
+    </description>
+    <value>${cluster_name|toLower()}-${short_date}</value>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>case_insensitive_username_rules</name>
+    <display-name>Enable case insensitive username rules</display-name>
+    <description>
+      Force principal names to resolve to lowercase local usernames in auth-to-local rules
+    </description>
+    <value>false</value>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ad_create_attributes_template</name>
+    <display-name>Account Attribute Template</display-name>
+    <description>
+      A Velocity template to use to generate a JSON-formatted document containing the set of
+      attribute names and values needed to create a new Kerberos identity in the relevant
+      Active Directory.
+      Variables include:
+      principal_name, principal_primary, principal_instance, realm, realm_lowercase,
+      normalized_principal, principal digest, password, is_service, container_dn
+    </description>
+    <value>
+{
+  "objectClass": ["top", "person", "organizationalPerson", "user"],
+  "cn": "$principal_name",
+  #if( $is_service )
+  "servicePrincipalName": "$principal_name",
+  #end
+  "userPrincipalName": "$normalized_principal",
+  "unicodePwd": "$password",
+  "accountExpires": "0",
+  "userAccountControl": "66048"
+}
+    </value>
+    <value-attributes>
+      <type>content</type>
+      <empty-value-valid>true</empty-value-valid>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>create_ambari_principal</name>
+    <description>
+      Indicates whether Ambari should create the principal and keytab for itself, for use by different views.
+    </description>
+    <value>true</value>
+    <display-name>Create Ambari Principal &amp; Keytab</display-name>
+    <value-attributes>
+      <visible>true</visible>
+      <overridable>false</overridable>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>kdc_create_attributes</name>
+    <display-name>Principal Attributes</display-name>
+    <description>
+      The set of attributes to use when creating a new Kerberos identity in the relevant (MIT) KDC.
+    </description>
+    <value/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>group</name>
+    <display-name>IPA Group</display-name>
+    <description>
+      The IPA group that user principals should be members of
+    </description>
+    <value>ambari-managed-principals</value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+      <visible>false</visible>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>set_password_expiry</name>
+    <display-name>Set IPA principal password expiry</display-name>
+    <description>
+      Indicates whether Ambari should set the password expiry for the principals it creates. By default
+      IPA does not allow this; it requires the admin principal to have write permission on the
+      krbPasswordExpiry attribute. If this property is not set to true, it is assumed that a suitable
+      password policy is in place for the IPA group that principals are added to.
+    </description>
+    <value>false</value>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>password_chat_timeout</name>
+    <display-name>Set IPA kinit password chat timeout</display-name>
+    <description>
+      The timeout, in seconds, that Ambari should wait for a response during a password chat.
+      Lookups can take some time, so the response may not arrive immediately.
+    </description>
+    <value>5</value>
+    <value-attributes>
+      <visible>false</visible>
+      <type>int</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>

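The password_length and password_min_* properties above constrain the passwords Ambari generates for the principals it creates. A hypothetical generator honoring those constraints, for illustration only (the punctuation set is the one listed in the password_min_punctuation description; this is not Ambari's code):

import random
import string

def generate_password(length=20, min_lower=1, min_upper=1, min_digits=1,
                      min_punct=1, min_whitespace=0):
    # Start with the required minimum characters from each class, then pad and shuffle.
    punct = "?.!$%^*()-_+=~"
    pools = [
        (string.ascii_lowercase, min_lower),
        (string.ascii_uppercase, min_upper),
        (string.digits, min_digits),
        (punct, min_punct),
        (" ", min_whitespace),
    ]
    chars = [random.choice(pool) for pool, count in pools for _ in range(count)]
    filler = string.ascii_letters + string.digits + punct
    chars += [random.choice(filler) for _ in range(length - len(chars))]
    random.shuffle(chars)
    return "".join(chars)

print(generate_password())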
http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/configuration/krb5-conf.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/configuration/krb5-conf.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/configuration/krb5-conf.xml
new file mode 100644
index 0000000..c692b92
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/configuration/krb5-conf.xml
@@ -0,0 +1,109 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property require-input="false">
+    <name>domains</name>
+    <display-name>Domains</display-name>
+    <description>
+      A comma-separated list of domain names used to map server host names to the Realm name (e.g. .example.com,example.com). This is optional.
+    </description>
+    <value/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>manage_krb5_conf</name>
+    <display-name>Manage Kerberos client krb5.conf</display-name>
+    <description>
+      Indicates whether your krb5.conf file should be managed by the wizard or whether you will manage it yourself
+    </description>
+    <value>true</value>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>conf_dir</name>
+    <display-name>krb5-conf directory path</display-name>
+    <description>The krb5.conf configuration directory</description>
+    <value>/etc</value>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>krb5-conf template</display-name>
+    <description>Customizable krb5.conf template (Jinja template engine)</description>
+    <value>
+[libdefaults]
+  renew_lifetime = 7d
+  forwardable = true
+  default_realm = {{realm}}
+  ticket_lifetime = 24h
+  dns_lookup_realm = false
+  dns_lookup_kdc = false
+  default_ccache_name = /tmp/krb5cc_%{uid}
+  #default_tgs_enctypes = {{encryption_types}}
+  #default_tkt_enctypes = {{encryption_types}}
+{% if domains %}
+[domain_realm]
+{%- for domain in domains.split(',') %}
+  {{domain|trim()}} = {{realm}}
+{%- endfor %}
+{% endif %}
+[logging]
+  default = FILE:/var/log/krb5kdc.log
+  admin_server = FILE:/var/log/kadmind.log
+  kdc = FILE:/var/log/krb5kdc.log
+
+[realms]
+  {{realm}} = {
+{%- if kdc_hosts &gt; 0 -%}
+{%- set kdc_host_list = kdc_hosts.split(',')  -%}
+{%- if kdc_host_list and kdc_host_list|length &gt; 0 %}
+    admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}
+{%- if kdc_host_list -%}
+{% for kdc_host in kdc_host_list %}
+    kdc = {{kdc_host|trim()}}
+{%- endfor -%}
+{% endif %}
+{%- endif %}
+{%- endif %}
+  }
+
+{# Append additional realm declarations below #}
+    </value>
+    <value-attributes>
+      <type>content</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>

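The krb5-conf content property above is a Jinja template that the Kerberos client scripts (below) render into krb5.conf. A trimmed-down rendering sketch using the jinja2 package directly, with made-up realm and KDC values, to show what the [domain_realm] and [realms] loops produce:

# Requires the jinja2 package; variable names match those used in the template above.
from jinja2 import Template

krb5_template = """\
[libdefaults]
  default_realm = {{realm}}
{% if domains %}
[domain_realm]
{%- for domain in domains.split(',') %}
  {{domain|trim()}} = {{realm}}
{%- endfor %}
{% endif %}
[realms]
  {{realm}} = {
    admin_server = {{admin_server_host}}
{%- for kdc_host in kdc_hosts.split(',') %}
    kdc = {{kdc_host|trim()}}
{%- endfor %}
  }
"""

print(Template(krb5_template).render(
    realm="EXAMPLE.COM",
    domains=".example.com,example.com",
    admin_server_host="kdc1.example.com",
    kdc_hosts="kdc1.example.com,kdc2.example.com"))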
http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/kerberos.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/kerberos.json
new file mode 100644
index 0000000..6ab7610
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/kerberos.json
@@ -0,0 +1,17 @@
+{
+  "services": [
+    {
+      "name": "KERBEROS",
+      "identities": [
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "components": [
+        {
+          "name": "KERBEROS_CLIENT"
+        }
+      ]
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/metainfo.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/metainfo.xml
new file mode 100644
index 0000000..3ec6340
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/metainfo.xml
@@ -0,0 +1,123 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+        <service>
+            <name>KERBEROS</name>
+            <displayName>Kerberos</displayName>
+            <comment>A computer network authentication protocol which works on
+                the basis of 'tickets' to allow nodes communicating over a
+                non-secure network to prove their identity to one another in a
+                secure manner.
+            </comment>
+            <version>1.10.3-10</version>
+
+            <components>
+                <component>
+                    <name>KERBEROS_CLIENT</name>
+                    <displayName>Kerberos Client</displayName>
+                    <category>CLIENT</category>
+                    <cardinality>ALL</cardinality>
+                    <versionAdvertised>false</versionAdvertised>
+                    <auto-deploy>
+                        <enabled>true</enabled>
+                    </auto-deploy>
+                    <commandScript>
+                        <script>scripts/kerberos_client.py</script>
+                        <scriptType>PYTHON</scriptType>
+                        <timeout>1200</timeout>
+                    </commandScript>
+                    <customCommands>
+                        <customCommand>
+                            <name>SET_KEYTAB</name>
+                            <commandScript>
+                                <script>scripts/kerberos_client.py</script>
+                                <scriptType>PYTHON</scriptType>
+                                <timeout>1000</timeout>
+                            </commandScript>
+                        </customCommand>
+                        <customCommand>
+                            <name>REMOVE_KEYTAB</name>
+                            <commandScript>
+                                <script>scripts/kerberos_client.py</script>
+                                <scriptType>PYTHON</scriptType>
+                                <timeout>1000</timeout>
+                            </commandScript>
+                        </customCommand>
+                    </customCommands>
+                    <configFiles>
+                        <configFile>
+                            <type>env</type>
+                            <fileName>krb5.conf</fileName>
+                            <dictionaryName>krb5-conf</dictionaryName>
+                        </configFile>
+                    </configFiles>
+                </component>
+            </components>
+
+            <osSpecifics>
+                <osSpecific>
+                    <osFamily>redhat7,amazon2015,redhat6</osFamily>
+                    <packages>
+                        <package>
+                            <name>krb5-workstation</name>
+                            <skipUpgrade>true</skipUpgrade>
+                        </package>
+                    </packages>
+                </osSpecific>
+
+                <osSpecific>
+                    <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
+                    <packages>
+                        <package>
+                            <name>krb5-user</name>
+                            <skipUpgrade>true</skipUpgrade>
+                        </package>
+                        <package>
+                            <name>krb5-config</name>
+                            <skipUpgrade>true</skipUpgrade>
+                        </package>
+                    </packages>
+                </osSpecific>
+
+                <osSpecific>
+                    <osFamily>suse11,suse12</osFamily>
+                    <packages>
+                        <package>
+                            <name>krb5-client</name>
+                            <skipUpgrade>true</skipUpgrade>
+                        </package>
+                    </packages>
+                </osSpecific>
+            </osSpecifics>
+
+            <commandScript>
+                <script>scripts/service_check.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>300</timeout>
+            </commandScript>
+
+            <configuration-dependencies>
+                <config-type>krb5-conf</config-type>
+                <config-type>kerberos-env</config-type>
+            </configuration-dependencies>
+            <restartRequiredAfterChange>true</restartRequiredAfterChange>
+        </service>
+    </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/kerberos_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/kerberos_client.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/kerberos_client.py
new file mode 100644
index 0000000..ddc8063
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/kerberos_client.py
@@ -0,0 +1,80 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from kerberos_common import *
+from resource_management.libraries.functions.security_commons import cached_kinit_executor
+from resource_management.core.exceptions import ClientComponentHasNoStatus
+
+class KerberosClient(KerberosScript):
+  def install(self, env):
+    install_packages = default('/configurations/kerberos-env/install_packages', "true")
+    if install_packages:
+      self.install_packages(env)
+    else:
+      print "Kerberos client packages are not being installed, manual installation is required."
+
+    self.configure(env)
+
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    if params.manage_krb5_conf:
+      self.write_krb5_conf()
+    #delete krb cache to prevent using old krb tickets on fresh kerberos setup
+    self.clear_tmp_cache()
+
+    self.setup_jce()
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+  def security_status(self, env):
+    import status_params
+    if status_params.security_enabled:
+      if status_params.smoke_user and status_params.smoke_user_keytab:
+        try:
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.smoke_user,
+                                status_params.smoke_user_keytab,
+                                status_params.smoke_user_principal,
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        self.put_structured_out({"securityState": "UNKNOWN"})
+        self.put_structured_out({"securityStateErrorInfo": "Missing smoke user credentials"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+  def set_keytab(self, env):
+    self.write_keytab_file()
+
+  def remove_keytab(self, env):
+    self.delete_keytab_file()
+
+  def download_install_jce(self, env):
+    self.setup_jce()
+
+
+if __name__ == "__main__":
+  KerberosClient().execute()

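install() above calls default('/configurations/kerberos-env/install_packages', "true"), which resolves a '/'-separated path against the command JSON and falls back to the given default when the path is missing. A toy equivalent of that lookup, illustration only and not the resource_management implementation:

def default_lookup(command_json, path, default_value):
    # Walk the command JSON one path segment at a time; stop at the first missing key.
    node = command_json
    for part in path.strip('/').split('/'):
        if isinstance(node, dict) and part in node:
            node = node[part]
        else:
            return default_value
    return node

cmd = {"configurations": {"kerberos-env": {"install_packages": "true"}}}
print(default_lookup(cmd, '/configurations/kerberos-env/install_packages', "true"))   # true
print(default_lookup(cmd, '/configurations/kerberos-env/missing', "fallback"))        # fallback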
http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/kerberos_common.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/kerberos_common.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/kerberos_common.py
new file mode 100644
index 0000000..3c6c83e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/kerberos_common.py
@@ -0,0 +1,468 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import base64
+import getpass
+import os
+import string
+import subprocess
+import sys
+import tempfile
+from tempfile import gettempdir
+
+from resource_management import *
+from utils import get_property_value
+from ambari_commons.os_utils import remove_file
+from ambari_agent import Constants
+
+class KerberosScript(Script):
+  KRB5_REALM_PROPERTIES = [
+    'kdc',
+    'admin_server',
+    'default_domain',
+    'master_kdc'
+  ]
+
+  KRB5_SECTION_NAMES = [
+    'libdefaults',
+    'logging',
+    'realms',
+    'domain_realm',
+    'capaths',
+    'ca_paths',
+    'appdefaults',
+    'plugins'
+  ]
+
+  @staticmethod
+  def create_random_password():
+    import random
+
+    chars = string.digits + string.ascii_letters
+    return ''.join(random.choice(chars) for x in range(13))
+
+  @staticmethod
+  def write_conf_section(output_file, section_name, section_data):
+    if section_name is not None:
+      output_file.write('[%s]\n' % section_name)
+
+      if section_data is not None:
+        for key, value in section_data.iteritems():
+          output_file.write(" %s = %s\n" % (key, value))
+
+
+  @staticmethod
+  def _write_conf_realm(output_file, realm_name, realm_data):
+    """ Writes out realm details
+
+    Example:
+
+     EXAMPLE.COM = {
+      kdc = kerberos.example.com
+      admin_server = kerberos.example.com
+     }
+
+    """
+    if realm_name is not None:
+      output_file.write(" %s = {\n" % realm_name)
+
+      if realm_data is not None:
+        for key, value in realm_data.iteritems():
+          if key in KerberosScript.KRB5_REALM_PROPERTIES:
+            output_file.write("  %s = %s\n" % (key, value))
+
+      output_file.write(" }\n")
+
+  @staticmethod
+  def write_conf_realms_section(output_file, section_name, realms_data):
+    if section_name is not None:
+      output_file.write('[%s]\n' % section_name)
+
+      if realms_data is not None:
+        for realm, realm_data in realms_data.iteritems():
+          KerberosScript._write_conf_realm(output_file, realm, realm_data)
+          output_file.write('\n')
+
+  @staticmethod
+  def write_krb5_conf():
+    import params
+
+    Directory(params.krb5_conf_dir,
+              owner='root',
+              create_parents = True,
+              group='root',
+              mode=0755
+    )
+
+    if (params.krb5_conf_template is None) or not params.krb5_conf_template.strip():
+      content = Template('krb5_conf.j2')
+    else:
+      content = InlineTemplate(params.krb5_conf_template)
+
+    File(params.krb5_conf_path,
+         content=content,
+         owner='root',
+         group='root',
+         mode=0644
+    )
+
+  @staticmethod
+  def invoke_kadmin(query, admin_identity=None, default_realm=None):
+    """
+    Executes the kadmin or kadmin.local command (depending on whether admin_identity is set or not)
+    and returns the command's result code and standard output.
+
+    :param query: the kadmin query to execute
+    :param admin_identity: the identity for the administrative user (optional)
+    :param default_realm: the default realm to assume
+    :return: return_code, out
+    """
+    if (query is not None) and (len(query) > 0):
+      auth_principal = None
+      auth_keytab_file = None
+
+      if admin_identity is not None:
+        auth_principal = get_property_value(admin_identity, 'principal')
+
+      if auth_principal is None:
+        kadmin = 'kadmin.local'
+        credential = ''
+      else:
+        kadmin = 'kadmin -p "%s"' % auth_principal
+
+        auth_password = get_property_value(admin_identity, 'password')
+
+        if auth_password is None:
+          auth_keytab = get_property_value(admin_identity, 'keytab')
+
+          if auth_keytab is not None:
+            (fd, auth_keytab_file) = tempfile.mkstemp()
+            os.write(fd, base64.b64decode(auth_keytab))
+            os.close(fd)
+
+          credential = '-k -t %s' % auth_keytab_file
+        else:
+          credential = '-w "%s"' % auth_password
+
+      if (default_realm is not None) and (len(default_realm) > 0):
+        realm = '-r %s' % default_realm
+      else:
+        realm = ''
+
+      try:
+        command = '%s %s %s -q "%s"' % (kadmin, credential, realm, query.replace('"', '\\"'))
+        return shell.checked_call(command)
+      except:
+        raise
+      finally:
+        if auth_keytab_file is not None:
+          os.remove(auth_keytab_file)
+
+  @staticmethod
+  def create_keytab_file(principal, path, auth_identity=None):
+    success = False
+
+    if (principal is not None) and (len(principal) > 0):
+      if (auth_identity is None) or (len(auth_identity) == 0):
+        norandkey = '-norandkey'
+      else:
+        norandkey = ''
+
+      if (path is not None) and (len(path) > 0):
+        keytab_file = '-k %s' % path
+      else:
+        keytab_file = ''
+
+      try:
+        result_code, output = KerberosScript.invoke_kadmin(
+          'ktadd %s %s %s' % (keytab_file, norandkey, principal),
+          auth_identity)
+
+        success = (result_code == 0)
+      except:
+        raise Fail("Failed to create keytab for principal: %s (in %s)" % (principal, path))
+
+    return success
+
+  @staticmethod
+  def create_keytab(principal, auth_identity=None):
+    keytab = None
+
+    (fd, temp_path) = tempfile.mkstemp()
+    os.remove(temp_path)
+
+    try:
+      if KerberosScript.create_keytab_file(principal, temp_path, auth_identity):
+        with open(temp_path, 'r') as f:
+          keytab = base64.b64encode(f.read())
+    finally:
+      if os.path.isfile(temp_path):
+        os.remove(temp_path)
+
+    return keytab
+
+  @staticmethod
+  def principal_exists(identity, auth_identity=None):
+    exists = False
+
+    if identity is not None:
+      principal = get_property_value(identity, 'principal')
+
+      if (principal is not None) and (len(principal) > 0):
+        try:
+          result_code, output = KerberosScript.invoke_kadmin('getprinc %s' % principal,
+                                                             auth_identity)
+          exists = (output is not None) and (("Principal: %s" % principal) in output)
+        except:
+          raise Fail("Failed to determine if principal exists: %s" % principal)
+
+    return exists
+
+  @staticmethod
+  def change_principal_password(identity, auth_identity=None):
+    success = False
+
+    if identity is not None:
+      principal = get_property_value(identity, 'principal')
+
+      if (principal is not None) and (len(principal) > 0):
+        password = get_property_value(identity, 'password')
+
+        if password is None:
+          credentials = '-randkey'
+        else:
+          credentials = '-pw "%s"' % password
+
+        try:
+          result_code, output = KerberosScript.invoke_kadmin(
+            'change_password %s %s' % (credentials, principal),
+            auth_identity)
+
+          success = (result_code == 0)
+        except:
+          raise Fail("Failed to create principal: %s" % principal)
+
+    return success
+
+  @staticmethod
+  def create_principal(identity, auth_identity=None):
+    success = False
+
+    if identity is not None:
+      principal = get_property_value(identity, 'principal')
+
+      if (principal is not None) and (len(principal) > 0):
+        password = get_property_value(identity, 'password')
+
+        if password is None:
+          credentials = '-randkey'
+        else:
+          credentials = '-pw "%s"' % password
+
+        try:
+          result_code, out = KerberosScript.invoke_kadmin(
+            'addprinc %s %s' % (credentials, principal),
+            auth_identity)
+
+          success = (result_code == 0)
+        except:
+          raise Fail("Failed to create principal: %s" % principal)
+
+    return success
+
+  @staticmethod
+  def clear_tmp_cache():
+    tmp_dir = Constants.AGENT_TMP_DIR
+    if tmp_dir is None:
+      tmp_dir = gettempdir()
+    curl_krb_cache_path = os.path.join(tmp_dir, "curl_krb_cache")
+    Directory(curl_krb_cache_path, action="delete")
+
+  @staticmethod
+  def create_principals(identities, auth_identity=None):
+    if identities is not None:
+      for identity in identities:
+        KerberosScript.create_principal(identity, auth_identity)
+
+  @staticmethod
+  def create_or_update_administrator_identity():
+    import params
+
+    if params.realm is not None:
+      admin_identity = params.get_property_value(params.realm, 'admin_identity')
+
+      if KerberosScript.principal_exists(admin_identity):
+        KerberosScript.change_principal_password(admin_identity)
+      else:
+        KerberosScript.create_principal(admin_identity)
+
+  @staticmethod
+  def test_kinit(identity, user="root"):
+    import params  # params.hostname is used below when expanding _HOST in keytab paths
+
+    principal = get_property_value(identity, 'principal')
+    kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+    kdestroy_path_local = functions.get_kdestroy_path(default('/configurations/kerberos-env/executable_search_paths', None))
+
+    if principal is not None:
+      keytab_file = get_property_value(identity, 'keytab_file')
+      keytab = get_property_value(identity, 'keytab')
+      password = get_property_value(identity, 'password')
+
+      # If a test keytab file is available, simply use it
+      if (keytab_file is not None) and (os.path.isfile(keytab_file)):
+        keytab_file = keytab_file.replace("_HOST", params.hostname)
+        command = '%s -k -t %s %s' % (kinit_path_local, keytab_file, principal)
+        Execute(command,
+          user = user,
+        )
+        return shell.checked_call(kdestroy_path_local)
+
+      # If base64-encoded test keytab data is available; then decode it, write it to a temporary file
+      # use it, and then remove the temporary file
+      elif keytab is not None:
+        (fd, test_keytab_file) = tempfile.mkstemp()
+        os.write(fd, base64.b64decode(keytab))
+        os.close(fd)
+
+        try:
+          command = '%s -k -t %s %s' % (kinit_path_local, test_keytab_file, principal)
+          Execute(command,
+            user = user,
+          )
+          return shell.checked_call(kdestroy_path_local)
+        except:
+          raise
+        finally:
+          if test_keytab_file is not None:
+            os.remove(test_keytab_file)
+
+      # If no keytab data is available and a password was supplied, simply use it.
+      elif password is not None:
+        process = subprocess.Popen([kinit_path_local, principal], stdin=subprocess.PIPE)
+        stdout, stderr = process.communicate(password)
+        if process.returncode:
+          err_msg = Logger.filter_text("Execution of kinit returned %d. %s" % (process.returncode, stderr))
+          raise Fail(err_msg)
+        else:
+          return shell.checked_call(kdestroy_path_local)
+      else:
+        return 0, ''
+    else:
+      return 0, ''
+
+
+  def write_keytab_file(self):
+    import params
+    import stat
+
+    if params.kerberos_command_params is not None:
+      for item in params.kerberos_command_params:
+        keytab_content_base64 = get_property_value(item, 'keytab_content_base64')
+        if (keytab_content_base64 is not None) and (len(keytab_content_base64) > 0):
+          keytab_file_path = get_property_value(item, 'keytab_file_path')
+          if (keytab_file_path is not None) and (len(keytab_file_path) > 0):
+            keytab_file_path = keytab_file_path.replace("_HOST", params.hostname)
+            head, tail = os.path.split(keytab_file_path)
+            if head:
+              Directory(head, create_parents = True, mode=0755, owner="root", group="root")
+
+            owner = "root"
+            group = "root"
+            mode = 0
+
+            mode |= stat.S_IREAD | stat.S_IWRITE
+            mode |= stat.S_IRGRP | stat.S_IWGRP
+
+            keytab_content = base64.b64decode(keytab_content_base64)
+
+            # to hide content in command output
+            def make_lambda(data):
+              return lambda: data
+
+            File(keytab_file_path,
+                 content=make_lambda(keytab_content),
+                 mode=mode,
+                 owner=owner,
+                 group=group)
+
+            principal = get_property_value(item, 'principal')
+            if principal is not None:
+              curr_content = Script.structuredOut
+
+              if "keytabs" not in curr_content:
+                curr_content['keytabs'] = {}
+
+              curr_content['keytabs'][principal.replace("_HOST", params.hostname)] = keytab_file_path
+
+              self.put_structured_out(curr_content)
+
+  def delete_keytab_file(self):
+    import params
+
+    if params.kerberos_command_params is not None:
+      for item in params.kerberos_command_params:
+        keytab_file_path = get_property_value(item, 'keytab_file_path')
+        if (keytab_file_path is not None) and (len(keytab_file_path) > 0):
+          keytab_file_path = keytab_file_path.replace("_HOST", params.hostname)
+          # Delete the keytab file
+          File(keytab_file_path, action="delete")
+
+          principal = get_property_value(item, 'principal')
+          if principal is not None:
+            curr_content = Script.structuredOut
+
+            if "keytabs" not in curr_content:
+              curr_content['keytabs'] = {}
+
+            curr_content['keytabs'][principal.replace("_HOST", params.hostname)] = '_REMOVED_'
+
+            self.put_structured_out(curr_content)
+
+  def setup_jce(self):
+    import params
+
+    if not params.jdk_name:
+      return
+    jce_curl_target = None
+    if params.jce_policy_zip is not None:
+      jce_curl_target = format("{artifact_dir}/{jce_policy_zip}")
+      Directory(params.artifact_dir,
+                create_parents = True,
+                )
+      File(jce_curl_target,
+           content = DownloadSource(format("{jce_location}/{jce_policy_zip}")),
+           )
+    elif params.security_enabled:
+      # Inconsistent state: security is enabled but no JCE policy zip was provided
+      raise Fail("Security is enabled, but JCE policy zip is not specified.")
+
+    # The extraction is performed only when security is enabled
+    if params.security_enabled:
+      security_dir = format("{java_home}/jre/lib/security")
+
+      File([format("{security_dir}/US_export_policy.jar"), format("{security_dir}/local_policy.jar")],
+           action = "delete",
+           )
+
+      extract_cmd = ("unzip", "-o", "-j", "-q", jce_curl_target, "-d", security_dir)
+      Execute(extract_cmd,
+              only_if = format("test -e {security_dir} && test -f {jce_curl_target}"),
+              path = ['/bin/','/usr/bin'],
+              sudo = True
+      )
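
A minimal standalone sketch (not part of the patch) of the krb5.conf [realms] fragment that _write_conf_realm and write_conf_realms_section above emit. It re-implements the formatting locally so it runs without the Ambari resource_management libraries; the realm values are hypothetical.

    from StringIO import StringIO

    KRB5_REALM_PROPERTIES = ['kdc', 'admin_server', 'default_domain', 'master_kdc']

    def write_conf_realm(out, realm_name, realm_data):
        # Mirrors KerberosScript._write_conf_realm: keys outside
        # KRB5_REALM_PROPERTIES are silently skipped.
        out.write(" %s = {\n" % realm_name)
        for key, value in realm_data.iteritems():
            if key in KRB5_REALM_PROPERTIES:
                out.write("  %s = %s\n" % (key, value))
        out.write(" }\n")

    buf = StringIO()
    buf.write("[realms]\n")
    write_conf_realm(buf, "EXAMPLE.COM",
                     {"kdc": "kerberos.example.com",
                      "admin_server": "kerberos.example.com"})
    print buf.getvalue()
    # Expected output (key order may vary):
    # [realms]
    #  EXAMPLE.COM = {
    #   kdc = kerberos.example.com
    #   admin_server = kerberos.example.com
    #  }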

http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/params.py
new file mode 100644
index 0000000..3533b35
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/params.py
@@ -0,0 +1,200 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import get_property_value, get_unstructured_data
+from ambari_commons.os_check import OSCheck
+from resource_management.libraries.functions.expect import expect
+
+krb5_conf_dir = '/etc'
+krb5_conf_file = 'krb5.conf'
+krb5_conf_path = krb5_conf_dir + '/' + krb5_conf_file
+
+if OSCheck.is_suse_family():
+  kdc_conf_dir = '/var/lib/kerberos/krb5kdc'
+elif OSCheck.is_ubuntu_family():
+  kdc_conf_dir = '/etc/krb5kdc'
+else:
+  kdc_conf_dir = '/var/kerberos/krb5kdc'
+kdc_conf_file = 'kdc.conf'
+kdc_conf_path = kdc_conf_dir + '/' + kdc_conf_file
+
+kadm5_acl_dir = kdc_conf_dir  # Typically kadm5.acl and kdc.conf exist in the same directory
+kadm5_acl_file = 'kadm5.acl'
+kadm5_acl_path = kadm5_acl_dir + '/' + kadm5_acl_file
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+configurations = None
+keytab_details = None
+default_group = None
+kdc_server_host = None
+cluster_host_info = None
+
+hostname = config['hostname']
+
+kdb5_util_path = 'kdb5_util'
+
+kdamin_pid_path = '/var/run/kadmind.pid'
+krb5kdc_pid_path = '/var/run/krb5kdc.pid'
+
+smoke_test_principal = None
+smoke_test_keytab_file = None
+
+smoke_user = 'ambari-qa'
+
+manage_identities = 'true'
+
+artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
+jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
+jce_location = config['hostLevelParams']['jdk_location']
+jdk_name = default("/hostLevelParams/jdk_name", None)
+java_home = config['hostLevelParams']['java_home']
+java_version = expect("/hostLevelParams/java_version", int)
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+if config is not None:
+  kerberos_command_params = get_property_value(config, 'kerberosCommandParams')
+
+  cluster_host_info = get_property_value(config, 'clusterHostInfo')
+  if cluster_host_info is not None:
+    kdc_server_hosts = get_property_value(cluster_host_info, 'kdc_server_hosts')
+
+    if (kdc_server_hosts is not None) and (len(kdc_server_hosts) > 0):
+      kdc_server_host = kdc_server_hosts[0]
+
+  configurations = get_property_value(config, 'configurations')
+  if configurations is not None:
+    cluster_env = get_property_value(configurations, 'cluster-env')
+
+    if cluster_env is not None:
+      smoke_test_principal = get_property_value(cluster_env, 'smokeuser_principal_name', None, True, None)
+      smoke_test_keytab_file = get_property_value(cluster_env, 'smokeuser_keytab', None, True, None)
+      smoke_user = get_property_value(cluster_env, 'smokeuser', smoke_user, True, smoke_user)
+
+      default_group = get_property_value(cluster_env, 'user_group')
+
+      if default_group is None:
+        default_group = get_property_value(cluster_env, 'user-group')
+
+    # ##############################################################################################
+    # Get krb5.conf template data
+    # ##############################################################################################
+    realm = 'EXAMPLE.COM'
+    domains = ''
+    kdc_hosts = 'localhost'
+    admin_server_host = None
+    admin_principal = None
+    admin_password = None
+    admin_keytab = None
+    test_principal = None
+    test_password = None
+    test_keytab = None
+    test_keytab_file = None
+    encryption_types = None
+    manage_krb5_conf = "true"
+    krb5_conf_template = None
+
+    krb5_conf_data = get_property_value(configurations, 'krb5-conf')
+
+    kerberos_env = get_property_value(configurations, "kerberos-env")
+
+    if kerberos_env is not None:
+      manage_identities = get_property_value(kerberos_env, "manage_identities", "true", True, "true")
+      encryption_types = get_property_value(kerberos_env, "encryption_types", None, True, None)
+      realm = get_property_value(kerberos_env, "realm", None, True, None)
+      kdc_hosts = get_property_value(kerberos_env, 'kdc_hosts', kdc_hosts)
+      admin_server_host = get_property_value(kerberos_env, 'admin_server_host', admin_server_host)
+
+    if krb5_conf_data is not None:
+      realm = get_property_value(krb5_conf_data, 'realm', realm)
+      domains = get_property_value(krb5_conf_data, 'domains', domains)
+
+      admin_principal = get_property_value(krb5_conf_data, 'admin_principal', admin_principal, True, None)
+      admin_password = get_property_value(krb5_conf_data, 'admin_password', admin_password, True, None)
+      admin_keytab = get_property_value(krb5_conf_data, 'admin_keytab', admin_keytab, True, None)
+
+      test_principal = get_property_value(krb5_conf_data, 'test_principal', test_principal, True, None)
+      test_password = get_property_value(krb5_conf_data, 'test_password', test_password, True, None)
+      test_keytab = get_property_value(krb5_conf_data, 'test_keytab', test_keytab, True, None)
+      test_keytab_file = get_property_value(krb5_conf_data, 'test_keytab_file', test_keytab_file, True, None)
+
+      krb5_conf_template = get_property_value(krb5_conf_data, 'content', krb5_conf_template)
+      krb5_conf_dir = get_property_value(krb5_conf_data, 'conf_dir', krb5_conf_dir)
+      krb5_conf_file = get_property_value(krb5_conf_data, 'conf_file', krb5_conf_file)
+      krb5_conf_path = krb5_conf_dir + '/' + krb5_conf_file
+
+      manage_krb5_conf = get_property_value(krb5_conf_data, 'manage_krb5_conf', "true")
+
+    # For backward compatibility, ensure that kdc_host exists. This may be needed if the krb5.conf
+    # template in krb5-conf/content had not been updated during the Ambari upgrade to 2.4.0 - which
+    # will happen if the template was altered from its stack-default value.
+    kdc_host_parts = kdc_hosts.split(',')
+    if kdc_host_parts:
+      kdc_host = kdc_host_parts[0]
+    else:
+      kdc_host = kdc_hosts
+
+    # ##############################################################################################
+    # Get kdc.conf template data
+    # ##############################################################################################
+    kdcdefaults_kdc_ports = "88"
+    kdcdefaults_kdc_tcp_ports = "88"
+
+    kdc_conf_template = None
+
+    kdc_conf_data = get_property_value(configurations, 'kdc-conf')
+
+    if kdc_conf_data is not None:
+      kdcdefaults_kdc_ports = get_property_value(kdc_conf_data, 'kdcdefaults_kdc_ports', kdcdefaults_kdc_ports)
+      kdcdefaults_kdc_tcp_ports = get_property_value(kdc_conf_data, 'kdcdefaults_kdc_tcp_ports', kdcdefaults_kdc_tcp_ports)
+
+      kdc_conf_template = get_property_value(kdc_conf_data, 'content', kdc_conf_template)
+      kdc_conf_dir = get_property_value(kdc_conf_data, 'conf_dir', kdc_conf_dir)
+      kdc_conf_file = get_property_value(kdc_conf_data, 'conf_file', kdc_conf_file)
+      kdc_conf_path = kdc_conf_dir + '/' + kdc_conf_file
+
+    # ##############################################################################################
+    # Get kadm5.acl template data
+    # ##############################################################################################
+
+    kadm5_acl_template = None
+
+    kadm5_acl_data = get_property_value(configurations, 'kadm5-acl')
+
+    if kadm5_acl_data is not None:
+      kadm5_acl_template = get_property_value(kadm5_acl_data, 'content', kadm5_acl_template)
+      kadm5_acl_dir = get_property_value(kadm5_acl_data, 'conf_dir', kadm5_acl_dir)
+      kadm5_acl_file = get_property_value(kadm5_acl_data, 'conf_file', kadm5_acl_file)
+      kadm5_acl_path = kadm5_acl_dir + '/' + kadm5_acl_file
+
+  # ################################################################################################
+  # Get commandParams
+  # ################################################################################################
+  command_params = get_property_value(config, 'commandParams')
+  if command_params is not None:
+    keytab_details = get_unstructured_data(command_params, 'keytab')
+
+    if manage_identities:
+      smoke_test_principal = get_property_value(command_params, 'principal_name', smoke_test_principal)
+      smoke_test_keytab_file = get_property_value(command_params, 'keytab_file', smoke_test_keytab_file)
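
A standalone sketch of the realm resolution order implemented above: the stack default is overridden by kerberos-env, which is in turn overridden by krb5-conf. get_property_value here is a simplified stand-in for the helper in utils.py (assumed to behave like a dict lookup with a default), and the configuration values are hypothetical.

    def get_property_value(dictionary, key, default_value=None):
        # Simplified stand-in for utils.get_property_value (assumption:
        # plain dict lookup falling back to the supplied default).
        if dictionary and key in dictionary:
            return dictionary[key]
        return default_value

    configurations = {
        'kerberos-env': {'realm': 'CORP.EXAMPLE.COM'},      # hypothetical input
        'krb5-conf':    {'realm': 'OVERRIDE.EXAMPLE.COM'},  # hypothetical input
    }

    realm = 'EXAMPLE.COM'  # stack default, as in params.py

    kerberos_env = get_property_value(configurations, 'kerberos-env')
    if kerberos_env is not None:
        realm = get_property_value(kerberos_env, 'realm', realm)

    krb5_conf_data = get_property_value(configurations, 'krb5-conf')
    if krb5_conf_data is not None:
        realm = get_property_value(krb5_conf_data, 'realm', realm)

    print realm  # OVERRIDE.EXAMPLE.COM -> krb5-conf wins because it is applied last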

http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/service_check.py
new file mode 100644
index 0000000..555a93c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/service_check.py
@@ -0,0 +1,30 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management.libraries.script.script import Script
+
+class ServiceCheck(Script):
+
+    def service_check(self, env):
+        print "Service Check"
+
+if __name__ == "__main__":
+    ServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/status_params.py
new file mode 100644
index 0000000..bbae4a3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/status_params.py
@@ -0,0 +1,32 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+hostname = config['hostname']
+kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+smoke_user = config['configurations']['cluster-env']['smokeuser']
+smoke_user_principal = config['configurations']['cluster-env']['smokeuser_principal_name']


[28/51] [abbrv] ambari git commit: AMBARI-19137. HDP 3.0 TP - move ZK, HDFS, YARN/MR into new common-services version (alejandro)

Posted by sm...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/ranger-yarn-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/ranger-yarn-policymgr-ssl.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/ranger-yarn-policymgr-ssl.xml
new file mode 100644
index 0000000..ad6cf4f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/ranger-yarn-policymgr-ssl.xml
@@ -0,0 +1,66 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore</name>
+    <value>{{stack_root}}/current/hadoop-client/conf/ranger-yarn-plugin-keystore.jks</value>
+    <description>Java Keystore files</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.password</name>
+    <value>myKeyFilePassword</value>
+    <property-type>PASSWORD</property-type>
+    <description>password for keystore</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore</name>
+    <value>{{stack_root}}/current/hadoop-client/conf/ranger-yarn-plugin-truststore.jks</value>
+    <description>java truststore file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.password</name>
+    <value>changeit</value>
+    <property-type>PASSWORD</property-type>
+    <description>java truststore password</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>java keystore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>java truststore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/ranger-yarn-security.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/ranger-yarn-security.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/ranger-yarn-security.xml
new file mode 100644
index 0000000..5f69962
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/ranger-yarn-security.xml
@@ -0,0 +1,58 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>ranger.plugin.yarn.service.name</name>
+    <value>{{repo_name}}</value>
+    <description>Name of the Ranger service containing policies for this Yarn instance</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.yarn.policy.source.impl</name>
+    <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
+    <description>Class to retrieve policies from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.yarn.policy.rest.url</name>
+    <value>{{policymgr_mgr_url}}</value>
+    <description>URL to Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.yarn.policy.rest.ssl.config.file</name>
+    <value>/etc/hadoop/conf/ranger-policymgr-ssl-yarn.xml</value>
+    <description>Path to the file containing SSL details to contact Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.yarn.policy.pollIntervalMs</name>
+    <value>30000</value>
+    <description>How often to poll for policy changes, in milliseconds</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.yarn.policy.cache.dir</name>
+    <value>/etc/ranger/{{repo_name}}/policycache</value>
+    <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/yarn-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/yarn-env.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/yarn-env.xml
new file mode 100644
index 0000000..d8531b1
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/yarn-env.xml
@@ -0,0 +1,306 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <property>
+    <name>yarn_log_dir_prefix</name>
+    <value>/var/log/hadoop-yarn</value>
+    <display-name>YARN Log Dir Prefix</display-name>
+    <description>YARN Log Dir Prefix</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn_pid_dir_prefix</name>
+    <value>/var/run/hadoop-yarn</value>
+    <display-name>YARN PID Dir Prefix</display-name>
+    <description>YARN PID Dir Prefix</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn_user</name>
+    <display-name>Yarn User</display-name>
+    <value>yarn</value>
+    <property-type>USER</property-type>
+    <description>YARN User</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn_heapsize</name>
+    <value>1024</value>
+    <display-name>YARN Java heap size</display-name>
+    <description>Max heapsize for all YARN components using a numerical value in the scale of MB</description>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>resourcemanager_heapsize</name>
+    <value>1024</value>
+    <display-name>ResourceManager Java heap size</display-name>
+    <description>Max heapsize for ResourceManager using a numerical value in the scale of MB</description>
+    <value-attributes>
+      <type>int</type>
+      <overridable>false</overridable>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>nodemanager_heapsize</name>
+    <value>1024</value>
+    <display-name>NodeManager Java heap size</display-name>
+    <description>Max heapsize for NodeManager using a numerical value in the scale of MB</description>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>min_user_id</name>
+    <value>1000</value>
+    <display-name>Minimum user ID for submitting job</display-name>
+    <description>Set to 0 to disallow root from submitting jobs. Set to 1000 to disallow all superusers from submitting jobs.</description>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>is_supported_yarn_ranger</name>
+    <value>true</value>
+    <description>Indicates whether this stack supports the Ranger YARN plugin; set to true in stacks that use it.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn_user_nofile_limit</name>
+    <value>32768</value>
+    <description>Max open files limit setting for YARN user.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn_user_nproc_limit</name>
+    <value>65536</value>
+    <description>Max number of processes limit setting for YARN user.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- These properties were inherited from HDP 2.1 -->
+  <property>
+    <name>apptimelineserver_heapsize</name>
+    <value>1024</value>
+    <display-name>AppTimelineServer Java heap size</display-name>
+    <description>Max heapsize for AppTimelineServer using a numerical value in the scale of MB</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <unit>MB</unit>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- These properties were inherited from HDP 2.2 -->
+  <property>
+    <name>yarn_cgroups_enabled</name>
+    <value>false</value>
+    <description>You can use CGroups to isolate CPU-heavy processes in a Hadoop cluster.</description>
+    <display-name>CPU Isolation</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Enabled</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Disabled</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- yarn-env.sh -->
+  <property>
+    <name>content</name>
+    <display-name>yarn-env template</display-name>
+    <description>This is the jinja template for yarn-env.sh file</description>
+    <value>
+      export HADOOP_YARN_HOME={{hadoop_yarn_home}}
+      export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER
+      export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER
+      export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+      export JAVA_HOME={{java64_home}}
+      export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}"
+
+      # We need to add the EWMA appender for the yarn daemons only;
+      # however, YARN_ROOT_LOGGER is shared by the yarn client and the
+      # daemons. This restricts the EWMA appender to the daemons only.
+      INVOKER="${0##*/}"
+      if [ "$INVOKER" == "yarn-daemon.sh" ]; then
+      export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,EWMA,RFA}
+      fi
+
+      # User for YARN daemons
+      export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
+
+      # resolve links - $0 may be a softlink
+      export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
+
+      # some Java parameters
+      # export JAVA_HOME=/home/y/libexec/jdk1.6.0/
+      if [ "$JAVA_HOME" != "" ]; then
+      #echo "run java in $JAVA_HOME"
+      JAVA_HOME=$JAVA_HOME
+      fi
+
+      if [ "$JAVA_HOME" = "" ]; then
+      echo "Error: JAVA_HOME is not set."
+      exit 1
+      fi
+
+      JAVA=$JAVA_HOME/bin/java
+      JAVA_HEAP_MAX=-Xmx1000m
+
+      # For setting YARN specific HEAP sizes please use this
+      # Parameter and set appropriately
+      YARN_HEAPSIZE={{yarn_heapsize}}
+
+      # check envvars which might override default args
+      if [ "$YARN_HEAPSIZE" != "" ]; then
+      JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
+      fi
+
+      # Resource Manager specific parameters
+
+      # Specify the max Heapsize for the ResourceManager using a numerical value
+      # in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
+      # the value to 1000.
+      # This value will be overridden by an Xmx setting specified in either YARN_OPTS
+      # and/or YARN_RESOURCEMANAGER_OPTS.
+      # If not specified, the default value will be picked from either YARN_HEAPMAX
+      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+      export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}
+
+      # Specify the JVM options to be used when starting the ResourceManager.
+      # These options will be appended to the options specified as YARN_OPTS
+      # and therefore may override any similar flags set in YARN_OPTS
+      #export YARN_RESOURCEMANAGER_OPTS=
+
+      # Node Manager specific parameters
+
+      # Specify the max Heapsize for the NodeManager using a numerical value
+      # in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
+      # the value to 1000.
+      # This value will be overridden by an Xmx setting specified in either YARN_OPTS
+      # and/or YARN_NODEMANAGER_OPTS.
+      # If not specified, the default value will be picked from either YARN_HEAPMAX
+      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+      export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}
+
+      # Specify the max Heapsize for the timeline server using a numerical value
+      # in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
+      # the value to 1024.
+      # This value will be overridden by an Xmx setting specified in either YARN_OPTS
+      # and/or YARN_TIMELINESERVER_OPTS.
+      # If not specified, the default value will be picked from either YARN_HEAPMAX
+      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+      export YARN_TIMELINESERVER_HEAPSIZE={{apptimelineserver_heapsize}}
+
+      # Specify the JVM options to be used when starting the NodeManager.
+      # These options will be appended to the options specified as YARN_OPTS
+      # and therefore may override any similar flags set in YARN_OPTS
+      #export YARN_NODEMANAGER_OPTS=
+
+      # so that filenames w/ spaces are handled correctly in loops below
+      IFS=
+
+
+      # default log directory and file
+      if [ "$YARN_LOG_DIR" = "" ]; then
+      YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
+      fi
+      if [ "$YARN_LOGFILE" = "" ]; then
+      YARN_LOGFILE='yarn.log'
+      fi
+
+      # default policy file for service-level authorization
+      if [ "$YARN_POLICYFILE" = "" ]; then
+      YARN_POLICYFILE="hadoop-policy.xml"
+      fi
+
+      # restore ordinary behaviour
+      unset IFS
+
+
+      YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
+      YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
+      YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
+      YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
+      YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
+      YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
+      YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+      YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+      export YARN_NODEMANAGER_OPTS="$YARN_NODEMANAGER_OPTS -Dnm.audit.logger=INFO,NMAUDIT"
+      export YARN_RESOURCEMANAGER_OPTS="$YARN_RESOURCEMANAGER_OPTS -Drm.audit.logger=INFO,RMAUDIT"
+      if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+      YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+      fi
+      YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
+      YARN_OPTS="$YARN_OPTS -Djava.io.tmpdir={{hadoop_java_io_tmpdir}}"
+    </value>
+    <value-attributes>
+      <type>content</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>service_check.queue.name</name>
+    <value>default</value>
+    <description>
+      The queue used by the service check.
+    </description>
+    <depends-on>
+      <property>
+        <type>capacity-scheduler</type>
+        <name>yarn.scheduler.capacity.root.queues</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
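
The content property above is described as a Jinja template; a quick standalone check of how one of its placeholders renders, using the jinja2 package directly instead of Ambari's template machinery (1024 is just the stack default for yarn_heapsize shown above):

    from jinja2 import Template

    snippet = "YARN_HEAPSIZE={{yarn_heapsize}}"
    print Template(snippet).render(yarn_heapsize=1024)  # YARN_HEAPSIZE=1024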

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/yarn-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/yarn-log4j.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/yarn-log4j.xml
new file mode 100644
index 0000000..1d828ee
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/yarn-log4j.xml
@@ -0,0 +1,103 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <property>
+    <name>content</name>
+    <display-name>yarn-log4j template</display-name>
+    <description>Custom log4j.properties</description>
+    <value>
+      #Relative to Yarn Log Dir Prefix
+      yarn.log.dir=.
+      #
+      # Job Summary Appender
+      #
+      # Use following logger to send summary to separate file defined by
+      # hadoop.mapreduce.jobsummary.log.file rolled daily:
+      # hadoop.mapreduce.jobsummary.logger=INFO,JSA
+      #
+      hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+      hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+      log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
+      # Set the ResourceManager summary log filename
+      yarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log
+      # Set the ResourceManager summary log level and appender
+      yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
+      #yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
+
+      # To enable AppSummaryLogging for the RM,
+      # set yarn.server.resourcemanager.appsummary.logger to
+      # LEVEL,RMSUMMARY in hadoop-env.sh
+
+      # Appender for ResourceManager Application Summary Log
+      # Requires the following properties to be set
+      #    - hadoop.log.dir (Hadoop Log directory)
+      #    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
+      #    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
+      log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
+      log4j.appender.RMSUMMARY.File=${yarn.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
+      log4j.appender.RMSUMMARY.MaxFileSize=256MB
+      log4j.appender.RMSUMMARY.MaxBackupIndex=20
+      log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
+      log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+      log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+      log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+      log4j.appender.JSA.DatePattern=.yyyy-MM-dd
+      log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+      log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
+      log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
+
+      # Appender for viewing information for errors and warnings
+      yarn.ewma.cleanupInterval=300
+      yarn.ewma.messageAgeLimitSeconds=86400
+      yarn.ewma.maxUniqueMessages=250
+      log4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender
+      log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
+      log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
+      log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}
+
+      # Audit logging for ResourceManager
+      rm.audit.logger=${hadoop.root.logger}
+      log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=${rm.audit.logger}
+      log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=false
+      log4j.appender.RMAUDIT=org.apache.log4j.DailyRollingFileAppender
+      log4j.appender.RMAUDIT.File=${yarn.log.dir}/rm-audit.log
+      log4j.appender.RMAUDIT.layout=org.apache.log4j.PatternLayout
+      log4j.appender.RMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+      log4j.appender.RMAUDIT.DatePattern=.yyyy-MM-dd
+
+      # Audit logging for NodeManager
+      nm.audit.logger=${hadoop.root.logger}
+      log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=${nm.audit.logger}
+      log4j.additivity.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=false
+      log4j.appender.NMAUDIT=org.apache.log4j.DailyRollingFileAppender
+      log4j.appender.NMAUDIT.File=${yarn.log.dir}/nm-audit.log
+      log4j.appender.NMAUDIT.layout=org.apache.log4j.PatternLayout
+      log4j.appender.NMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+      log4j.appender.NMAUDIT.DatePattern=.yyyy-MM-dd
+    </value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/yarn-logsearch-conf.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/yarn-logsearch-conf.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/yarn-logsearch-conf.xml
new file mode 100644
index 0000000..95cf0c9
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/yarn-logsearch-conf.xml
@@ -0,0 +1,104 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <property>
+    <name>service_name</name>
+    <display-name>Service name</display-name>
+    <description>Service name for Logsearch Portal (label)</description>
+    <value>YARN</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>component_mappings</name>
+    <display-name>Component mapping</display-name>
+    <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
+    <value>RESOURCEMANAGER:yarn_resourcemanager,yarn_historyserver,yarn_jobsummary;NODEMANAGER:yarn_nodemanager;APP_TIMELINE_SERVER:yarn_timelineserver</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>Logfeeder Config</display-name>
+    <description>Metadata jinja template for Logfeeder which contains grok patterns for reading service specific logs.</description>
+    <value>
+{
+  "input":[
+    {
+      "type":"yarn_nodemanager",
+      "rowtype":"service",
+      "path":"{{default('/configurations/yarn-env/yarn_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/yarn-env/yarn_user', 'yarn')}}/yarn-{{default('configurations/yarn-env/yarn_user', 'yarn')}}-nodemanager-*.log"
+    },
+    {
+      "type":"yarn_resourcemanager",
+      "rowtype":"service",
+      "path":"{{default('/configurations/yarn-env/yarn_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/yarn-env/yarn_user', 'yarn')}}/yarn-{{default('configurations/yarn-env/yarn_user', 'yarn')}}-resourcemanager-*.log"
+    },
+    {
+      "type":"yarn_timelineserver",
+      "rowtype":"service",
+      "path":"{{default('/configurations/yarn-env/yarn_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/yarn-env/yarn_user', 'yarn')}}/yarn-{{default('configurations/yarn-env/yarn_user', 'yarn')}}-timelineserver-*.log"
+    },
+    {
+      "type":"yarn_historyserver",
+      "rowtype":"service",
+      "path":"{{default('/configurations/yarn-env/yarn_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/yarn-env/yarn_user', 'yarn')}}/yarn-{{default('configurations/yarn-env/yarn_user', 'yarn')}}-historyserver-*.log"
+    },
+    {
+      "type":"yarn_jobsummary",
+      "rowtype":"service",
+      "path":"{{default('/configurations/yarn-env/yarn_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/yarn-env/yarn_user', 'yarn')}}/hadoop-mapreduce.jobsummary.log"
+    }
+   ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "yarn_historyserver",
+            "yarn_jobsummary",
+            "yarn_nodemanager",
+            "yarn_resourcemanager",
+            "yarn_timelineserver"
+          ]
+         }
+       },
+      "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\(%{JAVAFILE:file}:%{JAVAMETHOD:method}\\(%{INT:line_number}\\)\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+         }
+       }
+     }
+   ]
+}
+    </value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
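
A rough standalone approximation (plain re instead of grok) of what the message_pattern above extracts from a line written in the declared log4j_format "%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n"; the sample line is invented and the group names simply mirror the grok capture names.

    import re

    line = ("2016-12-09 21:56:53,123 INFO  resourcemanager.ResourceManager "
            "(ResourceManager.java:serviceStart(599)) - Transitioned to active state")

    pattern = re.compile(
        r"^(?P<logtime>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})\s+"
        r"(?P<level>[A-Z]+)\s+"
        r"(?P<logger_name>\S+)\s+"
        r"\((?P<file>[^:]+):(?P<method>[^(]+)\((?P<line_number>\d+)\)\)\s+-\s+"
        r"(?P<log_message>.*)$")

    m = pattern.match(line)
    print m.group('logtime'), m.group('level'), m.group('log_message')
    # 2016-12-09 21:56:53,123 INFO Transitioned to active state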

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/yarn-site.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/yarn-site.xml
new file mode 100644
index 0000000..01c3b47
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/yarn-site.xml
@@ -0,0 +1,1151 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<!-- Put site-specific property overrides in this file. -->
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
+  <!-- ResourceManager -->
+  <property>
+    <name>yarn.resourcemanager.hostname</name>
+    <value>localhost</value>
+    <description>The hostname of the RM.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.resource-tracker.address</name>
+    <value>localhost:8025</value>
+    <description> The address of ResourceManager. </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.scheduler.address</name>
+    <value>localhost:8030</value>
+    <description>The address of the scheduler interface.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.address</name>
+    <value>localhost:8050</value>
+    <description>
+      The address of the applications manager interface in the
+      RM.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.admin.address</name>
+    <value>localhost:8141</value>
+    <description>The address of the RM admin interface.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.scheduler.class</name>
+    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
+    <description>The class to use as the resource scheduler.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.minimum-allocation-mb</name>
+    <value>512</value>
+    <description>
+      The minimum allocation for every container request at the RM,
+      in MBs. Memory requests lower than this won't take effect,
+      and the specified value will get allocated at minimum.
+    </description>
+    <display-name>Minimum Container Size (Memory)</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>5120</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.nodemanager.resource.memory-mb</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.maximum-allocation-mb</name>
+    <value>5120</value>
+    <description>
+      The maximum allocation for every container request at the RM,
+      in MBs. Memory requests higher than this won't take effect,
+      and will get capped to this value.
+    </description>
+    <display-name>Maximum Container Size (Memory)</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>5120</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.nodemanager.resource.memory-mb</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.acl.enable</name>
+    <value>false</value>
+    <description>Are ACLs enabled.</description>
+    <depends-on>
+      <property>
+        <type>ranger-yarn-plugin-properties</type>
+        <name>ranger-yarn-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.admin.acl</name>
+    <value>yarn</value>
+    <description> ACL of who can be admin of the YARN cluster. </description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- NodeManager -->
+  <property>
+    <name>yarn.nodemanager.address</name>
+    <value>0.0.0.0:45454</value>
+    <description>The address of the container manager in the NM.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.resource.memory-mb</name>
+    <value>5120</value>
+    <description>Amount of physical memory, in MB, that can be allocated
+      for containers.</description>
+    <display-name>Memory allocated for all YARN containers on a node</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>268435456</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.application.classpath</name>
+    <value>$HADOOP_CONF_DIR,{{stack_root}}/current/hadoop-client/*,{{stack_root}}/current/hadoop-client/lib/*,{{stack_root}}/current/hadoop-hdfs-client/*,{{stack_root}}/current/hadoop-hdfs-client/lib/*,{{stack_root}}/current/hadoop-yarn-client/*,{{stack_root}}/current/hadoop-yarn-client/lib/*</value>
+    <description>Classpath for typical applications.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.vmem-pmem-ratio</name>
+    <value>2.1</value>
+    <description>Ratio between virtual memory to physical memory when
+      setting memory limits for containers. Container allocations are
+      expressed in terms of physical memory, and virtual memory usage
+      is allowed to exceed this allocation by this ratio.
+    </description>
+    <display-name>Virtual Memory Ratio</display-name>
+    <value-attributes>
+      <type>float</type>
+      <minimum>0.1</minimum>
+      <maximum>5.0</maximum>
+      <increment-step>0.1</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.container-executor.class</name>
+    <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
+    <description>ContainerExecutor for launching containers</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.group</name>
+    <value>hadoop</value>
+    <description>Unix group of the NodeManager</description>
+    <depends-on>
+      <property>
+        <type>yarn-env</type>
+        <name>yarn_cgroups_enabled</name>
+      </property>
+      <property>
+        <type>cluster-env</type>
+        <name>user_group</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.aux-services</name>
+    <value>mapreduce_shuffle,spark_shuffle,spark2_shuffle</value>
+    <description>Auxiliary services of NodeManager. A valid service name should only contain a-zA-Z0-9_ and cannot start with a number</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
+    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
+    <description>The auxiliary service class to use </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.log-dirs</name>
+    <value>/hadoop/yarn/log</value>
+    <description>
+      Where to store container logs. An application's localized log directory
+      will be found in ${yarn.nodemanager.log-dirs}/application_${appid}.
+      Individual containers' log directories will be below this, in directories
+      named container_${contid}. Each container directory will contain the files
+      stderr, stdin, and syslog generated by that container.
+    </description>
+    <value-attributes>
+      <type>directories</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.local-dirs</name>
+    <value>/hadoop/yarn/local</value>
+    <description>
+      List of directories to store localized files in. An
+      application's localized file directory will be found in:
+      ${yarn.nodemanager.local-dirs}/usercache/${user}/appcache/application_${appid}.
+      Individual containers' work directories, called container_${contid}, will
+      be subdirectories of this.
+    </description>
+    <value-attributes>
+      <type>directories</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.container-monitor.interval-ms</name>
+    <value>3000</value>
+    <description>
+      The interval, in milliseconds, for which the node manager
+      waits  between two cycles of monitoring its containers' memory usage.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.health-checker.interval-ms</name>
+    <value>135000</value>
+    <description>Frequency of running node health script.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.health-checker.script.timeout-ms</name>
+    <value>60000</value>
+    <description>Script time out period.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.log.retain-seconds</name>
+    <value>604800</value>
+    <description>
+      Time in seconds to retain user logs. Only applicable if
+      log aggregation is disabled.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.log-aggregation-enable</name>
+    <value>true</value>
+    <description>Whether to enable log aggregation. </description>
+    <display-name>Enable Log Aggregation</display-name>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.remote-app-log-dir</name>
+    <value>/app-logs</value>
+    <description>Location to aggregate logs to. </description>
+    <property-type>NOT_MANAGED_HDFS_PATH</property-type>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.remote-app-log-dir-suffix</name>
+    <value>logs</value>
+    <description>
+      The remote log dir will be created at
+      {yarn.nodemanager.remote-app-log-dir}/${user}/{thisParam}.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.log-aggregation.compression-type</name>
+    <value>gz</value>
+    <description>
+      T-file compression types used to compress aggregated logs.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.delete.debug-delay-sec</name>
+    <value>0</value>
+    <description>
+      Number of seconds after an application finishes before the nodemanager's
+      DeletionService will delete the application's localized file directory
+      and log directory.
+
+      To diagnose Yarn application problems, set this property's value large
+      enough (for example, to 600 = 10 minutes) to permit examination of these
+      directories. After changing the property's value, you must restart the
+      nodemanager in order for it to have an effect.
+
+      The roots of Yarn applications' work directories are configurable with
+      the yarn.nodemanager.local-dirs property (see below), and the roots
+      of the Yarn applications' log directories are configurable with the
+      yarn.nodemanager.log-dirs property (see also below).
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.log-aggregation.retain-seconds</name>
+    <value>2592000</value>
+    <description>
+      How long to keep aggregation logs before deleting them. -1 disables.
+      Be careful: if you set this too small, you will spam the name node.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.admin-env</name>
+    <value>MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX</value>
+    <description>
+      Environment variables that should be forwarded from the NodeManager's
+      environment to the container's.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.disk-health-checker.min-healthy-disks</name>
+    <value>0.25</value>
+    <description>
+      The minimum fraction of disks that must be healthy for the nodemanager
+      to launch new containers. This corresponds to both
+      yarn.nodemanager.local-dirs and yarn.nodemanager.log-dirs, i.e.
+      if fewer healthy local-dirs (or log-dirs) are available,
+      then new containers will not be launched on this node.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.am.max-attempts</name>
+    <value>2</value>
+    <description>
+      The maximum number of application attempts. It's a global
+      setting for all application masters. Each application master can specify
+      its individual maximum number of application attempts via the API, but the
+      individual number cannot be more than the global upper bound. If it is,
+      the resourcemanager will override it. The default number is set to 2, to
+      allow at least one retry for AM.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.webapp.address</name>
+    <value>localhost:8088</value>
+    <description>
+      The address of the RM web application.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.webapp.https.address</name>
+    <value>localhost:8090</value>
+    <description>
+      The https address of the RM web application.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.vmem-check-enabled</name>
+    <value>false</value>
+    <description>
+      Whether virtual memory limits will be enforced for containers.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.log.server.url</name>
+    <value>http://localhost:19888/jobhistory/logs</value>
+    <description>
+      URI for the HistoryServer's log resource
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.nodes.exclude-path</name>
+    <value>/etc/hadoop/conf/yarn.exclude</value>
+    <description>
+      Names a file that contains a list of hosts that are
+      not permitted to connect to the resource manager.  The full pathname of the
+      file must be specified.  If the value is empty, no hosts are
+      excluded.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.http.policy</name>
+    <value>HTTP_ONLY</value>
+    <description>
+      This configures the HTTP endpoint for YARN daemons. The following values are supported: HTTP_ONLY (service is provided only on http) and HTTPS_ONLY (service is provided only on https).
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- These configs were inherited from HDP 2.1 -->
+  <property>
+    <name>yarn.timeline-service.enabled</name>
+    <value>true</value>
+    <description>Indicates to clients whether the timeline service is enabled.
+      If enabled, clients will put entities and events to the timeline server.
+    </description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.generic-application-history.store-class</name>
+    <value>org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore</value>
+    <description>
+      Store class name for history store, defaulting to file system store
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.webapp.address</name>
+    <value>localhost:8188</value>
+    <description>
+      The http address of the timeline service web application.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.webapp.https.address</name>
+    <value>localhost:8190</value>
+    <description>
+      The https address of the timeline service web application.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.address</name>
+    <value>localhost:10200</value>
+    <description>
+      This is default address for the timeline server to start
+      the RPC server.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.ttl-ms</name>
+    <description>Time to live for timeline store data in milliseconds.</description>
+    <value>2678400000</value>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms</name>
+    <description>Length of time to wait between deletion cycles of leveldb timeline store in milliseconds.</description>
+    <value>300000</value>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- These properties were inherited from HDP 2.2 -->
+  <property>
+    <name>hadoop.registry.rm.enabled</name>
+    <value>false</value>
+    <description>
+      Is the registry enabled: does the RM start it up, create the user and system paths, and purge service records when containers, application attempts and applications complete
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hadoop.registry.zk.quorum</name>
+    <value>localhost:2181</value>
+    <description>
+      List of hostname:port pairs defining the zookeeper quorum binding for the registry
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.recovery.enabled</name>
+    <value>true</value>
+    <description>Enable the node manager to recover after starting</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.recovery.dir</name>
+    <value>{{yarn_log_dir_prefix}}/nodemanager/recovery-state</value>
+    <description>
+      The local filesystem directory in which the node manager will store
+      state when recovery is enabled.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.client.nodemanager-connect.retry-interval-ms</name>
+    <value>10000</value>
+    <description>Time interval between each attempt to connect to NM</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.client.nodemanager-connect.max-wait-ms</name>
+    <value>60000</value>
+    <description>Max time to wait to establish a connection to NM</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.recovery.enabled</name>
+    <value>true</value>
+    <description>
+      Enable RM to recover state after starting.
+      If true, then yarn.resourcemanager.store.class must be specified.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.work-preserving-recovery.enabled</name>
+    <value>true</value>
+    <description>
+      Enable RM work preserving recovery. This configuration is private to YARN for experimenting with the feature.
+    </description>
+    <display-name>Enable Work Preserving Restart</display-name>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.store.class</name>
+    <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
+    <description>
+      The class to use as the persistent store.
+      If org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore is used,
+      the store is implicitly fenced; meaning a single ResourceManager
+      is able to use the store at any point in time.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.zk-address</name>
+    <value>localhost:2181</value>
+    <description>
+      List of Host:Port of the ZooKeeper servers to be used by the RM: comma-separated host:port pairs, each corresponding to a ZooKeeper server, e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". If the optional chroot suffix is used, the example would look like "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002/app/a", where the client would be rooted at "/app/a" and all paths would be relative to this root, i.e. getting/setting "/foo/bar" would result in operations being run on "/app/a/foo/bar" (from the server perspective).
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.zk-state-store.parent-path</name>
+    <value>/rmstore</value>
+    <description>Full path of the ZooKeeper znode where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore as the value for yarn.resourcemanager.store.class</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.zk-acl</name>
+    <value>world:anyone:rwcda</value>
+    <description>ACL's to be used for ZooKeeper znodes.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms</name>
+    <value>10000</value>
+    <description>Set the amount of time RM waits before allocating new containers on work-preserving-recovery. Such wait period gives RM a chance to settle down resyncing with NMs in the cluster on recovery, before assigning new containers to applications.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.connect.retry-interval.ms</name>
+    <value>30000</value>
+    <description>How often to try connecting to the ResourceManager.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.connect.max-wait.ms</name>
+    <value>900000</value>
+    <description>Maximum time to wait to establish connection to ResourceManager</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.zk-retry-interval-ms</name>
+    <value>1000</value>
+    <description>"Retry interval in milliseconds when connecting to ZooKeeper.
+      When HA is enabled, the value here is NOT used. It is generated
+      automatically from yarn.resourcemanager.zk-timeout-ms and
+      yarn.resourcemanager.zk-num-retries."
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.zk-num-retries</name>
+    <value>1000</value>
+    <description>Number of times RM tries to connect to ZooKeeper.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.zk-timeout-ms</name>
+    <value>10000</value>
+    <description>ZooKeeper session timeout in milliseconds. Session expiration is managed by the ZooKeeper cluster itself, not by the client. This value is used by the cluster to determine when the client's session expires. Expirations happens when the cluster does not hear from the client within the specified session timeout period (i.e. no heartbeat).</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.state-store.max-completed-applications</name>
+    <value>${yarn.resourcemanager.max-completed-applications}</value>
+    <description>The maximum number of completed applications the RM state store keeps, less than or equal to ${yarn.resourcemanager.max-completed-applications}. By default, it equals ${yarn.resourcemanager.max-completed-applications}. This ensures that the applications kept in the state store are consistent with the applications remembered in RM memory. Any value larger than ${yarn.resourcemanager.max-completed-applications} will be reset to ${yarn.resourcemanager.max-completed-applications}. Note that this value impacts the RM recovery performance. Typically, a smaller value indicates better performance on RM recovery.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.fs.state-store.retry-policy-spec</name>
+    <value>2000, 500</value>
+    <description>hdfs client retry policy specification. hdfs client retry is always enabled. Specified in pairs of sleep-time and number-of-retries and (t0, n0), (t1, n1), ..., the first n0 retries sleep t0 milliseconds on average, the following n1 retries sleep t1 milliseconds on average, and so on.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.fs.state-store.uri</name>
+    <value> </value>
+    <description>URI pointing to the location of the FileSystem path where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore as the value for yarn.resourcemanager.store.class </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.ha.enabled</name>
+    <value>false</value>
+    <description>enable RM HA or not</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.resources-handler.class</name>
+    <value>org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler</value>
+    <description>Pre-requisite to use CGroups</description>
+    <depends-on>
+      <property>
+        <type>yarn-env</type>
+        <name>yarn_cgroups_enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.cgroups.hierarchy</name>
+    <value>hadoop-yarn</value>
+    <description>Name of the Cgroups hierarchy under which all YARN jobs will be launched</description>
+    <depends-on>
+      <property>
+        <type>yarn-env</type>
+        <name>yarn_cgroups_enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.cgroups.mount</name>
+    <value>false</value>
+    <description>If true, YARN will automount the CGroup, however the directory needs to already exist; else, the cgroup should be mounted by the admin</description>
+    <depends-on>
+      <property>
+        <type>yarn-env</type>
+        <name>yarn_cgroups_enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.cgroups.mount-path</name>
+    <value>/cgroup</value>
+    <description>Path used by the LCE to mount cgroups if not found. This path must exist before the NodeManager is launched.</description>
+    <depends-on>
+      <property>
+        <type>yarn-env</type>
+        <name>yarn_cgroups_enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage</name>
+    <value>false</value>
+    <description>Strictly limit CPU resource usage to allocated usage even if spare CPU is available</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.resource.cpu-vcores</name>
+    <value>8</value>
+    <description>Number of vcores that can be allocated
+      for containers. This is used by the RM scheduler when allocating
+      resources for containers. This is not used to limit the number of
+      CPUs used by YARN containers. If it is set to -1 and
+      yarn.nodemanager.resource.detect-hardware-capabilities is true, it is
+      automatically determined from the hardware in case of Windows and Linux.
+      In other cases, number of vcores is 8 by default.
+    </description>
+    <display-name>Number of virtual cores</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>32</maximum>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
+    <value>80</value>
+    <description>The amount of CPU allocated for YARN containers - only effective when used with CGroups</description>
+    <display-name>Percentage of physical CPU allocated for all containers on a node</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>100</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.node-labels.fs-store.retry-policy-spec</name>
+    <value>2000, 500</value>
+    <description>
+      Retry policy used for FileSystem node label store. The policy is
+      specified by N pairs of sleep-time in milliseconds and number-of-retries
+      &quot;s1,n1,s2,n2,...&quot;.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb</name>
+    <value>1000</value>
+    <description>This is related to disk size on the machines, admins should set one of yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb or yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage but not both. If both are set, the more conservative value will be used</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name>
+    <value>90</value>
+    <description>This is related to disk size on the machines, admins should set one of yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb or yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage but not both. If both are set, the more conservative value will be used</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</name>
+    <description>Defines how often NMs wake up to upload log files. The default value is -1. By default, the logs will be uploaded when the application is finished. By setting this configuration, logs can be uploaded periodically while the application is running. The minimum rolling-interval-seconds that can be set is 3600.</description>
+    <value>3600</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.log-aggregation.debug-enabled</name>
+    <value>false</value>
+    <description>
+      This configuration is for debug and test purposes.
+      By setting this configuration to true,
+      we can break the lower bound of yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.log-aggregation.num-log-files-per-app</name>
+    <value>30</value>
+    <description>This is a temporary solution. The configuration will be deleted once we find a more scalable method to write only a single log file per LRS.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.system-metrics-publisher.enabled</name>
+    <value>true</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size</name>
+    <value>10</value>
+    <description>Number of worker threads that send the yarn system metrics data.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.client.max-retries</name>
+    <value>30</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.client.retry-interval-ms</name>
+    <value>1000</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.ttl-enable</name>
+    <value>true</value>
+    <description>
+      Enable age off of timeline store data.
+    </description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.state-store-class</name>
+    <value>org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore</value>
+    <description>Store class name for timeline state store.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.leveldb-state-store.path</name>
+    <value>/hadoop/yarn/timeline</value>
+    <description>Store file name for leveldb state store.</description>
+    <value-attributes>
+      <type>directory</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.leveldb-timeline-store.path</name>
+    <value>/hadoop/yarn/timeline</value>
+    <description>Store file name for leveldb timeline store.</description>
+    <value-attributes>
+      <type>directory</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.leveldb-timeline-store.read-cache-size</name>
+    <value>104857600</value>
+    <description>
+      Size of read cache for uncompressed blocks for leveldb timeline store in bytes.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size</name>
+    <value>10000</value>
+    <description>
+      Size of cache for recently read entity start times for leveldb timeline store in number of entities.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size</name>
+    <value>10000</value>
+    <description>
+      Size of cache for recently written entity start times for leveldb timeline store in number of entities.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.http-authentication.type</name>
+    <value>simple</value>
+    <description>
+      Defines authentication used for the Timeline Server HTTP endpoint.
+      Supported values are: simple | kerberos | $AUTHENTICATION_HANDLER_CLASSNAME
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.http-authentication.simple.anonymous.allowed</name>
+    <value>true</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled</name>
+    <value>false</value>
+    <description>
+      Flag to enable override of the default kerberos authentication filter with
+      the RM authentication filter to allow authentication using delegation
+      tokens(fallback to kerberos if the tokens are missing).
+      Only applicable when the http authentication type is kerberos.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.bind-host</name>
+    <value>0.0.0.0</value>
+    <description>The default value is 0.0.0.0; when this is set, the service will bind on all interfaces. Arguably the two available values should be blank and "0.0.0.0" (sans quotes), with blank as the default.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.bind-host</name>
+    <value>0.0.0.0</value>
+    <description>The default value is 0.0.0.0; when this is set, the service will bind on all interfaces. Arguably the two available values should be blank and "0.0.0.0" (sans quotes), with blank as the default.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.bind-host</name>
+    <value>0.0.0.0</value>
+    <description>The default value is 0.0.0.0; when this is set, the service will bind on all interfaces. Arguably the two available values should be blank and "0.0.0.0" (sans quotes), with blank as the default.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.node-labels.fs-store.root-dir</name>
+    <value>/system/yarn/node-labels</value>
+    <description>
+      URI for NodeLabelManager.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.minimum-allocation-vcores</name>
+    <value>1</value>
+    <description/>
+    <display-name>Minimum Container Size (VCores)</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>8</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.nodemanager.resource.cpu-vcores</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.maximum-allocation-vcores</name>
+    <value>8</value>
+    <description/>
+    <display-name>Maximum Container Size (VCores)</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>8</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.nodemanager.resource.cpu-vcores</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.node-labels.enabled</name>
+    <value>false</value>
+    <description>
+      Enable node labels to restrict YARN applications so that they run only on cluster nodes that have a specified node label.
+    </description>
+    <display-name>Node Labels</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Enabled</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Disabled</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.container-executor.class</name>
+    <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
+    <description>ContainerExecutor for launching containers</description>
+    <depends-on>
+      <property>
+        <type>yarn-env</type>
+        <name>yarn_cgroups_enabled</name>
+      </property>
+      <property>
+        <type>core-site</type>
+        <name>hadoop.security.authentication</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.scheduler.monitor.enable</name>
+    <description>
+      Enable a set of periodic monitors (specified in
+      yarn.resourcemanager.scheduler.monitor.policies) that affect the
+      scheduler.
+    </description>
+    <value>false</value>
+    <display-name>Pre-emption</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Enabled</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Disabled</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- In HDP 2.3, these properties were deleted:
+yarn.node-labels.manager-class
+-->
+
+  <!-- These configs were inherited from HDP 2.3 -->
+  <property>
+    <name>yarn.timeline-service.recovery.enabled</name>
+    <description>
+      Enable timeline server to recover state after starting. If
+      true, then yarn.timeline-service.state-store-class must be specified.
+    </description>
+    <value>true</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.authorization-provider</name>
+    <description> Yarn authorization provider class. </description>
+    <depends-on>
+      <property>
+        <type>ranger-yarn-plugin-properties</type>
+        <name>ranger-yarn-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!--ats v1.5 properties-->
+  <property>
+    <name>yarn.timeline-service.version</name>
+    <value>1.5</value>
+    <description>Timeline service version we&#x2019;re currently using.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.store-class</name>
+    <value>org.apache.hadoop.yarn.server.timeline.EntityGroupFSTimelineStore</value>
+    <description>Main storage class for YARN timeline server.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.active-dir</name>
+    <value>/ats/active/</value>
+    <description>DFS path to store active application&#x2019;s timeline data</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.done-dir</name>
+    <value>/ats/done/</value>
+    <description>DFS path to store done application&#x2019;s timeline data</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes</name>
+    <value/>
+    <description>Plugins that can translate a timeline entity read request into a list of timeline cache ids, separated by commas. </description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- advanced ats v1.5 properties-->
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.summary-store</name>
+    <description>Summary storage for ATS v1.5</description>
+    <!-- Use rolling leveldb, advanced -->
+    <value>org.apache.hadoop.yarn.server.timeline.RollingLevelDBTimelineStore</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.scan-interval-seconds</name>
+    <description>
+      Scan interval for ATS v1.5 entity group file system storage reader. This
+      value controls how frequently the reader will scan the HDFS active directory
+      for application status.
+    </description>
+    <!-- Default is 60 seconds, advanced -->
+    <value>60</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.cleaner-interval-seconds</name>
+    <description>
+      Scan interval for ATS v1.5 entity group file system storage cleaner. This
+      value controls how frequently the cleaner will scan the HDFS done directory
+      for stale application data.
+    </description>
+    <!-- 3600 is default, advanced -->
+    <value>3600</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.retain-seconds</name>
+    <description>
+      How long the ATS v1.5 entity group file system storage will keep an
+      application's data in the done directory.
+    </description>
+    <!-- 7 days is default, advanced -->
+    <value>604800</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- These configs were inherited from HDP 2.4 -->
+  <property>
+    <name>yarn.nodemanager.aux-services.spark_shuffle.class</name>
+    <value>org.apache.spark.network.yarn.YarnShuffleService</value>
+    <description>The auxiliary service class to use for Spark</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- These configs were inherited from HDP 2.5 -->
+  <property>
+    <name>yarn.nodemanager.aux-services</name>
+    <value>mapreduce_shuffle,spark_shuffle,spark2_shuffle</value>
+    <description>Auxiliary services of NodeManager. A valid service name should only contain a-zA-Z0-9_ and cannot start with a number</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.aux-services.spark2_shuffle.class</name>
+    <value>org.apache.spark.network.yarn.YarnShuffleService</value>
+    <description>The auxiliary service class to use for Spark 2</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.container-metrics.unregister-delay-ms</name>
+    <value>60000</value>
+    <description>The delay time, in ms, to unregister container metrics after completion.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath</name>
+    <value/>
+    <description>Classpath for all plugins defined in yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes.</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
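
The yarn-site.xml definition above follows the usual Ambari stack-configuration layout: each <property> carries a name, a default value, an optional description, value-attributes for the UI, optional depends-on entries, and an on-ambari-upgrade flag. Ambari's server reads these files with its own Java stack parser; the sketch below is only an assumed, quick way to inspect such a file with standard Python, e.g. to list which properties are added on an Ambari upgrade.

    import xml.etree.ElementTree as ET

    # Assumed local checkout path to a stack configuration file like the one above.
    path = ("ambari-server/src/main/resources/common-services/"
            "YARN/3.0.0/configuration/yarn-site.xml")

    root = ET.parse(path).getroot()            # the <configuration> element
    for prop in root.findall("property"):
        name = prop.findtext("name")
        value = prop.findtext("value") or ""
        upgrade = prop.find("on-ambari-upgrade")
        add_on_upgrade = upgrade.get("add") if upgrade is not None else "n/a"
        print("%-70s add-on-upgrade=%s value=%r" % (name, add_on_upgrade, value))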

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/kerberos.json b/ambari-server/src/main/resources/common-services/YARN/3.0.0/kerberos.json
new file mode 100644
index 0000000..e690204
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/kerberos.json
@@ -0,0 +1,278 @@
+{
+  "services": [
+    {
+      "name": "YARN",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "yarn-site": {
+            "yarn.timeline-service.enabled": "true",
+            "yarn.timeline-service.http-authentication.type": "kerberos",
+            "yarn.acl.enable": "true",
+            "yarn.admin.acl": "${yarn-env/yarn_user},dr.who",
+            "yarn.timeline-service.http-authentication.signature.secret": "",
+            "yarn.timeline-service.http-authentication.signature.secret.file": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
+            "yarn.timeline-service.http-authentication.token.validity": "",
+            "yarn.timeline-service.http-authentication.cookie.domain": "",
+            "yarn.timeline-service.http-authentication.cookie.path": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
+            "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
+            "yarn.resourcemanager.proxyusers.*.groups": "",
+            "yarn.resourcemanager.proxyusers.*.hosts": "",
+            "yarn.resourcemanager.proxyusers.*.users": "",
+            "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
+            "yarn.nodemanager.linux-container-executor.cgroups.mount-path": ""
+          }
+        },
+        {
+          "core-site": {
+            "hadoop.proxyuser.${yarn-env/yarn_user}.groups": "*",
+            "hadoop.proxyuser.${yarn-env/yarn_user}.hosts": "${clusterHostInfo/rm_host}"
+          }
+        },
+        {
+          "capacity-scheduler": {
+            "yarn.scheduler.capacity.root.acl_administer_queue": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_administer_queue": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.acl_administer_jobs": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_administer_jobs": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_submit_applications": "${yarn-env/yarn_user}"
+          }
+        },
+        {
+          "ranger-yarn-audit": {
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "NODEMANAGER",
+          "identities": [
+            {
+              "name": "nodemanager_nm",
+              "principal": {
+                "value": "nm/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.nodemanager.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nm.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.nodemanager.keytab"
+              }
+            },
+            {
+              "name": "/HIVE/HIVE_SERVER/hive_server_hive",
+              "principal": {
+                "configuration": "hive-interactive-site/hive.llap.daemon.service.principal"
+              },
+              "keytab": {
+                "configuration": "hive-interactive-site/hive.llap.daemon.keytab.file"
+              },
+              "when" : {
+                "contains" : ["services", "HIVE"]
+              }
+            },
+            {
+              "name": "llap_zk_hive",
+              "principal": {
+                "value": "hive/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hive-interactive-site/hive.llap.zk.sm.principal"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hive.llap.zk.sm.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": "r"
+                },
+                "configuration": "hive-interactive-site/hive.llap.zk.sm.keytab.file"
+              },
+              "when" : {
+                "contains" : ["services", "HIVE"]
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-keytab-file"
+              }
+            }
+          ],
+          "configurations": [
+            {
+              "yarn-site": {
+                "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor"
+              }
+            }
+          ]
+        },
+        {
+          "name": "RESOURCEMANAGER",
+          "identities": [
+            {
+              "name": "resource_manager_rm",
+              "principal": {
+                "value": "rm/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.resourcemanager.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/rm.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.resourcemanager.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file"
+              }
+            },
+            {
+              "name": "/YARN/RESOURCEMANAGER/resource_manager_rm",
+              "principal": {
+                "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.principal"
+              },
+              "keytab": {
+                "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.keyTab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "APP_TIMELINE_SERVER",
+          "identities": [
+            {
+              "name": "app_timeline_server_yarn",
+              "principal": {
+                "value": "yarn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.timeline-service.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/yarn.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.timeline-service.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
+              }
+            },
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "name": "MAPREDUCE2",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "components": [
+        {
+          "name": "HISTORYSERVER",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "history_server_jhs",
+              "principal": {
+                "value": "jhs/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "mapred-site/mapreduce.jobhistory.principal",
+                "local_username": "${mapred-env/mapred_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/jhs.service.keytab",
+                "owner": {
+                  "name": "${mapred-env/mapred_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "mapred-site/mapreduce.jobhistory.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
\ No newline at end of file


[48/51] [abbrv] ambari git commit: AMBARI-19077: Ambari-server: Gather dependent configuration types and password properties for a service component

Posted by sm...@apache.org.
AMBARI-19077: Ambari-server: Gather dependent configuration types and password properties for a service component


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d22422ba
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d22422ba
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d22422ba

Branch: refs/heads/branch-feature-AMBARI-18901
Commit: d22422ba18fff615416231574c45332a13c045d7
Parents: 24ac5cd
Author: Nahappan Somasundaram <ns...@hortonworks.com>
Authored: Sat Dec 3 10:37:53 2016 -0800
Committer: Nahappan Somasundaram <ns...@hortonworks.com>
Committed: Fri Dec 9 07:56:40 2016 -0800

----------------------------------------------------------------------
 .../ambari_agent/CustomServiceOrchestrator.py   | 156 +++++++++++++------
 .../ambari/server/agent/ExecutionCommand.java   |  33 ++++
 .../AmbariManagementControllerImpl.java         |  13 ++
 .../ambari/server/state/ConfigHelper.java       |  45 ++++++
 .../ambari/server/state/PropertyInfo.java       |  15 ++
 5 files changed, 218 insertions(+), 44 deletions(-)
----------------------------------------------------------------------
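
At a high level, the server side of this change collects PASSWORD-typed properties per config type (ConfigHelper.getPropertiesWithPropertyType) and ships them to the agent on the ExecutionCommand as configuration_credentials; the agent then resolves the aliases, writes the values into per-config-type JCEKS files, strips the clear-text passwords from the command, and points hadoop.security.credential.provider.path at the generated providers. Below is a minimal Python sketch of the provider-creation step the agent performs; the paths and the alias/password pair are hypothetical, not taken from a real command:

    import os
    import subprocess

    java_bin = '/usr/jdk64/jdk1.8.0/bin/java'          # hostLevelParams/java_home (illustrative)
    cs_lib_path = '/var/lib/ambari-agent/cred/lib/*'   # credential shell classpath (illustrative)
    credential_shell = 'org.apache.hadoop.security.alias.CredentialShell'

    file_path = '/var/lib/ambari-agent/cred/conf/OOZIE/oozie-site.jceks'  # hypothetical provider file
    provider_path = 'jceks://file{0}'.format(file_path)

    # One 'create' call per alias, mirroring the loop in generateJceks()
    cmd = (java_bin, '-cp', cs_lib_path, credential_shell, 'create',
           'oozie.service.JPAService.jdbc.password', '-value', 'MyOozieJdbcPassword',
           '-provider', provider_path)
    subprocess.call(cmd)
    os.chmod(file_path, 0o644)  # group/other read access so the service user can read the keystore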


http://git-wip-us.apache.org/repos/asf/ambari/blob/d22422ba/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py b/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py
index f9ed4cf..5fd3068 100644
--- a/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py
+++ b/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py
@@ -72,6 +72,9 @@ class CustomServiceOrchestrator():
   DEFAULT_CREDENTIAL_CONF_DIR = '/var/lib/ambari-agent/cred/conf'
   DEFAULT_CREDENTIAL_SHELL_CMD = 'org.apache.hadoop.security.alias.CredentialShell'
 
+  # The property name used by the hadoop credential provider
+  CREDENTIAL_PROVIDER_PROPERTY_NAME = 'hadoop.security.credential.provider.path'
+
   def __init__(self, config, controller):
     self.config = config
     self.tmp_dir = config.get('agent', 'prefix')
@@ -147,21 +150,97 @@ class CustomServiceOrchestrator():
 
     return conf_dir
 
-  def getAffectedConfigTypes(self, commandJson):
+  def getConfigTypeCredentials(self, commandJson):
     """
     Gets the affected config types for the service in this command
+    with the password aliases and values.
+
+    Input:
+    {
+        "config-type1" : {
+          "password_key_name1":"password_value_name1",
+          "password_key_name2":"password_value_name2",
+            :
+        },
+        "config-type2" : {
+          "password_key_name1":"password_value_name1",
+          "password_key_name2":"password_value_name2",
+            :
+        },
+           :
+    }
+
+    Output:
+    {
+        "config-type1" : {
+          "alias1":"password1",
+          "alias2":"password2",
+            :
+        },
+        "config-type2" : {
+          "alias1":"password1",
+          "alias2":"password2",
+            :
+        },
+           :
+    }
+
+    If password_key_name is the same as password_value_name, then password_key_name is the password alias itself.
+    The value it points to is the password value.
+
+    If password_key_name is not the same as the password_value_name, then password_key_name points to the alias.
+    The value is pointed to by password_value_name.
+
+    For example:
+    Input:
+    {
+      "oozie-site" : {"oozie.service.JPAService.jdbc.password" : "oozie.service.JPAService.jdbc.password"},
+      "admin-properties" {"db_user":"db_password", "ranger.jpa.jdbc.credential.alias:ranger-admin-site" : "db_password"}
+    }
+
+    Output:
+    {
+      "oozie-site" : {"oozie.service.JPAService.jdbc.password" : "MyOozieJdbcPassword"},
+      "admin-properties" {"rangerdba" : "MyRangerDbaPassword", "rangeradmin":"MyRangerDbaPassword"},
+    }
 
     :param commandJson:
     :return:
     """
-    return commandJson.get('configuration_attributes')
-
-  def getCredentialProviderPropertyName(self):
-    """
-    Gets the property name used by the hadoop credential provider
-    :return:
-    """
-    return 'hadoop.security.credential.provider.path'
+    configtype_credentials = {}
+    if 'configuration_credentials' in commandJson:
+      for config_type, password_properties in commandJson['configuration_credentials'].items():
+        if config_type in commandJson['configurations']:
+          value_names = []
+          config = commandJson['configurations'][config_type]
+          credentials = {}
+          for key_name, value_name in password_properties.items():
+            if key_name == value_name:
+              if value_name in config:
+                # password name is the alias
+                credentials[key_name] = config[value_name]
+                value_names.append(value_name) # Gather the value_name for deletion
+            else:
+              keyname_keyconfig = key_name.split(':')
+              key_name = keyname_keyconfig[0]
+              # if the key is in another configuration (cross reference),
+              # get the value of the key from that configuration
+              if (len(keyname_keyconfig) > 1):
+                if keyname_keyconfig[1] not in commandJson['configurations']:
+                  continue
+                key_config = commandJson['configurations'][keyname_keyconfig[1]]
+              else:
+                key_config = config
+              if key_name in key_config and value_name in config:
+                # password name points to the alias
+                credentials[key_config[key_name]] = config[value_name]
+                value_names.append(value_name) # Gather the value_name for deletion
+          if len(credentials) > 0:
+            configtype_credentials[config_type] = credentials
+          for value_name in value_names:
+            # Remove the clear text password
+            config.pop(value_name, None)
+    return configtype_credentials
 
   def generateJceks(self, commandJson):
     """
@@ -178,16 +257,6 @@ class CustomServiceOrchestrator():
 
     logger.info('generateJceks: roleCommand={0}'.format(roleCommand))
 
-    # Password properties for a config type, if present,
-    # are under configuration_attributes:config_type:hidden:{prop1:attributes1, prop2, attributes2}
-    passwordProperties = {}
-    config_types = self.getAffectedConfigTypes(commandJson)
-    for config_type in config_types:
-      elem = config_types.get(config_type)
-      hidden = elem.get('hidden')
-      if hidden is not None:
-        passwordProperties[config_type] = hidden
-
     # Set up the variables for the external command to generate a JCEKS file
     java_home = commandJson['hostLevelParams']['java_home']
     java_bin = '{java_home}/bin/java'.format(java_home=java_home)
@@ -196,31 +265,30 @@ class CustomServiceOrchestrator():
     serviceName = commandJson['serviceName']
 
     # Gather the password values and remove them from the configuration
-    configs = commandJson.get('configurations')
-    for key, value in passwordProperties.items():
-      config = configs.get(key)
-      if config is not None:
-        file_path = os.path.join(self.getProviderDirectory(serviceName), "{0}.jceks".format(key))
-        if os.path.exists(file_path):
-          os.remove(file_path)
-        provider_path = 'jceks://file{file_path}'.format(file_path=file_path)
-        logger.info('provider_path={0}'.format(provider_path))
-        for alias in value:
-          pwd = config.get(alias)
-          if pwd is not None:
-            # Remove the clear text password
-            config.pop(alias, None)
-            # Add JCEKS provider path instead
-            config[self.getCredentialProviderPropertyName()] = provider_path
-            logger.debug("config={0}".format(config))
-            protected_pwd = PasswordString(pwd)
-            # Generate the JCEKS file
-            cmd = (java_bin, '-cp', cs_lib_path, self.credential_shell_cmd, 'create',
-                   alias, '-value', protected_pwd, '-provider', provider_path)
-            logger.info(cmd)
-            cmd_result = subprocess.call(cmd)
-            logger.info('cmd_result = {0}'.format(cmd_result))
-            os.chmod(file_path, 0644) # group and others should have read access so that the service user can read
+    provider_paths = [] # A service may depend on multiple configs
+    configtype_credentials = self.getConfigTypeCredentials(commandJson)
+    for config_type, credentials in configtype_credentials.items():
+      config = commandJson['configurations'][config_type]
+      file_path = os.path.join(self.getProviderDirectory(serviceName), "{0}.jceks".format(config_type))
+      if os.path.exists(file_path):
+        os.remove(file_path)
+      provider_path = 'jceks://file{file_path}'.format(file_path=file_path)
+      provider_paths.append(provider_path)
+      logger.info('provider_path={0}'.format(provider_path))
+      for alias, pwd in credentials.items():
+        logger.debug("config={0}".format(config))
+        protected_pwd = PasswordString(pwd)
+        # Generate the JCEKS file
+        cmd = (java_bin, '-cp', cs_lib_path, self.credential_shell_cmd, 'create',
+               alias, '-value', protected_pwd, '-provider', provider_path)
+        logger.info(cmd)
+        cmd_result = subprocess.call(cmd)
+        logger.info('cmd_result = {0}'.format(cmd_result))
+        os.chmod(file_path, 0644) # group and others should have read access so that the service user can read
+
+    if provider_paths:
+      # Add JCEKS provider paths instead
+      config[self.CREDENTIAL_PROVIDER_PROPERTY_NAME] = ','.join(provider_paths)
 
     return cmd_result
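
To make the mapping concrete, here is the oozie-site/admin-properties example from the getConfigTypeCredentials() docstring expressed as Python literals; the configuration values are invented for illustration, but the expected result matches what the docstring describes:

    # Relevant fragments of a hypothetical command JSON
    configurations = {
        'oozie-site':        {'oozie.service.JPAService.jdbc.password': 'MyOozieJdbcPassword'},
        'admin-properties':  {'db_user': 'rangerdba', 'db_password': 'MyRangerDbaPassword'},
        'ranger-admin-site': {'ranger.jpa.jdbc.credential.alias': 'rangeradmin'},
    }
    configuration_credentials = {
        'oozie-site': {'oozie.service.JPAService.jdbc.password': 'oozie.service.JPAService.jdbc.password'},
        'admin-properties': {'db_user': 'db_password',
                             'ranger.jpa.jdbc.credential.alias:ranger-admin-site': 'db_password'},
    }

    # Expected result: config type -> {alias: password}; the clear-text values are
    # popped from 'configurations' as a side effect.
    # {'oozie-site':       {'oozie.service.JPAService.jdbc.password': 'MyOozieJdbcPassword'},
    #  'admin-properties': {'rangerdba': 'MyRangerDbaPassword', 'rangeradmin': 'MyRangerDbaPassword'}}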
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/d22422ba/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
index e46167a..5c4f08e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
@@ -119,6 +119,39 @@ public class ExecutionCommand extends AgentCommand {
   @SerializedName("credentialStoreEnabled")
   private String credentialStoreEnabled;
 
+  /**
+   * Map of config type to list of password properties
+   *   <pre>
+   *     {@code
+   *       {
+   *         "config_type1" :
+   *           {
+   *             "password_alias_name1:type1":"password_value_name1",
+   *             "password_alias_name2:type2":"password_value_name2",
+   *                 :
+   *           },
+   *         "config_type2" :
+   *           {
+   *             "password_alias_name1:type1":"password_value_name1",
+   *             "password_alias_name2:type2":"password_value_name2",
+   *                 :
+   *           },
+   *                 :
+   *       }
+   *     }
+   *   </pre>
+   */
+  @SerializedName("configuration_credentials")
+  private Map<String, Map<String, String>> configurationCredentials;
+
+  public void setConfigurationCredentials(Map<String, Map<String, String>> configurationCredentials) {
+    this.configurationCredentials = configurationCredentials;
+  }
+
+  public Map<String, Map<String, String>> getConfigurationCredentials() {
+    return this.configurationCredentials;
+  }
+
   public String getCommandId() {
     return commandId;
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/d22422ba/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 9bf046b..b19a46c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -341,6 +341,8 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
   @Inject
   private AmbariActionExecutionHelper actionExecutionHelper;
 
+  private Map<String, Map<String, Map<String, String>>> configCredentialsForService = new HashMap<>();
+
   @Inject
   public AmbariManagementControllerImpl(ActionManager actionManager,
       Clusters clusters, Injector injector) throws Exception {
@@ -2143,6 +2145,17 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     Service clusterService = cluster.getService(serviceName);
     execCmd.setCredentialStoreEnabled(String.valueOf(clusterService.isCredentialStoreEnabled()));
 
+    // Get the map of service config type to password properties for the service
+    Map<String, Map<String, String>> configCredentials;
+    configCredentials = configCredentialsForService.get(clusterService.getName());
+    if (configCredentials == null) {
+      configCredentials = configHelper.getPropertiesWithPropertyType(stackId, clusterService,
+              PropertyType.PASSWORD);
+      configCredentialsForService.put(clusterService.getName(), configCredentials);
+    }
+
+    execCmd.setConfigurationCredentials(configCredentials);
+
     // Create a local copy for each command
     Map<String, String> commandParams = new TreeMap<String, String>();
     if (commandParamsInp != null) { // if not defined

http://git-wip-us.apache.org/repos/asf/ambari/blob/d22422ba/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
index 8cf62ad..5f8beaf 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
@@ -41,6 +41,7 @@ import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
 import org.apache.ambari.server.state.PropertyInfo.PropertyType;
 import org.apache.ambari.server.state.configgroup.ConfigGroup;
 import org.apache.ambari.server.utils.SecretReference;
+import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -488,6 +489,50 @@ public class ConfigHelper {
     return result;
   }
 
+  /**
+   * Gets a map of config types to password property names to password property value names.
+   *
+   * @param stackId
+   * @param service
+   * @param propertyType
+   * @return
+   * @throws AmbariException
+     */
+  public Map<String, Map<String, String>> getPropertiesWithPropertyType(StackId stackId, Service service, PropertyType propertyType)
+          throws AmbariException {
+    StackInfo stack = ambariMetaInfo.getStack(stackId.getStackName(), stackId.getStackVersion());
+    Map<String, Map<String, String>> result = new HashMap<>();
+    Map<String, String> passwordProperties;
+    Set<PropertyInfo> serviceProperties = ambariMetaInfo.getServiceProperties(stack.getName(), stack.getVersion(), service.getName());
+    for (PropertyInfo serviceProperty : serviceProperties) {
+      if (serviceProperty.getPropertyTypes().contains(propertyType)) {
+        String stackPropertyConfigType = fileNameToConfigType(serviceProperty.getFilename());
+        passwordProperties = result.get(stackPropertyConfigType);
+        if (passwordProperties == null) {
+          passwordProperties = new HashMap<>();
+          result.put(stackPropertyConfigType, passwordProperties);
+        }
+        // If the password property is used by another property, it means the password property
+        // is a password value name while the use is the password alias name. If the user property
+        // is from another config type, include that in the password alias name as name:type.
+        if (serviceProperty.getUsedByProperties().size() > 0) {
+          for (PropertyDependencyInfo usedByProperty : serviceProperty.getUsedByProperties()) {
+            String propertyName = usedByProperty.getName();
+            if (!StringUtils.isEmpty(usedByProperty.getType())) {
+              propertyName += ':' + usedByProperty.getType();
+            }
+            passwordProperties.put(propertyName, serviceProperty.getName());
+          }
+        }
+        else {
+          passwordProperties.put(serviceProperty.getName(), serviceProperty.getName());
+        }
+      }
+    }
+
+    return result;
+  }
+
   public Set<String> getPropertyValuesWithPropertyType(StackId stackId, PropertyType propertyType,
       Cluster cluster, Map<String, DesiredConfig> desiredConfigs) throws AmbariException {
     StackInfo stack = ambariMetaInfo.getStack(stackId.getStackName(), stackId.getStackVersion());
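
As an illustration of the structure returned by getPropertiesWithPropertyType() above: for a stack PASSWORD property db_password in admin-properties that is marked as used by db_user (same config type) and by ranger.jpa.jdbc.credential.alias from ranger-admin-site, the method would yield a map along these lines (shown as a Python-style literal; the names follow the agent-side docstring example rather than a specific stack definition):

    {
        'admin-properties': {
            'db_user': 'db_password',
            'ranger.jpa.jdbc.credential.alias:ranger-admin-site': 'db_password',
        }
    }

This is the configuration_credentials shape that the agent's getConfigTypeCredentials() consumes on the other end.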

http://git-wip-us.apache.org/repos/asf/ambari/blob/d22422ba/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
index f89be4d..5881a10 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
@@ -76,6 +76,17 @@ public class PropertyInfo {
   private Set<PropertyDependencyInfo> dependedByProperties =
     new HashSet<PropertyDependencyInfo>();
 
+  /**
+   * The list of properties that use this property.
+   * Password properties may be used by other properties in
+   * the same config type or different config type, typically
+   * when asking for user name and password pairs.
+   */
+  @XmlElementWrapper(name="used-by")
+  @XmlElement(name = "property")
+  private Set<PropertyDependencyInfo> usedByProperties =
+          new HashSet<>();
+
   //This method is called after all the properties (except IDREF) are unmarshalled for this object,
   //but before this object is set to the parent object.
   void afterUnmarshal(Unmarshaller unmarshaller, Object parent) {
@@ -96,6 +107,10 @@ public class PropertyInfo {
     this.name = name;
   }
 
+  public Set<PropertyDependencyInfo> getUsedByProperties() {
+    return usedByProperties;
+  }
+
   public String getValue() {
     return value;
   }


[19/51] [abbrv] ambari git commit: AMBARI-19066. Add more logging around status command report processing on server side (magyari_sandor)

Posted by sm...@apache.org.
AMBARI-19066. Add more logging around status command report processing on server side (magyari_sandor)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/51c6ef9e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/51c6ef9e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/51c6ef9e

Branch: refs/heads/branch-feature-AMBARI-18901
Commit: 51c6ef9e157991d96535ff6e0b86a6e9b8878883
Parents: 4f3a67d
Author: Sandor Magyari <sm...@hortonworks.com>
Authored: Thu Dec 8 15:57:04 2016 +0100
Committer: Sandor Magyari <sm...@hortonworks.com>
Committed: Thu Dec 8 18:35:34 2016 +0100

----------------------------------------------------------------------
 .../main/java/org/apache/ambari/server/agent/AgentRequests.java    | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/51c6ef9e/ambari-server/src/main/java/org/apache/ambari/server/agent/AgentRequests.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/AgentRequests.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/AgentRequests.java
index 2980f38..01195bf 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/AgentRequests.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/AgentRequests.java
@@ -45,9 +45,9 @@ public class AgentRequests {
 
   public void setExecutionDetailsRequest(String host, String component, String requestExecutionCmd) {
     if (StringUtils.isNotBlank(requestExecutionCmd)) {
-      LOG.debug("Setting need for exec command to " + requestExecutionCmd + " for " + component);
       Map<String, Boolean> perHostRequiresExecCmdDetails = getPerHostRequiresExecCmdDetails(host);
       if (Boolean.TRUE.toString().toUpperCase().equals(requestExecutionCmd.toUpperCase())) {
+        LOG.info("Setting need for exec command to " + requestExecutionCmd + " for " + component);
         perHostRequiresExecCmdDetails.put(component, Boolean.TRUE);
       } else {
         perHostRequiresExecCmdDetails.put(component, Boolean.FALSE);


[16/51] [abbrv] ambari git commit: Revert "AMBARI-19066. Add more logging around status command report processing on server side (magyari_sandor)"

Posted by sm...@apache.org.
Revert "AMBARI-19066. Add more logging around status command report processing on server side (magyari_sandor)"

This reverts commit ebe954bb104cf3425cc66f31b9e43d51a6719414.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8bdb7454
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8bdb7454
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8bdb7454

Branch: refs/heads/branch-feature-AMBARI-18901
Commit: 8bdb7454690b1af51402a996f63958c004c6ec22
Parents: 59f520b
Author: Sandor Magyari <sm...@hortonworks.com>
Authored: Thu Dec 8 15:42:18 2016 +0100
Committer: Sandor Magyari <sm...@hortonworks.com>
Committed: Thu Dec 8 15:42:18 2016 +0100

----------------------------------------------------------------------
 .../apache/ambari/server/agent/AgentRequests.java    |  2 +-
 .../ambari/server/agent/HeartbeatProcessor.java      | 15 +++++----------
 2 files changed, 6 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/8bdb7454/ambari-server/src/main/java/org/apache/ambari/server/agent/AgentRequests.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/AgentRequests.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/AgentRequests.java
index 01195bf..2980f38 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/AgentRequests.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/AgentRequests.java
@@ -45,9 +45,9 @@ public class AgentRequests {
 
   public void setExecutionDetailsRequest(String host, String component, String requestExecutionCmd) {
     if (StringUtils.isNotBlank(requestExecutionCmd)) {
+      LOG.debug("Setting need for exec command to " + requestExecutionCmd + " for " + component);
       Map<String, Boolean> perHostRequiresExecCmdDetails = getPerHostRequiresExecCmdDetails(host);
       if (Boolean.TRUE.toString().toUpperCase().equals(requestExecutionCmd.toUpperCase())) {
-        LOG.info("Setting need for exec command to " + requestExecutionCmd + " for " + component);
         perHostRequiresExecCmdDetails.put(component, Boolean.TRUE);
       } else {
         perHostRequiresExecCmdDetails.put(component, Boolean.FALSE);

http://git-wip-us.apache.org/repos/asf/ambari/blob/8bdb7454/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java
index 404419c..8f4782e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java
@@ -19,6 +19,8 @@ package org.apache.ambari.server.agent;
 
 
 import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -99,8 +101,6 @@ public class HeartbeatProcessor extends AbstractService{
   //TODO rewrite to correlate with heartbeat frequency, hardcoded in agent as of now
   private long delay = 5000;
   private long period = 1000;
-  private long logIntervalSeconds = 30;
-  private long lastStatusMessage = 0;
 
   private int poolSize = 1;
 
@@ -179,19 +179,12 @@ public class HeartbeatProcessor extends AbstractService{
 
     @Override
     public void run() {
-
       while (shouldRun) {
         try {
-          long now = System.currentTimeMillis();
-          if ((now - lastStatusMessage) > (logIntervalSeconds * 1000)) {
-            LOG.info("Queue size: {}", heartBeatsQueue.size());
-            lastStatusMessage = now;
-          }
           HeartBeat heartbeat = pollHeartbeat();
           if (heartbeat == null) {
             break;
           }
-
           processHeartbeat(heartbeat);
         } catch (Exception e) {
           LOG.error("Exception received while processing heartbeat", e);
@@ -208,13 +201,13 @@ public class HeartbeatProcessor extends AbstractService{
   /**
    * Incapsulates logic for processing data from agent heartbeat
    * @param heartbeat Agent heartbeat object
-   * @param now
    * @throws AmbariException
    */
   public void processHeartbeat(HeartBeat heartbeat) throws AmbariException {
     long now = System.currentTimeMillis();
 
     processAlerts(heartbeat);
+
     //process status reports before command reports to prevent status override immediately after task finish
     processStatusReports(heartbeat);
     processCommandReports(heartbeat, now);
@@ -222,6 +215,8 @@ public class HeartbeatProcessor extends AbstractService{
     processHostStatus(heartbeat);
   }
 
+
+
   /**
    * Extracts all of the {@link Alert}s from the heartbeat and fires
    * {@link AlertEvent}s for each one. If there is a problem looking up the


[09/51] [abbrv] ambari git commit: AMBARI-19012 Ability to use external Solr for Log Search instead of AMBARI_INFRA_SOLR

Posted by sm...@apache.org.
AMBARI-19012 Ability to use external Solr for Log Search instead of AMBARI_INFRA_SOLR

Change-Id: I9501d854005e52153ed0bc38ba51d6b9113120c8


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a85000b8
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a85000b8
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a85000b8

Branch: refs/heads/branch-feature-AMBARI-18901
Commit: a85000b8a1e9012f95b14894a44baf861792d4bf
Parents: aedf2c0
Author: Miklos Gergely <mg...@hortonworks.com>
Authored: Thu Dec 8 10:20:51 2016 +0100
Committer: Miklos Gergely <mg...@hortonworks.com>
Committed: Thu Dec 8 10:20:51 2016 +0100

----------------------------------------------------------------------
 .../libraries/functions/package_conditions.py   |   2 +-
 .../0.5.0/configuration/logfeeder-env.xml       |  14 +
 .../0.5.0/configuration/logsearch-env.xml       |  94 ++++++-
 .../LOGSEARCH/0.5.0/metainfo.xml                |  34 +--
 .../LOGSEARCH/0.5.0/package/scripts/params.py   |  79 +++---
 .../0.5.0/package/scripts/setup_logfeeder.py    |   2 +-
 .../0.5.0/package/scripts/setup_logsearch.py    |  10 +-
 .../0.5.0/properties/logfeeder-env.sh.j2        |   2 +-
 .../0.5.0/properties/logsearch-env.sh.j2        |   2 +-
 .../0.5.0/properties/output.config.json.j2      |   6 +-
 .../LOGSEARCH/0.5.0/themes/theme.json           | 253 +++++++++++++++++++
 .../stacks/HDP/2.2/services/stack_advisor.py    |  55 ++--
 .../test/python/stacks/2.4/configs/default.json |   2 +-
 ambari-web/app/data/HDP2/site_properties.js     |  36 +--
 14 files changed, 472 insertions(+), 119 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a85000b8/ambari-common/src/main/python/resource_management/libraries/functions/package_conditions.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/package_conditions.py b/ambari-common/src/main/python/resource_management/libraries/functions/package_conditions.py
index 8257022..5a16061 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/package_conditions.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/package_conditions.py
@@ -70,7 +70,7 @@ def should_install_infra_solr():
 
 def should_install_infra_solr_client():
   config = Script.get_config()
-  return _has_applicable_local_component(config, ['INFRA_SOLR_CLIENT', 'ATLAS_SERVER', 'RANGER_ADMIN'])
+  return _has_applicable_local_component(config, ['INFRA_SOLR_CLIENT', 'ATLAS_SERVER', 'RANGER_ADMIN', 'LOGSEARCH_SERVER'])
 
 def should_install_logsearch_portal():
   config = Script.get_config()

http://git-wip-us.apache.org/repos/asf/ambari/blob/a85000b8/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logfeeder-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logfeeder-env.xml b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logfeeder-env.xml
index ee885e3..aba638a 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logfeeder-env.xml
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logfeeder-env.xml
@@ -119,6 +119,20 @@
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
+    <name>logfeeder_external_solr_kerberos_keytab</name>
+    <value></value>
+    <display-name>Logfeeder External Solr keytab</display-name>
+    <description>The path to the Kerberos Keytab file containing service principal of Logfeeder e.g. /etc/security/keytabs/logfeeder.service.keytab</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>logfeeder_external_solr_kerberos_principal</name>
+    <value></value>
+    <display-name>Logfeeder External Solr principal</display-name>
+    <description>The service principal for Logfeeder which will be used to access SolrClient e.g. logfeeder/_HOST@REALM</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
     <name>logfeeder_kerberos_keytab</name>
     <value>/etc/security/keytabs/logfeeder.service.keytab</value>
     <display-name>Logfeeder Solr keytab</display-name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/a85000b8/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logsearch-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logsearch-env.xml b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logsearch-env.xml
index c5b9b4e..2f13710 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logsearch-env.xml
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logsearch-env.xml
@@ -99,20 +99,100 @@
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
-    <name>logsearch_solr_audit_logs_zk_node</name>
-    <value>{infra_solr_znode}</value>
-    <display-name>Solr Audit Logs Znode</display-name>
-    <description>Only needed if using custom solr cloud. E.g. /audit_logs</description>
+    <name>logsearch_use_external_solr</name>
+    <value>false</value>
+    <display-name>Use External Solr</display-name>
+    <description>Use External Solr to store logs</description>
+    <value-attributes>
+      <type>value-list</type>
+      <overridable>false</overridable>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>ON</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>OFF</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>logsearch_external_solr_zk_znode</name>
+    <value></value>
+    <display-name>External Solr Logs Znode</display-name>
+    <description>Only needed if using custom solr cloud. E.g. /logsearch_solr</description>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
-    <name>logsearch_solr_audit_logs_zk_quorum</name>
-    <value>{zookeeper_quorum}</value>
-    <display-name>Solr Audit Logs ZK Quorum</display-name>
+    <name>logsearch_external_solr_zk_quorum</name>
+    <value></value>
+    <display-name>External Solr Logs ZK Quorum</display-name>
     <description>Only needed if using custom solr cloud. E.g. zk1:2181,zk2:2182</description>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
+    <name>logsearch_external_solr_ssl_enabled</name>
+    <value>false</value>
+    <display-name>Log Search External Solr SSL Enabled</display-name>
+    <description>Use SSL to connect to the external solr</description>
+    <value-attributes>
+      <type>value-list</type>
+      <overridable>false</overridable>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>ON</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>OFF</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>logsearch_external_solr_kerberos_enabled</name>
+    <value>false</value>
+    <display-name>Log Search External Solr Kerberos Enabled</display-name>
+    <description>Use Kerberos to connect to the external solr</description>
+    <value-attributes>
+      <type>value-list</type>
+      <overridable>false</overridable>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>ON</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>OFF</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>logsearch_external_solr_kerberos_keytab</name>
+    <value></value>
+    <display-name>Log Search External Solr keytab</display-name>
+    <description>The path to the Kerberos Keytab file containing service principal of Log Search e.g. /etc/security/keytabs/logsearch.service.keytab</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>logsearch_external_solr_kerberos_principal</name>
+    <value></value>
+    <display-name>Log Search External Solr principal</display-name>
+    <description>The service principal for Log Search which will be used to access SolrClient  e.g. logsearch/_HOST@REALM</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
     <name>logsearch_truststore_location</name>
     <value>/etc/security/serverKeys/logsearch.trustStore.jks</value>
     <display-name>Log Search trust store location</display-name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/a85000b8/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/metainfo.xml b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/metainfo.xml
index ed74bdf..6921a8e 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/metainfo.xml
@@ -55,15 +55,6 @@
                 <enabled>true</enabled>
               </auto-deploy>
             </dependency>
-
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-
           </dependencies>
           <configuration-dependencies>
             <config-type>infra-solr-env</config-type>
@@ -95,20 +86,9 @@
             </log>
           </logs>
 
-          <dependencies>
-
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-
-          </dependencies>
-
           <configuration-dependencies>
             <config-type>infra-solr-env</config-type>
+            <config-type>logsearch-env</config-type>
             <config-type>logfeeder-properties</config-type>
             <config-type>logfeeder-env</config-type>
             <config-type>logfeeder-grok</config-type>
@@ -135,6 +115,10 @@
               <skipUpgrade>true</skipUpgrade>
               <condition>should_install_logsearch_portal</condition>
             </package>
+            <package>
+              <name>ambari-infra-solr-client</name>
+              <condition>should_install_infra_solr_client</condition>
+            </package>
           </packages>
         </osSpecific>
         <osSpecific>
@@ -149,6 +133,10 @@
               <skipUpgrade>true</skipUpgrade>
               <condition>should_install_logsearch_portal</condition>
             </package>
+            <package>
+              <name>ambari-infra-solr-client</name>
+              <condition>should_install_infra_solr_client</condition>
+            </package>
           </packages>
         </osSpecific>
       </osSpecifics>
@@ -159,10 +147,6 @@
         <timeout>300</timeout>
       </commandScript>
 
-      <requiredServices>
-        <service>AMBARI_INFRA</service>
-      </requiredServices>
-
       <quickLinksConfigurations>
         <quickLinksConfiguration>
           <fileName>quicklinks.json</fileName>

http://git-wip-us.apache.org/repos/asf/ambari/blob/a85000b8/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py
index d086f9f..c20d92d 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py
@@ -38,7 +38,6 @@ def get_port_from_url(address):
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
-stack_version = default("/commandParams/version", None)
 sudo = AMBARI_SUDO_BINARY
 security_enabled = status_params.security_enabled
 
@@ -56,16 +55,10 @@ logfeeder_pid_dir = status_params.logfeeder_pid_dir
 logfeeder_pid_file = status_params.logfeeder_pid_file
 
 user_group = config['configurations']['cluster-env']['user_group']
-fetch_nonlocal_groups = config['configurations']['cluster-env']["fetch_nonlocal_groups"]
 
 # shared configs
 java64_home = config['hostLevelParams']['java_home']
-zookeeper_hosts_list = config['clusterHostInfo']['zookeeper_hosts']
-zookeeper_hosts_list.sort()
-# get comma separated list of zookeeper hosts from clusterHostInfo
-zookeeper_hosts = ",".join(zookeeper_hosts_list)
 cluster_name = str(config['clusterName'])
-availableServices = config['availableServices']
 
 configurations = config['configurations'] # need reference inside logfeeder jinja templates
 logserch_meta_configs = get_logsearch_meta_configs(configurations)
@@ -84,30 +77,31 @@ else:
 #####################################
 # Infra Solr configs
 #####################################
-infra_solr_znode = default('/configurations/infra-solr-env/infra_solr_znode', '/infra-solr')
-infra_solr_instance_count = len(config['clusterHostInfo']['infra_solr_hosts'])
-infra_solr_ssl_enabled = default('configurations/infra-solr-env/infra_solr_ssl_enabled', False)
-infra_solr_jmx_port = config['configurations']['infra-solr-env']['infra_solr_jmx_port']
-
-zookeeper_port = default('/configurations/zoo.cfg/clientPort', None)
-index = 0
-zookeeper_quorum = ""
-for host in config['clusterHostInfo']['zookeeper_hosts']:
-  zookeeper_quorum += host + ":" + str(zookeeper_port)
-  index += 1
-  if index < len(config['clusterHostInfo']['zookeeper_hosts']):
-    zookeeper_quorum += ","
+infra_solr_znode = '/infra-solr'
+infra_solr_ssl_enabled = False
+infra_solr_jmx_port = ''
 
+if 'infra-solr-env' in config['configurations']:
+  infra_solr_znode = default('/configurations/infra-solr-env/infra_solr_znode', '/infra-solr')
+  infra_solr_ssl_enabled = default('configurations/infra-solr-env/infra_solr_ssl_enabled', False)
+  infra_solr_jmx_port = config['configurations']['infra-solr-env']['infra_solr_jmx_port']
 
 if security_enabled:
   kinit_path_local = status_params.kinit_path_local
   _hostname_lowercase = config['hostname'].lower()
   logsearch_jaas_file = logsearch_server_conf + '/logsearch_jaas.conf'
   logfeeder_jaas_file = logsearch_logfeeder_conf + '/logfeeder_jaas.conf'
-  logsearch_kerberos_keytab = config['configurations']['logsearch-env']['logsearch_kerberos_keytab']
-  logsearch_kerberos_principal = config['configurations']['logsearch-env']['logsearch_kerberos_principal'].replace('_HOST',_hostname_lowercase)
-  logfeeder_kerberos_keytab = config['configurations']['logfeeder-env']['logfeeder_kerberos_keytab']
-  logfeeder_kerberos_principal = config['configurations']['logfeeder-env']['logfeeder_kerberos_principal'].replace('_HOST',_hostname_lowercase)
+  use_external_solr_with_kerberos = default('configurations/logsearch-env/logsearch_external_solr_kerberos_enabled', False)
+  if use_external_solr_with_kerberos:
+    logsearch_kerberos_keytab = config['configurations']['logsearch-env']['logsearch_external_solr_kerberos_keytab']
+    logsearch_kerberos_principal = config['configurations']['logsearch-env']['logsearch_external_solr_kerberos_principal'].replace('_HOST',_hostname_lowercase)
+    logfeeder_kerberos_keytab = config['configurations']['logfeeder-env']['logfeeder_external_solr_kerberos_keytab']
+    logfeeder_kerberos_principal = config['configurations']['logfeeder-env']['logfeeder_external_solr_kerberos_principal'].replace('_HOST',_hostname_lowercase)
+  else:
+    logsearch_kerberos_keytab = config['configurations']['logsearch-env']['logsearch_kerberos_keytab']
+    logsearch_kerberos_principal = config['configurations']['logsearch-env']['logsearch_kerberos_principal'].replace('_HOST',_hostname_lowercase)
+    logfeeder_kerberos_keytab = config['configurations']['logfeeder-env']['logfeeder_kerberos_keytab']
+    logfeeder_kerberos_principal = config['configurations']['logfeeder-env']['logfeeder_kerberos_principal'].replace('_HOST',_hostname_lowercase)
 
 #####################################
 # Logsearch configs
@@ -120,10 +114,29 @@ logsearch_service_logs_merge_factor = config['configurations']['logsearch-servic
 logsearch_audit_logs_max_retention = config['configurations']['logsearch-audit_logs-solrconfig']['logsearch_audit_logs_max_retention']
 logsearch_audit_logs_merge_factor = config['configurations']['logsearch-audit_logs-solrconfig']['logsearch_audit_logs_merge_factor']
 
-logsearch_solr_audit_logs_zk_node = default('/configurations/logsearch-env/logsearch_solr_audit_logs_zk_node', infra_solr_znode)
-logsearch_solr_audit_logs_zk_quorum = default('/configurations/logsearch-env/logsearch_solr_audit_logs_zk_quorum', zookeeper_quorum)
-logsearch_solr_audit_logs_zk_node = format(logsearch_solr_audit_logs_zk_node)
-logsearch_solr_audit_logs_zk_quorum = format(logsearch_solr_audit_logs_zk_quorum)
+logsearch_use_external_solr = default('/configurations/logsearch-env/logsearch_use_external_solr', False)
+
+if logsearch_use_external_solr:
+  logsearch_solr_zk_znode = config['configurations']['logsearch-env']['logsearch_external_solr_zk_znode']
+  logsearch_solr_zk_quorum = config['configurations']['logsearch-env']['logsearch_external_solr_zk_quorum']
+  logsearch_solr_ssl_enabled = default('configurations/logsearch-env/logsearch_external_solr_ssl_enabled', False)
+  logsearch_solr_kerberos_enabled = security_enabled and default('configurations/logsearch-env/logsearch_external_solr_kerberos_enabled', False)
+else:
+  logsearch_solr_zk_znode = infra_solr_znode
+
+  logsearch_solr_zk_quorum = ""
+  zookeeper_port = default('/configurations/zoo.cfg/clientPort', None)
+  if 'zookeeper_hosts' in config['clusterHostInfo']:
+    for host in config['clusterHostInfo']['zookeeper_hosts']:
+      if logsearch_solr_zk_quorum:
+        logsearch_solr_zk_quorum += ','
+      logsearch_solr_zk_quorum += host + ":" + str(zookeeper_port)
+  
+  logsearch_solr_ssl_enabled = infra_solr_ssl_enabled
+  logsearch_solr_kerberos_enabled = security_enabled
+
+zookeeper_quorum = logsearch_solr_zk_quorum
+
 
 
 # logsearch-env configs
@@ -179,8 +192,8 @@ logsearch_properties = {}
 
 # default values
 
-logsearch_properties['logsearch.solr.zk_connect_string'] = zookeeper_quorum + infra_solr_znode
-logsearch_properties['logsearch.solr.audit.logs.zk_connect_string'] = logsearch_solr_audit_logs_zk_quorum + logsearch_solr_audit_logs_zk_node
+logsearch_properties['logsearch.solr.zk_connect_string'] = logsearch_solr_zk_quorum + logsearch_solr_zk_znode
+logsearch_properties['logsearch.solr.audit.logs.zk_connect_string'] = logsearch_solr_zk_quorum + logsearch_solr_zk_znode
 
 logsearch_properties['logsearch.solr.collection.history'] = 'history'
 logsearch_properties['logsearch.solr.history.config.name'] = 'history'
@@ -210,7 +223,7 @@ del logsearch_properties['logsearch.solr.audit.logs.use.ranger']
 logsearch_properties['logsearch.solr.metrics.collector.hosts'] = format(logsearch_properties['logsearch.solr.metrics.collector.hosts'])
 logsearch_properties['logsearch.auth.external_auth.host_url'] = format(logsearch_properties['logsearch.auth.external_auth.host_url'])
 
-if security_enabled:
+if logsearch_solr_kerberos_enabled:
   logsearch_properties['logsearch.solr.kerberos.enable'] = 'true'
   logsearch_properties['logsearch.solr.jaas.file'] = logsearch_jaas_file
 
@@ -293,9 +306,9 @@ logfeeder_properties = dict(logfeeder_properties.items() + dict(config['configur
 
 logfeeder_properties['logfeeder.metrics.collector.hosts'] = format(logfeeder_properties['logfeeder.metrics.collector.hosts'])
 logfeeder_properties['logfeeder.config.files'] = format(logfeeder_properties['logfeeder.config.files'])
-logfeeder_properties['logfeeder.solr.zk_connect_string'] = zookeeper_quorum + infra_solr_znode
+logfeeder_properties['logfeeder.solr.zk_connect_string'] = logsearch_solr_zk_quorum + logsearch_solr_zk_znode
 
-if security_enabled:
+if logsearch_solr_kerberos_enabled:
   if 'logfeeder.solr.kerberos.enable' not in logfeeder_properties:
     logfeeder_properties['logfeeder.solr.kerberos.enable'] = 'true'
   if 'logfeeder.solr.jaas.file' not in logfeeder_properties:
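
A condensed sketch of the connect-string selection performed by the reworked params.py above, with illustrative values standing in for real cluster configuration:

    # Illustrative inputs
    use_external_solr = False                       # logsearch-env/logsearch_use_external_solr
    external_zk_quorum = 'extzk1:2181,extzk2:2181'  # logsearch_external_solr_zk_quorum
    external_znode = '/logsearch_solr'              # logsearch_external_solr_zk_znode
    zookeeper_hosts = ['c6401.ambari.apache.org', 'c6402.ambari.apache.org']
    client_port = '2181'                            # zoo.cfg/clientPort
    infra_solr_znode = '/infra-solr'

    if use_external_solr:
        zk_quorum, zk_znode = external_zk_quorum, external_znode
    else:
        zk_quorum = ','.join('{0}:{1}'.format(host, client_port) for host in zookeeper_hosts)
        zk_znode = infra_solr_znode

    # Both logsearch.solr.zk_connect_string and logfeeder.solr.zk_connect_string
    # end up as quorum + znode, e.g.
    # 'c6401.ambari.apache.org:2181,c6402.ambari.apache.org:2181/infra-solr'
    zk_connect_string = zk_quorum + zk_znode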

http://git-wip-us.apache.org/repos/asf/ambari/blob/a85000b8/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/setup_logfeeder.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/setup_logfeeder.py b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/setup_logfeeder.py
index 9582334..14f8d20 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/setup_logfeeder.py
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/setup_logfeeder.py
@@ -92,7 +92,7 @@ def setup_logfeeder():
          )
 
 
-  if params.security_enabled:
+  if params.logsearch_solr_kerberos_enabled:
     File(format("{logfeeder_jaas_file}"),
          content=Template("logfeeder_jaas.conf.j2")
          )

http://git-wip-us.apache.org/repos/asf/ambari/blob/a85000b8/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/setup_logsearch.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/setup_logsearch.py b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/setup_logsearch.py
index 351126b..874b90b 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/setup_logsearch.py
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/setup_logsearch.py
@@ -118,10 +118,10 @@ def setup_logsearch():
 
 def upload_conf_set(config_set, solrconfig_content = None):
   import params
-  jaas_file = params.logsearch_jaas_file if params.security_enabled else None
+  jaas_file = params.logsearch_jaas_file if params.logsearch_solr_kerberos_enabled else None
   solr_cloud_util.upload_configuration_to_zk(
-    zookeeper_quorum=params.zookeeper_quorum,
-    solr_znode=params.infra_solr_znode,
+    zookeeper_quorum=params.logsearch_solr_zk_quorum,
+    solr_znode=params.logsearch_solr_zk_znode,
     config_set_dir=format("{logsearch_server_conf}/solr_configsets/{config_set}/conf"),
     config_set=config_set,
     tmp_dir=params.tmp_dir,
@@ -134,7 +134,7 @@ def upload_conf_set(config_set, solrconfig_content = None):
 def check_znode():
   import params
   solr_cloud_util.check_znode(
-    zookeeper_quorum=params.zookeeper_quorum,
-    solr_znode=params.infra_solr_znode,
+    zookeeper_quorum=params.logsearch_solr_zk_quorum,
+    solr_znode=params.logsearch_solr_zk_znode,
     java64_home=params.java64_home,
     retry=30, interval=5)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a85000b8/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/logfeeder-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/logfeeder-env.sh.j2 b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/logfeeder-env.sh.j2
index 2818708..6795dab 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/logfeeder-env.sh.j2
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/logfeeder-env.sh.j2
@@ -32,7 +32,7 @@ if [ "$LOGFEEDER_JAVA_MEM" = "" ]; then
   export LOGFEEDER_JAVA_MEM=-Xmx{{logfeeder_max_mem}}
 fi
 
-{% if infra_solr_ssl_enabled %}
+{% if logsearch_solr_ssl_enabled %}
 export LOGFEEDER_SSL="true"
 export LOGFEEDER_KEYSTORE_LOCATION={{logfeeder_keystore_location}}
 export LOGFEEDER_KEYSTORE_PASSWORD={{logfeeder_keystore_password}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/a85000b8/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/logsearch-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/logsearch-env.sh.j2 b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/logsearch-env.sh.j2
index eb7306c..a179983 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/logsearch-env.sh.j2
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/logsearch-env.sh.j2
@@ -38,7 +38,7 @@ export LOGSEARCH_DEBUG={{logsearch_debug_enabled}}
 
 export LOGSEARCH_DEBUG_PORT={{logsearch_debug_port}}
 
-{% if infra_solr_ssl_enabled or logsearch_ui_protocol == 'https' or ambari_server_use_ssl %}
+{% if logsearch_solr_ssl_enabled or logsearch_ui_protocol == 'https' or ambari_server_use_ssl %}
 export LOGSEARCH_SSL="true"
 export LOGSEARCH_KEYSTORE_LOCATION={{logsearch_keystore_location}}
 export LOGSEARCH_KEYSTORE_PASSWORD={{logsearch_keystore_password}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/a85000b8/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/output.config.json.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/output.config.json.j2 b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/output.config.json.j2
index 062d636..214e5ba 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/output.config.json.j2
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/output.config.json.j2
@@ -21,7 +21,7 @@
       "comment":"Output to solr for service logs",
       "is_enabled":"{{solr_service_logs_enable}}",
       "destination":"solr",
-      "zk_connect_string":"{{zookeeper_quorum}}{{infra_solr_znode}}",
+      "zk_connect_string":"{{logsearch_solr_zk_quorum}}{{logsearch_solr_zk_znode}}",
       "collection":"{{logsearch_solr_collection_service_logs}}",
       "number_of_shards": "{{logsearch_collection_service_logs_numshards}}",
       "splits_interval_mins": "{{logsearch_service_logs_split_interval_mins}}",
@@ -40,7 +40,7 @@
       "comment":"Output to solr for audit records",
       "is_enabled":"{{solr_audit_logs_enable}}",
       "destination":"solr",
-      "zk_connect_string":"{{zookeeper_quorum}}{{infra_solr_znode}}",
+      "zk_connect_string":"{{logsearch_solr_zk_quorum}}{{logsearch_solr_zk_znode}}",
       "collection":"{{logsearch_solr_collection_audit_logs}}",
       "number_of_shards": "{{logsearch_collection_audit_logs_numshards}}",
       "splits_interval_mins": "{{logsearch_audit_logs_split_interval_mins}}",
@@ -57,5 +57,5 @@
     }
 
   ]
-
+  
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/a85000b8/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/themes/theme.json b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/themes/theme.json
index 0193689..2858825 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/themes/theme.json
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/themes/theme.json
@@ -57,6 +57,25 @@
                       "column-span": "1"
                     }
                   ]
+                },
+                {
+                  "name": "section-logsearch-solr-connection",
+                  "display-name": "Solr Connection",
+                  "row-index": "4",
+                  "column-index": "1",
+                  "row-span": "3",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "4",
+                  "subsections": [
+                    {
+                      "name": "subsection-logsearch-solr-connection-col1",
+                      "row-index": "4",
+                      "column-index": "0",
+                      "row-span": "3",
+                      "column-span": "1"
+                    }
+                  ]
                 }
               ]
             }
@@ -138,6 +157,186 @@
               }
             }
           ]
+        },
+        {
+          "config": "logsearch-env/logsearch_use_external_solr",
+          "subsection-name": "subsection-logsearch-solr-connection-col1"
+        },
+        {
+          "config": "logsearch-env/logsearch_external_solr_zk_znode",
+          "subsection-name": "subsection-logsearch-solr-connection-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "logsearch-env/logsearch_use_external_solr"
+              ],
+              "if": "${logsearch-env/logsearch_use_external_solr}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "logsearch-env/logsearch_external_solr_zk_quorum",
+          "subsection-name": "subsection-logsearch-solr-connection-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "logsearch-env/logsearch_use_external_solr"
+              ],
+              "if": "${logsearch-env/logsearch_use_external_solr}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "logsearch-env/logsearch_external_solr_ssl_enabled",
+          "subsection-name": "subsection-logsearch-solr-connection-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "logsearch-env/logsearch_use_external_solr"
+              ],
+              "if": "${logsearch-env/logsearch_use_external_solr}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "logsearch-env/logsearch_external_solr_kerberos_enabled",
+          "subsection-name": "subsection-logsearch-solr-connection-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "logsearch-env/logsearch_use_external_solr"
+              ],
+              "if": "${logsearch-env/logsearch_use_external_solr}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "logsearch-env/logsearch_external_solr_kerberos_keytab",
+          "subsection-name": "subsection-logsearch-solr-connection-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "logsearch-env/logsearch_external_solr_kerberos_enabled"
+              ],
+              "if": "${logsearch-env/logsearch_external_solr_kerberos_enabled}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "logsearch-env/logsearch_external_solr_kerberos_principal",
+          "subsection-name": "subsection-logsearch-solr-connection-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "logsearch-env/logsearch_external_solr_kerberos_enabled"
+              ],
+              "if": "${logsearch-env/logsearch_external_solr_kerberos_enabled}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "logfeeder-env/logfeeder_external_solr_kerberos_keytab",
+          "subsection-name": "subsection-logsearch-solr-connection-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "logsearch-env/logsearch_external_solr_kerberos_enabled"
+              ],
+              "if": "${logsearch-env/logsearch_external_solr_kerberos_enabled}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "logfeeder-env/logfeeder_external_solr_kerberos_principal",
+          "subsection-name": "subsection-logsearch-solr-connection-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "logsearch-env/logsearch_external_solr_kerberos_enabled"
+              ],
+              "if": "${logsearch-env/logsearch_external_solr_kerberos_enabled}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
         }
       ]
     },
@@ -220,6 +419,60 @@
         "widget": {
           "type": "directories"
         }
+      },
+      {
+        "config": "logsearch-env/logsearch_use_external_solr",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "logsearch-env/logsearch_external_solr_zk_znode",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "logsearch-env/logsearch_external_solr_zk_quorum",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "logsearch-env/logsearch_external_solr_ssl_enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "logsearch-env/logsearch_external_solr_kerberos_enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "logsearch-env/logsearch_external_solr_kerberos_keytab",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "logsearch-env/logsearch_external_solr_kerberos_principal",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "logfeeder-env/logfeeder_external_solr_kerberos_keytab",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "logfeeder-env/logfeeder_external_solr_kerberos_principal",
+        "widget": {
+          "type": "text-field"
+        }
       }
     ]
   }
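
The depends-on entries added to the theme all follow the same pattern: a dependent field is visible only while its controlling boolean config is "true". The rough Python sketch below mimics that rule for two of the new configs; it is not Ambari Web's actual theme evaluator, just a reading aid.

    # Sketch of the visibility rule expressed by the depends-on blocks above.
    def is_visible(config_name, current_values):
        controllers = {
            "logsearch-env/logsearch_external_solr_zk_quorum":
                "logsearch-env/logsearch_use_external_solr",
            "logsearch-env/logsearch_external_solr_kerberos_keytab":
                "logsearch-env/logsearch_external_solr_kerberos_enabled",
        }
        controller = controllers.get(config_name)
        if controller is None:
            return True  # no depends-on entry: always visible
        return current_values.get(controller, "false") == "true"

    values = {"logsearch-env/logsearch_use_external_solr": "true",
              "logsearch-env/logsearch_external_solr_kerberos_enabled": "false"}
    print(is_visible("logsearch-env/logsearch_external_solr_zk_quorum", values))       # True
    print(is_visible("logsearch-env/logsearch_external_solr_kerberos_keytab", values)) # False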

http://git-wip-us.apache.org/repos/asf/ambari/blob/a85000b8/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
index 4f0a9d0..4802193 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
@@ -1015,27 +1015,50 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
 
   def recommendLogsearchConfigurations(self, configurations, clusterData, services, hosts):
     putLogsearchProperty = self.putProperty(configurations, "logsearch-properties", services)
+    putLogsearchAttribute = self.putPropertyAttribute(configurations, "logsearch-properties")
+    putLogsearchEnvProperty = self.putProperty(configurations, "logsearch-env", services)
+    putLogsearchEnvAttribute = self.putPropertyAttribute(configurations, "logsearch-env")
+    putLogfeederEnvAttribute = self.putPropertyAttribute(configurations, "logfeeder-env")
+
     infraSolrHosts = self.getComponentHostNames(services, "AMBARI_INFRA", "INFRA_SOLR")
 
-    if infraSolrHosts is not None and len(infraSolrHosts) > 0 \
-      and "logsearch-properties" in services["configurations"]:
+    if infraSolrHosts is not None and len(infraSolrHosts) > 0 and "logsearch-properties" in services["configurations"]:
+      replicationReccomendFloat = math.log(len(infraSolrHosts), 5)
+      recommendedReplicationFactor = int(1 + math.floor(replicationReccomendFloat))
+      
       recommendedMinShards = len(infraSolrHosts)
       recommendedShards = 2 * len(infraSolrHosts)
       recommendedMaxShards = 3 * len(infraSolrHosts)
-      # recommend number of shard
-      putLogsearchAttribute = self.putPropertyAttribute(configurations, "logsearch-properties")
-      putLogsearchAttribute('logsearch.collection.service.logs.numshards', 'minimum', recommendedMinShards)
-      putLogsearchAttribute('logsearch.collection.service.logs.numshards', 'maximum', recommendedMaxShards)
-      putLogsearchProperty("logsearch.collection.service.logs.numshards", recommendedShards)
-
-      putLogsearchAttribute('logsearch.collection.audit.logs.numshards', 'minimum', recommendedMinShards)
-      putLogsearchAttribute('logsearch.collection.audit.logs.numshards', 'maximum', recommendedMaxShards)
-      putLogsearchProperty("logsearch.collection.audit.logs.numshards", recommendedShards)
-      # recommend replication factor
-      replicationReccomendFloat = math.log(len(infraSolrHosts), 5)
-      recommendedReplicationFactor = int(1 + math.floor(replicationReccomendFloat))
-      putLogsearchProperty("logsearch.collection.service.logs.replication.factor", recommendedReplicationFactor)
-      putLogsearchProperty("logsearch.collection.audit.logs.replication.factor", recommendedReplicationFactor)
+    else:
+      recommendedReplicationFactor = 2
+      
+      recommendedMinShards = 1
+      recommendedShards = 1
+      recommendedMaxShards = 100
+      
+      putLogsearchEnvProperty('logsearch_use_external_solr', 'true')
+      putLogsearchEnvAttribute('logsearch_use_external_solr', 'visible', 'false')
+
+    # recommend number of shard
+    putLogsearchAttribute('logsearch.collection.service.logs.numshards', 'minimum', recommendedMinShards)
+    putLogsearchAttribute('logsearch.collection.service.logs.numshards', 'maximum', recommendedMaxShards)
+    putLogsearchProperty("logsearch.collection.service.logs.numshards", recommendedShards)
+
+    putLogsearchAttribute('logsearch.collection.audit.logs.numshards', 'minimum', recommendedMinShards)
+    putLogsearchAttribute('logsearch.collection.audit.logs.numshards', 'maximum', recommendedMaxShards)
+    putLogsearchProperty("logsearch.collection.audit.logs.numshards", recommendedShards)
+    # recommend replication factor
+    putLogsearchProperty("logsearch.collection.service.logs.replication.factor", recommendedReplicationFactor)
+    putLogsearchProperty("logsearch.collection.audit.logs.replication.factor", recommendedReplicationFactor)
+    
+    kerberos_authentication_enabled = self.isSecurityEnabled(services)
+    if not kerberos_authentication_enabled:
+       putLogsearchEnvProperty('logsearch_external_solr_kerberos_enabled', 'false')
+       putLogsearchEnvAttribute('logsearch_external_solr_kerberos_enabled', 'visible', 'false')
+       putLogsearchEnvAttribute('logsearch_external_solr_kerberos_keytab', 'visible', 'false')
+       putLogsearchEnvAttribute('logsearch_external_solr_kerberos_principal', 'visible', 'false')
+       putLogfeederEnvAttribute('logfeeder_external_solr_kerberos_keytab', 'visible', 'false')
+       putLogfeederEnvAttribute('logfeeder_external_solr_kerberos_principal', 'visible', 'false')
 
   def validateTezConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
     validationItems = [ {"config-name": 'tez.am.resource.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'tez.am.resource.memory.mb')},
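
The recommendLogsearchConfigurations changes above boil down to a small piece of arithmetic plus fixed fallbacks when no AMBARI_INFRA Solr hosts exist. The standalone sketch below reproduces those formulas on a made-up host count; it is not the stack advisor itself.

    # Same formulas as the diff above: replication factor grows with log base 5 of the
    # INFRA_SOLR host count, shards scale linearly; fixed defaults when there are none.
    import math

    def logsearch_recommendations(infra_solr_host_count):
        if infra_solr_host_count > 0:
            replication_factor = int(1 + math.floor(math.log(infra_solr_host_count, 5)))
            return {"replication_factor": replication_factor,
                    "numshards_min": infra_solr_host_count,
                    "numshards": 2 * infra_solr_host_count,
                    "numshards_max": 3 * infra_solr_host_count}
        # no AMBARI_INFRA Solr hosts: defaults, with external Solr forced on (hidden toggle)
        return {"replication_factor": 2, "numshards_min": 1,
                "numshards": 1, "numshards_max": 100}

    print(logsearch_recommendations(3))
    # -> {'replication_factor': 1, 'numshards_min': 3, 'numshards': 6, 'numshards_max': 9}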


[08/51] [abbrv] ambari git commit: AMBARI-19012 Ability to use external Solr for Log Search instead of AMBARI_INFRA_SOLR

Posted by sm...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/a85000b8/ambari-server/src/test/python/stacks/2.4/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.4/configs/default.json b/ambari-server/src/test/python/stacks/2.4/configs/default.json
index a6e2478..1dbc611 100644
--- a/ambari-server/src/test/python/stacks/2.4/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.4/configs/default.json
@@ -274,7 +274,7 @@
         "logsearch_ui_protocol": "http",
         "logsearch_ui_port" : "61888",
         "logsearch_solr_audit_logs_use_ranger": "false",
-        "content": "# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements.  See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License.  You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#solr.url=http://{{solr_host}}:{{solr_port}}/solr\n\n#Service Logs and History colletion\nlogsearch.solr.zkhosts={{zookeeper_quorum}}{{solr_znode}}\nlogsearch.solr.collection.ser
 vice.logs={{logsearch_collection_service_logs}}\nlogsearch.solr.collection.history=history\n\nlogsearch.service.logs.split.interval.mins={{logsearch_service_logs_split_interval_mins}}\nlogsearch.collection.service.logs.numshards={{logsearch_collection_service_logs_numshards}}\nlogsearch.collection.service.logs.replication.factor={{logsearch_collection_service_logs_replication_factor}}\n\nlogsearch.service.logs.fields={{logsearch_service_logs_fields}}\n\n#Audit logs\nlogsearch.solr.audit.logs.zkhosts={{logsearch_solr_audit_logs_zk_quorum}}{{logsearch_solr_audit_logs_zk_node}}\nogsearch.solr.collection.audit.logs={{solr_collection_audit_logs}}\nlogsearch.solr.audit.logs.url={{logsearch_solr_audit_logs_url}}\n\nlogsearch.audit.logs.split.interval.mins={{logsearch_audit_logs_split_interval_mins}}\nlogsearch.collection.audit.logs.numshards={{logsearch_collection_audit_logs_numshards}}\nlogsearch.collection.audit.logs.replication.factor={{logsearch_collection_audit_logs_replication_factor
 }}\n{% if logsearch_solr_ssl_enabled %}\nexport LOGSEARCH_SSL=\"true\"\nexport LOGSEARCH_KEYSTORE_LOCATION={{logsearch_keystore_location}}\nexport LOGSEARCH_KEYSTORE_PASSWORD={{logsearch_keystore_password}}\nexport LOGSEARCH_KEYSTORE_TYPE={{logsearch_keystore_type}}\nexport LOGSEARCH_TRUSTSTORE_LOCATION={{logsearch_truststore_location}}\nexport LOGSEARCH_TRUSTSTORE_PASSWORD={{logsearch_truststore_password}}\nexport LOGSEARCH_TRUSTSTORE_TYPE={{logsearch_truststore_type}}\n{% endif %}"
+        "content": "# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements.  See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License.  You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#solr.url=http://{{solr_host}}:{{solr_port}}/solr\n\n#Service Logs and History colletion\nlogsearch.solr.zkhosts={{zookeeper_quorum}}{{solr_znode}}\nlogsearch.solr.collection.ser
 vice.logs={{logsearch_collection_service_logs}}\nlogsearch.solr.collection.history=history\n\nlogsearch.service.logs.split.interval.mins={{logsearch_service_logs_split_interval_mins}}\nlogsearch.collection.service.logs.numshards={{logsearch_collection_service_logs_numshards}}\nlogsearch.collection.service.logs.replication.factor={{logsearch_collection_service_logs_replication_factor}}\n\nlogsearch.service.logs.fields={{logsearch_service_logs_fields}}\n\n#Audit logs\nlogsearch.solr.audit.logs.zkhosts={{logsearch_solr_zk_quorum}}{{logsearch_solr_zk_znode}}\nogsearch.solr.collection.audit.logs={{solr_collection_audit_logs}}\nlogsearch.solr.audit.logs.url={{logsearch_solr_audit_logs_url}}\n\nlogsearch.audit.logs.split.interval.mins={{logsearch_audit_logs_split_interval_mins}}\nlogsearch.collection.audit.logs.numshards={{logsearch_collection_audit_logs_numshards}}\nlogsearch.collection.audit.logs.replication.factor={{logsearch_collection_audit_logs_replication_factor}}\n{% if logsearch_s
 olr_ssl_enabled %}\nexport LOGSEARCH_SSL=\"true\"\nexport LOGSEARCH_KEYSTORE_LOCATION={{logsearch_keystore_location}}\nexport LOGSEARCH_KEYSTORE_PASSWORD={{logsearch_keystore_password}}\nexport LOGSEARCH_KEYSTORE_TYPE={{logsearch_keystore_type}}\nexport LOGSEARCH_TRUSTSTORE_LOCATION={{logsearch_truststore_location}}\nexport LOGSEARCH_TRUSTSTORE_PASSWORD={{logsearch_truststore_password}}\nexport LOGSEARCH_TRUSTSTORE_TYPE={{logsearch_truststore_type}}\n{% endif %}"
       },
       "logsearch-service_logs-solrconfig": {
         "content": "&lt;?xml version=\"1.0\" encoding=\"UTF-8\" ?&gt;\n&lt;!--\n Licensed to the Apache Software Foundation (ASF) under one or more\n contributor license agreements.  See the NOTICE file distributed with\n this work for additional information regarding copyright ownership.\n The ASF licenses this file to You under the Apache License, Version 2.0\n (the \"License\"); you may not use this file except in compliance with\n the License.  You may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n--&gt;\n\n&lt;!-- \n     For more details about configurations options that may appear in\n     this file, see http://wiki.apache.or
 g/solr/SolrConfigXml. \n--&gt;\n&lt;config&gt;\n  &lt;!-- In all configuration below, a prefix of \"solr.\" for class names\n       is an alias that causes solr to search appropriate packages,\n       including org.apache.solr.(search|update|request|core|analysis)\n\n       You may also specify a fully qualified Java classname if you\n       have your own custom plugins.\n    --&gt;\n\n  &lt;!-- Controls what version of Lucene various components of Solr\n       adhere to.  Generally, you want to use the latest version to\n       get all bug fixes and improvements. It is highly recommended\n       that you fully re-index after changing this setting as it can\n       affect both how text is indexed and queried.\n  --&gt;\n  &lt;luceneMatchVersion&gt;5.0.0&lt;/luceneMatchVersion&gt;\n\n  &lt;!-- &lt;lib/&gt; directives can be used to instruct Solr to load any Jars\n       identified and use them to resolve any \"plugins\" specified in\n       your solrconfig.xml or schema.xml (ie: Anal
 yzers, Request\n       Handlers, etc...).\n\n       All directories and paths are resolved relative to the\n       instanceDir.\n\n       Please note that &lt;lib/&gt; directives are processed in the order\n       that they appear in your solrconfig.xml file, and are \"stacked\" \n       on top of each other when building a ClassLoader - so if you have \n       plugin jars with dependencies on other jars, the \"lower level\" \n       dependency jars should be loaded first.\n\n       If a \"./lib\" directory exists in your instanceDir, all files\n       found in it are included as if you had used the following\n       syntax...\n       \n              &lt;lib dir=\"./lib\" /&gt;\n    --&gt;\n\n  &lt;!-- A 'dir' option by itself adds any files found in the directory \n       to the classpath, this is useful for including all jars in a\n       directory.\n\n       When a 'regex' is specified in addition to a 'dir', only the\n       files in that directory which completely match the reg
 ex\n       (anchored on both ends) will be included.\n\n       If a 'dir' option (with or without a regex) is used and nothing\n       is found that matches, a warning will be logged.\n\n       The examples below can be used to load some solr-contribs along \n       with their external dependencies.\n    --&gt;\n  &lt;lib dir=\"${solr.install.dir:../../../..}/dist/\" regex=\"solr-dataimporthandler-.*\\.jar\" /&gt;\n\n  &lt;lib dir=\"${solr.install.dir:../../../..}/contrib/extraction/lib\" regex=\".*\\.jar\" /&gt;\n  &lt;lib dir=\"${solr.install.dir:../../../..}/dist/\" regex=\"solr-cell-\\d.*\\.jar\" /&gt;\n\n  &lt;lib dir=\"${solr.install.dir:../../../..}/contrib/clustering/lib/\" regex=\".*\\.jar\" /&gt;\n  &lt;lib dir=\"${solr.install.dir:../../../..}/dist/\" regex=\"solr-clustering-\\d.*\\.jar\" /&gt;\n\n  &lt;lib dir=\"${solr.install.dir:../../../..}/contrib/langid/lib/\" regex=\".*\\.jar\" /&gt;\n  &lt;lib dir=\"${solr.install.dir:../../../..}/dist/\" regex=\"solr-langid-\\d.*
 \\.jar\" /&gt;\n\n  &lt;lib dir=\"${solr.install.dir:../../../..}/contrib/velocity/lib\" regex=\".*\\.jar\" /&gt;\n  &lt;lib dir=\"${solr.install.dir:../../../..}/dist/\" regex=\"solr-velocity-\\d.*\\.jar\" /&gt;\n\n  &lt;!-- an exact 'path' can be used instead of a 'dir' to specify a \n       specific jar file.  This will cause a serious error to be logged \n       if it can't be loaded.\n    --&gt;\n  &lt;!--\n     &lt;lib path=\"../a-jar-that-does-not-exist.jar\" /&gt; \n  --&gt;\n  \n  &lt;!-- Data Directory\n\n       Used to specify an alternate directory to hold all index data\n       other than the default ./data under the Solr home.  If\n       replication is in use, this should match the replication\n       configuration.\n    --&gt;\n  &lt;dataDir&gt;${solr.data.dir:}&lt;/dataDir&gt;\n\n\n  &lt;!-- The DirectoryFactory to use for indexes.\n       \n       solr.StandardDirectoryFactory is filesystem\n       based and tries to pick the best implementation for the current\n  
      JVM and platform.  solr.NRTCachingDirectoryFactory, the default,\n       wraps solr.StandardDirectoryFactory and caches small files in memory\n       for better NRT performance.\n\n       One can force a particular implementation via solr.MMapDirectoryFactory,\n       solr.NIOFSDirectoryFactory, or solr.SimpleFSDirectoryFactory.\n\n       solr.RAMDirectoryFactory is memory based, not\n       persistent, and doesn't work with replication.\n    --&gt;\n  &lt;directoryFactory name=\"DirectoryFactory\" \n                    class=\"${solr.directoryFactory:solr.NRTCachingDirectoryFactory}\"&gt;\n    \n         \n    &lt;!-- These will be used if you are using the solr.HdfsDirectoryFactory,\n         otherwise they will be ignored. If you don't plan on using hdfs,\n         you can safely remove this section. --&gt;      \n    &lt;!-- The root directory that collection data should be written to. --&gt;     \n    &lt;str name=\"solr.hdfs.home\"&gt;${solr.hdfs.home:}&lt;/str&gt;\n    &
 lt;!-- The hadoop configuration files to use for the hdfs client. --&gt;    \n    &lt;str name=\"solr.hdfs.confdir\"&gt;${solr.hdfs.confdir:}&lt;/str&gt;\n    &lt;!-- Enable/Disable the hdfs cache. --&gt;    \n    &lt;str name=\"solr.hdfs.blockcache.enabled\"&gt;${solr.hdfs.blockcache.enabled:true}&lt;/str&gt;\n    &lt;!-- Enable/Disable using one global cache for all SolrCores. \n         The settings used will be from the first HdfsDirectoryFactory created. --&gt;    \n    &lt;str name=\"solr.hdfs.blockcache.global\"&gt;${solr.hdfs.blockcache.global:true}&lt;/str&gt;\n    \n  &lt;/directoryFactory&gt; \n\n  &lt;!-- The CodecFactory for defining the format of the inverted index.\n       The default implementation is SchemaCodecFactory, which is the official Lucene\n       index format, but hooks into the schema to provide per-field customization of\n       the postings lists and per-document values in the fieldType element\n       (postingsFormat/docValuesFormat). Note that most of
  the alternative implementations\n       are experimental, so if you choose to customize the index format, it's a good\n       idea to convert back to the official format e.g. via IndexWriter.addIndexes(IndexReader)\n       before upgrading to a newer version to avoid unnecessary reindexing.\n  --&gt;\n  &lt;codecFactory class=\"solr.SchemaCodecFactory\"/&gt;\n\n  &lt;!-- To enable dynamic schema REST APIs, use the following for &lt;schemaFactory&gt;: --&gt;\n  \n       &lt;schemaFactory class=\"ManagedIndexSchemaFactory\"&gt;\n         &lt;bool name=\"mutable\"&gt;true&lt;/bool&gt;\n         &lt;str name=\"managedSchemaResourceName\"&gt;managed-schema&lt;/str&gt;\n       &lt;/schemaFactory&gt;\n&lt;!--       \n       When ManagedIndexSchemaFactory is specified, Solr will load the schema from\n       the resource named in 'managedSchemaResourceName', rather than from schema.xml.\n       Note that the managed schema resource CANNOT be named schema.xml.  If the managed\n       schema 
 does not exist, Solr will create it after reading schema.xml, then rename\n       'schema.xml' to 'schema.xml.bak'. \n       \n       Do NOT hand edit the managed schema - external modifications will be ignored and\n       overwritten as a result of schema modification REST API calls.\n\n       When ManagedIndexSchemaFactory is specified with mutable = true, schema\n       modification REST API calls will be allowed; otherwise, error responses will be\n       sent back for these requests. \n\n  &lt;schemaFactory class=\"ClassicIndexSchemaFactory\"/&gt;\n  --&gt;\n\n  &lt;!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n       Index Config - These settings control low-level behavior of indexing\n       Most example settings here show the default value, but are commented\n       out, to more easily see where customizations have been made.\n       \n       Note: This replaces &lt;indexDefaults&gt; and &lt;mainIndex&gt; from older versions\n       ~~~~~~~~~~~~~
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --&gt;\n  &lt;indexConfig&gt;\n    &lt;!-- maxFieldLength was removed in 4.0. To get similar behavior, include a \n         LimitTokenCountFilterFactory in your fieldType definition. E.g. \n     &lt;filter class=\"solr.LimitTokenCountFilterFactory\" maxTokenCount=\"10000\"/&gt;\n    --&gt;\n    &lt;!-- Maximum time to wait for a write lock (ms) for an IndexWriter. Default: 1000 --&gt;\n    &lt;!-- &lt;writeLockTimeout&gt;1000&lt;/writeLockTimeout&gt;  --&gt;\n    &lt;!-- LogSearch customization to avoid timeouts --&gt;\n    &lt;writeLockTimeout&gt;10000&lt;/writeLockTimeout&gt;\n\n    &lt;!-- The maximum number of simultaneous threads that may be\n         indexing documents at once in IndexWriter; if more than this\n         many threads arrive they will wait for others to finish.\n         Default in Solr/Lucene is 8. --&gt;\n    &lt;!-- &lt;maxIndexingThreads&gt;8&lt;/maxIndexingThreads&gt;  --&gt;\n    &lt;!-- LogSearch cu
 stomization of increase performance --&gt;\n    &lt;maxIndexingThreads&gt;50&lt;/maxIndexingThreads&gt;\n\n    &lt;!-- Expert: Enabling compound file will use less files for the index, \n         using fewer file descriptors on the expense of performance decrease. \n         Default in Lucene is \"true\". Default in Solr is \"false\" (since 3.6) --&gt;\n    &lt;!-- &lt;useCompoundFile&gt;false&lt;/useCompoundFile&gt; --&gt;\n\n    &lt;!-- ramBufferSizeMB sets the amount of RAM that may be used by Lucene\n         indexing for buffering added documents and deletions before they are\n         flushed to the Directory.\n         maxBufferedDocs sets a limit on the number of documents buffered\n         before flushing.\n         If both ramBufferSizeMB and maxBufferedDocs is set, then\n         Lucene will flush based on whichever limit is hit first.\n         The default is 100 MB.  --&gt;\n    &lt;!-- &lt;ramBufferSizeMB&gt;100&lt;/ramBufferSizeMB&gt; --&gt;\n    &lt;!-- &lt;maxBuffe
 redDocs&gt;1000&lt;/maxBufferedDocs&gt; --&gt;\n\n    &lt;!-- Expert: Merge Policy \n         The Merge Policy in Lucene controls how merging of segments is done.\n         The default since Solr/Lucene 3.3 is TieredMergePolicy.\n         The default since Lucene 2.3 was the LogByteSizeMergePolicy,\n         Even older versions of Lucene used LogDocMergePolicy.\n      --&gt;\n    &lt;!--\n        &lt;mergePolicy class=\"org.apache.lucene.index.TieredMergePolicy\"&gt;\n          &lt;int name=\"maxMergeAtOnce\"&gt;10&lt;/int&gt;\n          &lt;int name=\"segmentsPerTier\"&gt;10&lt;/int&gt;\n        &lt;/mergePolicy&gt;\n      --&gt;\n       \n    &lt;!-- Merge Factor\n         The merge factor controls how many segments will get merged at a time.\n         For TieredMergePolicy, mergeFactor is a convenience parameter which\n         will set both MaxMergeAtOnce and SegmentsPerTier at once.\n         For LogByteSizeMergePolicy, mergeFactor decides how many new segments\n         will b
 e allowed before they are merged into one.\n         Default is 10 for both merge policies.\n      --&gt;\n    &lt;!-- \n    &lt;mergeFactor&gt;10&lt;/mergeFactor&gt;\n      --&gt;\n    &lt;!-- LogSearch customization. Increased to 25 to maximize indexing speed --&gt;\n    &lt;mergeFactor&gt;25&lt;/mergeFactor&gt;\n\n    &lt;!-- Expert: Merge Scheduler\n         The Merge Scheduler in Lucene controls how merges are\n         performed.  The ConcurrentMergeScheduler (Lucene 2.3 default)\n         can perform merges in the background using separate threads.\n         The SerialMergeScheduler (Lucene 2.2 default) does not.\n     --&gt;\n    &lt;!-- \n       &lt;mergeScheduler class=\"org.apache.lucene.index.ConcurrentMergeScheduler\"/&gt;\n       --&gt;\n\n    &lt;!-- LockFactory \n\n         This option specifies which Lucene LockFactory implementation\n         to use.\n      \n         single = SingleInstanceLockFactory - suggested for a\n                  read-only index or when th
 ere is no possibility of\n                  another process trying to modify the index.\n         native = NativeFSLockFactory - uses OS native file locking.\n                  Do not use when multiple solr webapps in the same\n                  JVM are attempting to share a single index.\n         simple = SimpleFSLockFactory  - uses a plain file for locking\n\n         Defaults: 'native' is default for Solr3.6 and later, otherwise\n                   'simple' is the default\n\n         More details on the nuances of each LockFactory...\n         http://wiki.apache.org/lucene-java/AvailableLockFactories\n    --&gt;\n    &lt;lockType&gt;${solr.lock.type:native}&lt;/lockType&gt;\n\n    &lt;!-- Unlock On Startup\n\n         If true, unlock any held write or commit locks on startup.\n         This defeats the locking mechanism that allows multiple\n         processes to safely access a lucene index, and should be used\n         with care. Default is \"false\".\n\n         This is not n
 eeded if lock type is 'single'\n     --&gt;\n    &lt;!--\n    &lt;unlockOnStartup&gt;false&lt;/unlockOnStartup&gt;\n      --&gt;\n\n    &lt;!-- Commit Deletion Policy\n         Custom deletion policies can be specified here. The class must\n         implement org.apache.lucene.index.IndexDeletionPolicy.\n\n         The default Solr IndexDeletionPolicy implementation supports\n         deleting index commit points on number of commits, age of\n         commit point and optimized status.\n         \n         The latest commit point should always be preserved regardless\n         of the criteria.\n    --&gt;\n    &lt;!-- \n    &lt;deletionPolicy class=\"solr.SolrDeletionPolicy\"&gt;\n    --&gt;\n      &lt;!-- The number of commit points to be kept --&gt;\n      &lt;!-- &lt;str name=\"maxCommitsToKeep\"&gt;1&lt;/str&gt; --&gt;\n      &lt;!-- The number of optimized commit points to be kept --&gt;\n      &lt;!-- &lt;str name=\"maxOptimizedCommitsToKeep\"&gt;0&lt;/str&gt; --&gt;\n      &l
 t;!--\n          Delete all commit points once they have reached the given age.\n          Supports DateMathParser syntax e.g.\n        --&gt;\n      &lt;!--\n         &lt;str name=\"maxCommitAge\"&gt;30MINUTES&lt;/str&gt;\n         &lt;str name=\"maxCommitAge\"&gt;1DAY&lt;/str&gt;\n      --&gt;\n    &lt;!-- \n    &lt;/deletionPolicy&gt;\n    --&gt;\n\n    &lt;!-- Lucene Infostream\n       \n         To aid in advanced debugging, Lucene provides an \"InfoStream\"\n         of detailed information when indexing.\n\n         Setting the value to true will instruct the underlying Lucene\n         IndexWriter to write its info stream to solr's log. By default,\n         this is enabled here, and controlled through log4j.properties.\n      --&gt;\n     &lt;infoStream&gt;true&lt;/infoStream&gt;\n  &lt;/indexConfig&gt;\n\n\n  &lt;!-- JMX\n       \n       This example enables JMX if and only if an existing MBeanServer\n       is found, use this if you want to configure JMX through JVM\n    
    parameters. Remove this to disable exposing Solr configuration\n       and statistics to JMX.\n\n       For more details see http://wiki.apache.org/solr/SolrJmx\n    --&gt;\n  &lt;jmx /&gt;\n  &lt;!-- If you want to connect to a particular server, specify the\n       agentId \n    --&gt;\n  &lt;!-- &lt;jmx agentId=\"myAgent\" /&gt; --&gt;\n  &lt;!-- If you want to start a new MBeanServer, specify the serviceUrl --&gt;\n  &lt;!-- &lt;jmx serviceUrl=\"service:jmx:rmi:///jndi/rmi://localhost:9999/solr\"/&gt;\n    --&gt;\n\n  &lt;!-- The default high-performance update handler --&gt;\n  &lt;updateHandler class=\"solr.DirectUpdateHandler2\"&gt;\n\n    &lt;!-- Enables a transaction log, used for real-time get, durability, and\n         and solr cloud replica recovery.  The log can grow as big as\n         uncommitted changes to the index, so use of a hard autoCommit\n         is recommended (see below).\n         \"dir\" - the target directory for transaction logs, defaults to the\n   
              solr data directory.  --&gt; \n    &lt;updateLog&gt;\n      &lt;str name=\"dir\"&gt;${solr.ulog.dir:}&lt;/str&gt;\n    &lt;/updateLog&gt;\n \n    &lt;!-- AutoCommit\n\n         Perform a hard commit automatically under certain conditions.\n         Instead of enabling autoCommit, consider using \"commitWithin\"\n         when adding documents. \n\n         http://wiki.apache.org/solr/UpdateXmlMessages\n\n         maxDocs - Maximum number of documents to add since the last\n                   commit before automatically triggering a new commit.\n\n         maxTime - Maximum amount of time in ms that is allowed to pass\n                   since a document was added before automatically\n                   triggering a new commit. \n         openSearcher - if false, the commit causes recent index changes\n           to be flushed to stable storage, but does not cause a new\n           searcher to be opened to make those changes visible.\n\n         If the updateLog is enab
 led, then it's highly recommended to\n         have some sort of hard autoCommit to limit the log size.\n      --&gt;\n     &lt;autoCommit&gt; \n       &lt;maxTime&gt;${solr.autoCommit.maxTime:15000}&lt;/maxTime&gt; \n       &lt;openSearcher&gt;false&lt;/openSearcher&gt; \n     &lt;/autoCommit&gt;\n\n    &lt;!-- softAutoCommit is like autoCommit except it causes a\n         'soft' commit which only ensures that changes are visible\n         but does not ensure that data is synced to disk.  This is\n         faster and more near-realtime friendly than a hard commit.\n      --&gt;\n\n     &lt;autoSoftCommit&gt; \n       &lt;maxTime&gt;${solr.autoSoftCommit.maxTime:5000}&lt;/maxTime&gt; \n     &lt;/autoSoftCommit&gt;\n\n    &lt;!-- Update Related Event Listeners\n         \n         Various IndexWriter related events can trigger Listeners to\n         take actions.\n\n         postCommit - fired after every commit or optimize command\n         postOptimize - fired after every optimize 
 command\n      --&gt;\n    &lt;!-- The RunExecutableListener executes an external command from a\n         hook such as postCommit or postOptimize.\n         \n         exe - the name of the executable to run\n         dir - dir to use as the current working directory. (default=\".\")\n         wait - the calling thread waits until the executable returns. \n                (default=\"true\")\n         args - the arguments to pass to the program.  (default is none)\n         env - environment variables to set.  (default is none)\n      --&gt;\n    &lt;!-- This example shows how RunExecutableListener could be used\n         with the script based replication...\n         http://wiki.apache.org/solr/CollectionDistribution\n      --&gt;\n    &lt;!--\n       &lt;listener event=\"postCommit\" class=\"solr.RunExecutableListener\"&gt;\n         &lt;str name=\"exe\"&gt;solr/bin/snapshooter&lt;/str&gt;\n         &lt;str name=\"dir\"&gt;.&lt;/str&gt;\n         &lt;bool name=\"wait\"&gt;true&lt;
 /bool&gt;\n         &lt;arr name=\"args\"&gt; &lt;str&gt;arg1&lt;/str&gt; &lt;str&gt;arg2&lt;/str&gt; &lt;/arr&gt;\n         &lt;arr name=\"env\"&gt; &lt;str&gt;MYVAR=val1&lt;/str&gt; &lt;/arr&gt;\n       &lt;/listener&gt;\n      --&gt;\n\n  &lt;/updateHandler&gt;\n  \n  &lt;!-- IndexReaderFactory\n\n       Use the following format to specify a custom IndexReaderFactory,\n       which allows for alternate IndexReader implementations.\n\n       ** Experimental Feature **\n\n       Please note - Using a custom IndexReaderFactory may prevent\n       certain other features from working. The API to\n       IndexReaderFactory may change without warning or may even be\n       removed from future releases if the problems cannot be\n       resolved.\n\n\n       ** Features that may not work with custom IndexReaderFactory **\n\n       The ReplicationHandler assumes a disk-resident index. Using a\n       custom IndexReader implementation may cause incompatibility\n       with ReplicationHandle
 r and may cause replication to not work\n       correctly. See SOLR-1366 for details.\n\n    --&gt;\n  &lt;!--\n  &lt;indexReaderFactory name=\"IndexReaderFactory\" class=\"package.class\"&gt;\n    &lt;str name=\"someArg\"&gt;Some Value&lt;/str&gt;\n  &lt;/indexReaderFactory &gt;\n  --&gt;\n\n  &lt;!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n       Query section - these settings control query time things like caches\n       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --&gt;\n  &lt;query&gt;\n    &lt;!-- Max Boolean Clauses\n\n         Maximum number of clauses in each BooleanQuery,  an exception\n         is thrown if exceeded.\n\n         ** WARNING **\n         \n         This option actually modifies a global Lucene property that\n         will affect all SolrCores.  If multiple solrconfig.xml files\n         disagree on this property, the value at any given moment will\n         be based on the last SolrCore to be initiali
 zed.\n         \n      --&gt;\n    &lt;maxBooleanClauses&gt;1024&lt;/maxBooleanClauses&gt;\n\n\n    &lt;!-- Solr Internal Query Caches\n\n         There are two implementations of cache available for Solr,\n         LRUCache, based on a synchronized LinkedHashMap, and\n         FastLRUCache, based on a ConcurrentHashMap.  \n\n         FastLRUCache has faster gets and slower puts in single\n         threaded operation and thus is generally faster than LRUCache\n         when the hit ratio of the cache is high (&gt; 75%), and may be\n         faster under other scenarios on multi-cpu systems.\n    --&gt;\n\n    &lt;!-- Filter Cache\n\n         Cache used by SolrIndexSearcher for filters (DocSets),\n         unordered sets of *all* documents that match a query.  When a\n         new searcher is opened, its caches may be prepopulated or\n         \"autowarmed\" using data from caches in the old searcher.\n         autowarmCount is the number of items to prepopulate.  For\n         LRUCa
 che, the autowarmed items will be the most recently\n         accessed items.\n\n         Parameters:\n           class - the SolrCache implementation LRUCache or\n               (LRUCache or FastLRUCache)\n           size - the maximum number of entries in the cache\n           initialSize - the initial capacity (number of entries) of\n               the cache.  (see java.util.HashMap)\n           autowarmCount - the number of entries to prepopulate from\n               and old cache.  \n      --&gt;\n    &lt;filterCache class=\"solr.FastLRUCache\"\n                 size=\"512\"\n                 initialSize=\"512\"\n                 autowarmCount=\"0\"/&gt;\n\n    &lt;!-- Query Result Cache\n         \n         Caches results of searches - ordered lists of document ids\n         (DocList) based on a query, a sort, and the range of documents requested.  \n      --&gt;\n    &lt;queryResultCache class=\"solr.LRUCache\"\n                     size=\"512\"\n                     initialS
 ize=\"512\"\n                     autowarmCount=\"0\"/&gt;\n   \n    &lt;!-- Document Cache\n\n         Caches Lucene Document objects (the stored fields for each\n         document).  Since Lucene internal document ids are transient,\n         this cache will not be autowarmed.  \n      --&gt;\n    &lt;documentCache class=\"solr.LRUCache\"\n                   size=\"512\"\n                   initialSize=\"512\"\n                   autowarmCount=\"0\"/&gt;\n    \n    &lt;!-- custom cache currently used by block join --&gt; \n    &lt;cache name=\"perSegFilter\"\n      class=\"solr.search.LRUCache\"\n      size=\"10\"\n      initialSize=\"0\"\n      autowarmCount=\"10\"\n      regenerator=\"solr.NoOpRegenerator\" /&gt;\n\n    &lt;!-- Field Value Cache\n         \n         Cache used to hold field values that are quickly accessible\n         by document id.  The fieldValueCache is created by default\n         even if not configured here.\n      --&gt;\n    &lt;!--\n       &lt;fieldValu
 eCache class=\"solr.FastLRUCache\"\n                        size=\"512\"\n                        autowarmCount=\"128\"\n                        showItems=\"32\" /&gt;\n      --&gt;\n\n    &lt;!-- Custom Cache\n\n         Example of a generic cache.  These caches may be accessed by\n         name through SolrIndexSearcher.getCache(),cacheLookup(), and\n         cacheInsert().  The purpose is to enable easy caching of\n         user/application level data.  The regenerator argument should\n         be specified as an implementation of solr.CacheRegenerator \n         if autowarming is desired.  \n      --&gt;\n    &lt;!--\n       &lt;cache name=\"myUserCache\"\n              class=\"solr.LRUCache\"\n              size=\"4096\"\n              initialSize=\"1024\"\n              autowarmCount=\"1024\"\n              regenerator=\"com.mycompany.MyRegenerator\"\n              /&gt;\n      --&gt;\n\n\n    &lt;!-- Lazy Field Loading\n\n         If true, stored fields that are not requested
  will be loaded\n         lazily.  This can result in a significant speed improvement\n         if the usual case is to not load all stored fields,\n         especially if the skipped fields are large compressed text\n         fields.\n    --&gt;\n    &lt;enableLazyFieldLoading&gt;true&lt;/enableLazyFieldLoading&gt;\n\n   &lt;!-- Use Filter For Sorted Query\n\n        A possible optimization that attempts to use a filter to\n        satisfy a search.  If the requested sort does not include\n        score, then the filterCache will be checked for a filter\n        matching the query. If found, the filter will be used as the\n        source of document ids, and then the sort will be applied to\n        that.\n\n        For most situations, this will not be useful unless you\n        frequently get the same search repeatedly with different sort\n        options, and none of them ever use \"score\"\n     --&gt;\n   &lt;!--\n      &lt;useFilterForSortedQuery&gt;true&lt;/useFilterForSorte
 dQuery&gt;\n     --&gt;\n\n   &lt;!-- Result Window Size\n\n        An optimization for use with the queryResultCache.  When a search\n        is requested, a superset of the requested number of document ids\n        are collected.  For example, if a search for a particular query\n        requests matching documents 10 through 19, and queryWindowSize is 50,\n        then documents 0 through 49 will be collected and cached.  Any further\n        requests in that range can be satisfied via the cache.  \n     --&gt;\n   &lt;queryResultWindowSize&gt;20&lt;/queryResultWindowSize&gt;\n\n   &lt;!-- Maximum number of documents to cache for any entry in the\n        queryResultCache. \n     --&gt;\n   &lt;queryResultMaxDocsCached&gt;200&lt;/queryResultMaxDocsCached&gt;\n\n   &lt;!-- Query Related Event Listeners\n\n        Various IndexSearcher related events can trigger Listeners to\n        take actions.\n\n        newSearcher - fired whenever a new searcher is being prepared\n        and 
 there is a current searcher handling requests (aka\n        registered).  It can be used to prime certain caches to\n        prevent long request times for certain requests.\n\n        firstSearcher - fired whenever a new searcher is being\n        prepared but there is no current registered searcher to handle\n        requests or to gain autowarming data from.\n\n        \n     --&gt;\n    &lt;!-- QuerySenderListener takes an array of NamedList and executes a\n         local query request for each NamedList in sequence. \n      --&gt;\n    &lt;listener event=\"newSearcher\" class=\"solr.QuerySenderListener\"&gt;\n      &lt;arr name=\"queries\"&gt;\n        &lt;!--\n           &lt;lst&gt;&lt;str name=\"q\"&gt;solr&lt;/str&gt;&lt;str name=\"sort\"&gt;price asc&lt;/str&gt;&lt;/lst&gt;\n           &lt;lst&gt;&lt;str name=\"q\"&gt;rocks&lt;/str&gt;&lt;str name=\"sort\"&gt;weight asc&lt;/str&gt;&lt;/lst&gt;\n          --&gt;\n      &lt;/arr&gt;\n    &lt;/listener&gt;\n    &lt;listener ev
 ent=\"firstSearcher\" class=\"solr.QuerySenderListener\"&gt;\n      &lt;arr name=\"queries\"&gt;\n        &lt;lst&gt;\n          &lt;str name=\"q\"&gt;static firstSearcher warming in solrconfig.xml&lt;/str&gt;\n        &lt;/lst&gt;\n      &lt;/arr&gt;\n    &lt;/listener&gt;\n\n    &lt;!-- Use Cold Searcher\n\n         If a search request comes in and there is no current\n         registered searcher, then immediately register the still\n         warming searcher and use it.  If \"false\" then all requests\n         will block until the first searcher is done warming.\n      --&gt;\n    &lt;useColdSearcher&gt;false&lt;/useColdSearcher&gt;\n\n    &lt;!-- Max Warming Searchers\n         \n         Maximum number of searchers that may be warming in the\n         background concurrently.  An error is returned if this limit\n         is exceeded.\n\n         Recommend values of 1-2 for read-only slaves, higher for\n         masters w/o cache warming.\n      --&gt;\n    &lt;maxWarmingSearc
 hers&gt;2&lt;/maxWarmingSearchers&gt;\n\n  &lt;/query&gt;\n\n\n  &lt;!-- Request Dispatcher\n\n       This section contains instructions for how the SolrDispatchFilter\n       should behave when processing requests for this SolrCore.\n\n       handleSelect is a legacy option that affects the behavior of requests\n       such as /select?qt=XXX\n\n       handleSelect=\"true\" will cause the SolrDispatchFilter to process\n       the request and dispatch the query to a handler specified by the \n       \"qt\" param, assuming \"/select\" isn't already registered.\n\n       handleSelect=\"false\" will cause the SolrDispatchFilter to\n       ignore \"/select\" requests, resulting in a 404 unless a handler\n       is explicitly registered with the name \"/select\"\n\n       handleSelect=\"true\" is not recommended for new users, but is the default\n       for backwards compatibility\n    --&gt;\n  &lt;requestDispatcher handleSelect=\"false\" &gt;\n    &lt;!-- Request Parsing\n\n         The
 se settings indicate how Solr Requests may be parsed, and\n         what restrictions may be placed on the ContentStreams from\n         those requests\n\n         enableRemoteStreaming - enables use of the stream.file\n         and stream.url parameters for specifying remote streams.\n\n         multipartUploadLimitInKB - specifies the max size (in KiB) of\n         Multipart File Uploads that Solr will allow in a Request.\n         \n         formdataUploadLimitInKB - specifies the max size (in KiB) of\n         form data (application/x-www-form-urlencoded) sent via\n         POST. You can use POST to pass request parameters not\n         fitting into the URL.\n         \n         addHttpRequestToContext - if set to true, it will instruct\n         the requestParsers to include the original HttpServletRequest\n         object in the context map of the SolrQueryRequest under the \n         key \"httpRequest\". It will not be used by any of the existing\n         Solr components, bu
 t may be useful when developing custom \n         plugins.\n         \n         *** WARNING ***\n         The settings below authorize Solr to fetch remote files, You\n         should make sure your system has some authentication before\n         using enableRemoteStreaming=\"true\"\n\n      --&gt; \n    &lt;requestParsers enableRemoteStreaming=\"true\" \n                    multipartUploadLimitInKB=\"2048000\"\n                    formdataUploadLimitInKB=\"2048\"\n                    addHttpRequestToContext=\"false\"/&gt;\n\n    &lt;!-- HTTP Caching\n\n         Set HTTP caching related parameters (for proxy caches and clients).\n\n         The options below instruct Solr not to output any HTTP Caching\n         related headers\n      --&gt;\n    &lt;httpCaching never304=\"true\" /&gt;\n    &lt;!-- If you include a &lt;cacheControl&gt; directive, it will be used to\n         generate a Cache-Control header (as well as an Expires header\n         if the value contains \"max-age=\")\n
          \n         By default, no Cache-Control header is generated.\n         \n         You can use the &lt;cacheControl&gt; option even if you have set\n         never304=\"true\"\n      --&gt;\n    &lt;!--\n       &lt;httpCaching never304=\"true\" &gt;\n         &lt;cacheControl&gt;max-age=30, public&lt;/cacheControl&gt; \n       &lt;/httpCaching&gt;\n      --&gt;\n    &lt;!-- To enable Solr to respond with automatically generated HTTP\n         Caching headers, and to response to Cache Validation requests\n         correctly, set the value of never304=\"false\"\n         \n         This will cause Solr to generate Last-Modified and ETag\n         headers based on the properties of the Index.\n\n         The following options can also be specified to affect the\n         values of these headers...\n\n         lastModFrom - the default value is \"openTime\" which means the\n         Last-Modified value (and validation against If-Modified-Since\n         requests) will all be rel
 ative to when the current Searcher\n         was opened.  You can change it to lastModFrom=\"dirLastMod\" if\n         you want the value to exactly correspond to when the physical\n         index was last modified.\n\n         etagSeed=\"...\" is an option you can change to force the ETag\n         header (and validation against If-None-Match requests) to be\n         different even if the index has not changed (ie: when making\n         significant changes to your config file)\n\n         (lastModifiedFrom and etagSeed are both ignored if you use\n         the never304=\"true\" option)\n      --&gt;\n    &lt;!--\n       &lt;httpCaching lastModifiedFrom=\"openTime\"\n                    etagSeed=\"Solr\"&gt;\n         &lt;cacheControl&gt;max-age=30, public&lt;/cacheControl&gt; \n       &lt;/httpCaching&gt;\n      --&gt;\n  &lt;/requestDispatcher&gt;\n\n  &lt;!-- Request Handlers \n\n       http://wiki.apache.org/solr/SolrRequestHandler\n\n       Incoming queries will be dispatched 
 to a specific handler by name\n       based on the path specified in the request.\n\n       Legacy behavior: If the request path uses \"/select\" but no Request\n       Handler has that name, and if handleSelect=\"true\" has been specified in\n       the requestDispatcher, then the Request Handler is dispatched based on\n       the qt parameter.  Handlers without a leading '/' are accessed this way\n       like so: http://host/app/[core/]select?qt=name  If no qt is\n       given, then the requestHandler that declares default=\"true\" will be\n       used or the one named \"standard\".\n\n       If a Request Handler is declared with startup=\"lazy\", then it will\n       not be initialized until the first request that uses it.\n\n    --&gt;\n\n  &lt;requestHandler name=\"/dataimport\" class=\"solr.DataImportHandler\"&gt;\n    &lt;lst name=\"defaults\"&gt;\n      &lt;str name=\"config\"&gt;solr-data-config.xml&lt;/str&gt;\n    &lt;/lst&gt;\n  &lt;/requestHandler&gt;\n\n  &lt;!-- Searc
 hHandler\n\n       http://wiki.apache.org/solr/SearchHandler\n\n       For processing Search Queries, the primary Request Handler\n       provided with Solr is \"SearchHandler\" It delegates to a sequent\n       of SearchComponents (see below) and supports distributed\n       queries across multiple shards\n    --&gt;\n  &lt;requestHandler name=\"/select\" class=\"solr.SearchHandler\"&gt;\n    &lt;!-- default values for query parameters can be specified, these\n         will be overridden by parameters in the request\n      --&gt;\n     &lt;lst name=\"defaults\"&gt;\n       &lt;str name=\"echoParams\"&gt;explicit&lt;/str&gt;\n       &lt;int name=\"rows\"&gt;10&lt;/int&gt;\n       &lt;str name=\"df\"&gt;text&lt;/str&gt;\n     &lt;/lst&gt;\n    &lt;!-- In addition to defaults, \"appends\" params can be specified\n         to identify values which should be appended to the list of\n         multi-val params from the query (or the existing \"defaults\").\n      --&gt;\n    &lt;!-- In th
 is example, the param \"fq=instock:true\" would be appended to\n         any query time fq params the user may specify, as a mechanism for\n         partitioning the index, independent of any user selected filtering\n         that may also be desired (perhaps as a result of faceted searching).\n\n         NOTE: there is *absolutely* nothing a client can do to prevent these\n         \"appends\" values from being used, so don't use this mechanism\n         unless you are sure you always want it.\n      --&gt;\n    &lt;!--\n       &lt;lst name=\"appends\"&gt;\n         &lt;str name=\"fq\"&gt;inStock:true&lt;/str&gt;\n       &lt;/lst&gt;\n      --&gt;\n    &lt;!-- \"invariants\" are a way of letting the Solr maintainer lock down\n         the options available to Solr clients.  Any params values\n         specified here are used regardless of what values may be specified\n         in either the query, the \"defaults\", or the \"appends\" params.\n\n         In this example, the facet.f
 ield and facet.query params would\n         be fixed, limiting the facets clients can use.  Faceting is\n         not turned on by default - but if the client does specify\n         facet=true in the request, these are the only facets they\n         will be able to see counts for; regardless of what other\n         facet.field or facet.query params they may specify.\n\n         NOTE: there is *absolutely* nothing a client can do to prevent these\n         \"invariants\" values from being used, so don't use this mechanism\n         unless you are sure you always want it.\n      --&gt;\n    &lt;!--\n       &lt;lst name=\"invariants\"&gt;\n         &lt;str name=\"facet.field\"&gt;cat&lt;/str&gt;\n         &lt;str name=\"facet.field\"&gt;manu_exact&lt;/str&gt;\n         &lt;str name=\"facet.query\"&gt;price:[* TO 500]&lt;/str&gt;\n         &lt;str name=\"facet.query\"&gt;price:[500 TO *]&lt;/str&gt;\n       &lt;/lst&gt;\n      --&gt;\n    &lt;!-- If the default list of SearchComponents 
 is not desired, that\n         list can either be overridden completely, or components can be\n         prepended or appended to the default list.  (see below)\n      --&gt;\n    &lt;!--\n       &lt;arr name=\"components\"&gt;\n         &lt;str&gt;nameOfCustomComponent1&lt;/str&gt;\n         &lt;str&gt;nameOfCustomComponent2&lt;/str&gt;\n       &lt;/arr&gt;\n      --&gt;\n    &lt;/requestHandler&gt;\n\n  &lt;!-- A request handler that returns indented JSON by default --&gt;\n  &lt;requestHandler name=\"/query\" class=\"solr.SearchHandler\"&gt;\n     &lt;lst name=\"defaults\"&gt;\n       &lt;str name=\"echoParams\"&gt;explicit&lt;/str&gt;\n       &lt;str name=\"wt\"&gt;json&lt;/str&gt;\n       &lt;str name=\"indent\"&gt;true&lt;/str&gt;\n       &lt;str name=\"df\"&gt;text&lt;/str&gt;\n     &lt;/lst&gt;\n  &lt;/requestHandler&gt;\n\n\n  &lt;!-- realtime get handler, guaranteed to return the latest stored fields of\n       any document, without the need to commit or open a new searcher
 .  The\n       current implementation relies on the updateLog feature being enabled.\n\n       ** WARNING **\n       Do NOT disable the realtime get handler at /get if you are using\n       SolrCloud otherwise any leader election will cause a full sync in ALL\n       replicas for the shard in question. Similarly, a replica recovery will\n       also always fetch the complete index from the leader because a partial\n       sync will not be possible in the absence of this handler.\n  --&gt;\n  &lt;requestHandler name=\"/get\" class=\"solr.RealTimeGetHandler\"&gt;\n     &lt;lst name=\"defaults\"&gt;\n       &lt;str name=\"omitHeader\"&gt;true&lt;/str&gt;\n       &lt;str name=\"wt\"&gt;json&lt;/str&gt;\n       &lt;str name=\"indent\"&gt;true&lt;/str&gt;\n     &lt;/lst&gt;\n  &lt;/requestHandler&gt;\n\n\n  &lt;!-- A Robust Example\n\n       This example SearchHandler declaration shows off usage of the\n       SearchHandler with many defaults declared\n\n       Note that multiple instance
 s of the same Request Handler\n       (SearchHandler) can be registered multiple times with different\n       names (and different init parameters)\n    --&gt;\n  &lt;requestHandler name=\"/browse\" class=\"solr.SearchHandler\"&gt;\n    &lt;lst name=\"defaults\"&gt;\n      &lt;str name=\"echoParams\"&gt;explicit&lt;/str&gt;\n\n      &lt;!-- VelocityResponseWriter settings --&gt;\n      &lt;str name=\"wt\"&gt;velocity&lt;/str&gt;\n      &lt;str name=\"v.template\"&gt;browse&lt;/str&gt;\n      &lt;str name=\"v.layout\"&gt;layout&lt;/str&gt;\n\n      &lt;!-- Query settings --&gt;\n      &lt;str name=\"defType\"&gt;edismax&lt;/str&gt;\n      &lt;str name=\"q.alt\"&gt;*:*&lt;/str&gt;\n      &lt;str name=\"rows\"&gt;10&lt;/str&gt;\n      &lt;str name=\"fl\"&gt;*,score&lt;/str&gt;\n\n      &lt;!-- Faceting defaults --&gt;\n      &lt;str name=\"facet\"&gt;on&lt;/str&gt;\n      &lt;str name=\"facet.mincount\"&gt;1&lt;/str&gt;\n    &lt;/lst&gt;\n  &lt;/requestHandler&gt;\n\n\n  &lt;initParams
  path=\"/update/**,/query,/select,/tvrh,/elevate,/spell,/browse\"&gt;\n    &lt;lst name=\"defaults\"&gt;\n      &lt;str name=\"df\"&gt;text&lt;/str&gt;\n      &lt;str name=\"update.chain\"&gt;add-unknown-fields-to-the-schema&lt;/str&gt;\n    &lt;/lst&gt;\n  &lt;/initParams&gt;\n\n  &lt;!-- Update Request Handler.\n       \n       http://wiki.apache.org/solr/UpdateXmlMessages\n\n       The canonical Request Handler for Modifying the Index through\n       commands specified using XML, JSON, CSV, or JAVABIN\n\n       Note: Since solr1.1 requestHandlers requires a valid content\n       type header if posted in the body. For example, curl now\n       requires: -H 'Content-type:text/xml; charset=utf-8'\n       \n       To override the request content type and force a specific \n       Content-type, use the request parameter: \n         ?update.contentType=text/csv\n       \n       This handler will pick a response format to match the input\n       if the 'wt' parameter is not explicit\n  
   --&gt;\n  &lt;requestHandler name=\"/update\" class=\"solr.UpdateRequestHandler\"&gt;\n    &lt;!-- See below for information on defining \n         updateRequestProcessorChains that can be used by name \n         on each Update Request\n      --&gt;\n    &lt;!--\n       &lt;lst name=\"defaults\"&gt;\n         &lt;str name=\"update.chain\"&gt;dedupe&lt;/str&gt;\n       &lt;/lst&gt;\n       --&gt;\n  &lt;/requestHandler&gt;\n\n  &lt;!-- Solr Cell Update Request Handler\n\n       http://wiki.apache.org/solr/ExtractingRequestHandler \n\n    --&gt;\n  &lt;requestHandler name=\"/update/extract\" \n                  startup=\"lazy\"\n                  class=\"solr.extraction.ExtractingRequestHandler\" &gt;\n    &lt;lst name=\"defaults\"&gt;\n      &lt;str name=\"lowernames\"&gt;true&lt;/str&gt;\n      &lt;str name=\"uprefix\"&gt;ignored_&lt;/str&gt;\n\n      &lt;!-- capture link hrefs but ignore div attributes --&gt;\n      &lt;str name=\"captureAttr\"&gt;true&lt;/str&gt;\n      &lt;str 
 name=\"fmap.a\"&gt;links&lt;/str&gt;\n      &lt;str name=\"fmap.div\"&gt;ignored_&lt;/str&gt;\n    &lt;/lst&gt;\n  &lt;/requestHandler&gt;\n\n\n  &lt;!-- Field Analysis Request Handler\n\n       RequestHandler that provides much the same functionality as\n       analysis.jsp. Provides the ability to specify multiple field\n       types and field names in the same request and outputs\n       index-time and query-time analysis for each of them.\n\n       Request parameters are:\n       analysis.fieldname - field name whose analyzers are to be used\n\n       analysis.fieldtype - field type whose analyzers are to be used\n       analysis.fieldvalue - text for index-time analysis\n       q (or analysis.q) - text for query time analysis\n       analysis.showmatch (true|false) - When set to true and when\n           query analysis is performed, the produced tokens of the\n           field value analysis will be marked as \"matched\" for every\n           token that is produces by the query
  analysis\n   --&gt;\n  &lt;requestHandler name=\"/analysis/field\" \n                  startup=\"lazy\"\n                  class=\"solr.FieldAnalysisRequestHandler\" /&gt;\n\n\n  &lt;!-- Document Analysis Handler\n\n       http://wiki.apache.org/solr/AnalysisRequestHandler\n\n       An analysis handler that provides a breakdown of the analysis\n       process of provided documents. This handler expects a (single)\n       content stream with the following format:\n\n       &lt;docs&gt;\n         &lt;doc&gt;\n           &lt;field name=\"id\"&gt;1&lt;/field&gt;\n           &lt;field name=\"name\"&gt;The Name&lt;/field&gt;\n           &lt;field name=\"text\"&gt;The Text Value&lt;/field&gt;\n         &lt;/doc&gt;\n         &lt;doc&gt;...&lt;/doc&gt;\n         &lt;doc&gt;...&lt;/doc&gt;\n         ...\n       &lt;/docs&gt;\n\n    Note: Each document must contain a field which serves as the\n    unique key. This key is used in the returned response to associate\n    an analysis breakdown t
 o the analyzed document.\n\n    Like the FieldAnalysisRequestHandler, this handler also supports\n    query analysis by sending either an \"analysis.query\" or \"q\"\n    request parameter that holds the query text to be analyzed. It\n    also supports the \"analysis.showmatch\" parameter which when set to\n    true, all field tokens that match the query tokens will be marked\n    as a \"match\". \n  --&gt;\n  &lt;requestHandler name=\"/analysis/document\" \n                  class=\"solr.DocumentAnalysisRequestHandler\" \n                  startup=\"lazy\" /&gt;\n\n  &lt;!-- Admin Handlers\n\n       Admin Handlers - This will register all the standard admin\n       RequestHandlers.  \n    --&gt;\n  &lt;requestHandler name=\"/admin/\" \n                  class=\"solr.admin.AdminHandlers\" /&gt;\n  &lt;!-- This single handler is equivalent to the following... --&gt;\n  &lt;!--\n     &lt;requestHandler name=\"/admin/luke\"       class=\"solr.admin.LukeRequestHandler\" /&gt;\n     &lt;
 requestHandler name=\"/admin/system\"     class=\"solr.admin.SystemInfoHandler\" /&gt;\n     &lt;requestHandler name=\"/admin/plugins\"    class=\"solr.admin.PluginInfoHandler\" /&gt;\n     &lt;requestHandler name=\"/admin/threads\"    class=\"solr.admin.ThreadDumpHandler\" /&gt;\n     &lt;requestHandler name=\"/admin/properties\" class=\"solr.admin.PropertiesRequestHandler\" /&gt;\n     &lt;requestHandler name=\"/admin/file\"       class=\"solr.admin.ShowFileRequestHandler\" &gt;\n    --&gt;\n  &lt;!-- If you wish to hide files under ${solr.home}/conf, explicitly\n       register the ShowFileRequestHandler using the definition below. \n       NOTE: The glob pattern ('*') is the only pattern supported at present, *.xml will\n             not exclude all files ending in '.xml'. Use it to exclude _all_ updates\n    --&gt;\n  &lt;!--\n     &lt;requestHandler name=\"/admin/file\" \n                     class=\"solr.admin.ShowFileRequestHandler\" &gt;\n       &lt;lst name=\"invariants\"&
 gt;\n         &lt;str name=\"hidden\"&gt;synonyms.txt&lt;/str&gt; \n         &lt;str name=\"hidden\"&gt;anotherfile.txt&lt;/str&gt; \n         &lt;str name=\"hidden\"&gt;*&lt;/str&gt; \n       &lt;/lst&gt;\n     &lt;/requestHandler&gt;\n    --&gt;\n\n  &lt;!--\n    Enabling this request handler (which is NOT a default part of the admin handler) will allow the Solr UI to edit\n    all the config files. This is intended for secure/development use ONLY! Leaving available and publically\n    accessible is a security vulnerability and should be done with extreme caution!\n  --&gt;\n  &lt;!--\n  &lt;requestHandler name=\"/admin/fileedit\" class=\"solr.admin.EditFileRequestHandler\" &gt;\n    &lt;lst name=\"invariants\"&gt;\n         &lt;str name=\"hidden\"&gt;synonyms.txt&lt;/str&gt;\n         &lt;str name=\"hidden\"&gt;anotherfile.txt&lt;/str&gt;\n    &lt;/lst&gt;\n  &lt;/requestHandler&gt;\n  --&gt;\n  &lt;!-- ping/healthcheck --&gt;\n  &lt;requestHandler name=\"/admin/ping\" class=\"so
 lr.PingRequestHandler\"&gt;\n    &lt;lst name=\"invariants\"&gt;\n      &lt;str name=\"q\"&gt;solrpingquery&lt;/str&gt;\n    &lt;/lst&gt;\n    &lt;lst name=\"defaults\"&gt;\n      &lt;str name=\"echoParams\"&gt;all&lt;/str&gt;\n    &lt;/lst&gt;\n    &lt;!-- An optional feature of the PingRequestHandler is to configure the \n         handler with a \"healthcheckFile\" which can be used to enable/disable \n         the PingRequestHandler.\n         relative paths are resolved against the data dir \n      --&gt;\n    &lt;!-- &lt;str name=\"healthcheckFile\"&gt;server-enabled.txt&lt;/str&gt; --&gt;\n  &lt;/requestHandler&gt;\n\n  &lt;!-- Echo the request contents back to the client --&gt;\n  &lt;requestHandler name=\"/debug/dump\" class=\"solr.DumpRequestHandler\" &gt;\n    &lt;lst name=\"defaults\"&gt;\n     &lt;str name=\"echoParams\"&gt;explicit&lt;/str&gt; \n     &lt;str name=\"echoHandler\"&gt;true&lt;/str&gt;\n    &lt;/lst&gt;\n  &lt;/requestHandler&gt;\n  \n  &lt;!-- Solr Replica
 tion\n\n       The SolrReplicationHandler supports replicating indexes from a\n       \"master\" used for indexing and \"slaves\" used for queries.\n\n       http://wiki.apache.org/solr/SolrReplication \n\n       It is also necessary for SolrCloud to function (in Cloud mode, the\n       replication handler is used to bulk transfer segments when nodes \n       are added or need to recover).\n\n       https://wiki.apache.org/solr/SolrCloud/\n    --&gt;\n  &lt;requestHandler name=\"/replication\" class=\"solr.ReplicationHandler\" &gt; \n    &lt;!--\n       To enable simple master/slave replication, uncomment one of the \n       sections below, depending on whether this solr instance should be\n       the \"master\" or a \"slave\".  If this instance is a \"slave\" you will \n       also need to fill in the masterUrl to point to a real machine.\n    --&gt;\n    &lt;!--\n       &lt;lst name=\"master\"&gt;\n         &lt;str name=\"replicateAfter\"&gt;commit&lt;/str&gt;\n         &lt;str na
 me=\"replicateAfter\"&gt;startup&lt;/str&gt;\n         &lt;str name=\"confFiles\"&gt;schema.xml,stopwords.txt&lt;/str&gt;\n       &lt;/lst&gt;\n    --&gt;\n    &lt;!--\n       &lt;lst name=\"slave\"&gt;\n         &lt;str name=\"masterUrl\"&gt;http://your-master-hostname:8983/solr&lt;/str&gt;\n         &lt;str name=\"pollInterval\"&gt;00:00:60&lt;/str&gt;\n       &lt;/lst&gt;\n    --&gt;\n  &lt;/requestHandler&gt;\n\n  &lt;!-- Search Components\n\n       Search components are registered to SolrCore and used by \n       instances of SearchHandler (which can access them by name)\n       \n       By default, the following components are available:\n       \n       &lt;searchComponent name=\"query\"     class=\"solr.QueryComponent\" /&gt;\n       &lt;searchComponent name=\"facet\"     class=\"solr.FacetComponent\" /&gt;\n       &lt;searchComponent name=\"mlt\"       class=\"solr.MoreLikeThisComponent\" /&gt;\n       &lt;searchComponent name=\"highlight\" class=\"solr.HighlightComponent\"
  /&gt;\n       &lt;searchComponent name=\"stats\"     class=\"solr.StatsComponent\" /&gt;\n       &lt;searchComponent name=\"debug\"     class=\"solr.DebugComponent\" /&gt;\n   \n       Default configuration in a requestHandler would look like:\n\n       &lt;arr name=\"components\"&gt;\n         &lt;str&gt;query&lt;/str&gt;\n         &lt;str&gt;facet&lt;/str&gt;\n         &lt;str&gt;mlt&lt;/str&gt;\n         &lt;str&gt;highlight&lt;/str&gt;\n         &lt;str&gt;stats&lt;/str&gt;\n         &lt;str&gt;debug&lt;/str&gt;\n       &lt;/arr&gt;\n\n       If you register a searchComponent to one of the standard names, \n       that will be used instead of the default.\n\n       To insert components before or after the 'standard' components, use:\n    \n       &lt;arr name=\"first-components\"&gt;\n         &lt;str&gt;myFirstComponentName&lt;/str&gt;\n       &lt;/arr&gt;\n    \n       &lt;arr name=\"last-components\"&gt;\n         &lt;str&gt;myLastComponentName&lt;/str&gt;\n       &lt;/arr&g
 t;\n\n       NOTE: The component registered with the name \"debug\" will\n       always be executed after the \"last-components\" \n       \n     --&gt;\n  \n   &lt;!-- Spell Check\n\n        The spell check component can return a list of alternative spelling\n        suggestions.  \n\n        http://wiki.apache.org/solr/SpellCheckComponent\n     --&gt;\n  &lt;searchComponent name=\"spellcheck\" class=\"solr.SpellCheckComponent\"&gt;\n\n    &lt;str name=\"queryAnalyzerFieldType\"&gt;key_lower_case&lt;/str&gt;\n\n    &lt;!-- Multiple \"Spell Checkers\" can be declared and used by this\n         component\n      --&gt;\n\n    &lt;!-- a spellchecker built from a field of the main index --&gt;\n    &lt;lst name=\"spellchecker\"&gt;\n      &lt;str name=\"name\"&gt;default&lt;/str&gt;\n      &lt;str name=\"field\"&gt;text&lt;/str&gt;\n      &lt;str name=\"classname\"&gt;solr.DirectSolrSpellChecker&lt;/str&gt;\n      &lt;!-- the spellcheck distance measure used, the default is the internal
  levenshtein --&gt;\n      &lt;str name=\"distanceMeasure\"&gt;internal&lt;/str&gt;\n      &lt;!-- minimum accuracy needed to be considered a valid spellcheck suggestion --&gt;\n      &lt;float name=\"accuracy\"&gt;0.5&lt;/float&gt;\n      &lt;!-- the maximum #edits we consider when enumerating terms: can be 1 or 2 --&gt;\n      &lt;int name=\"maxEdits\"&gt;2&lt;/int&gt;\n      &lt;!-- the minimum shared prefix when enumerating terms --&gt;\n      &lt;int name=\"minPrefix\"&gt;1&lt;/int&gt;\n      &lt;!-- maximum number of inspections per result. --&gt;\n      &lt;int name=\"maxInspections\"&gt;5&lt;/int&gt;\n      &lt;!-- minimum length of a query term to be considered for correction --&gt;\n      &lt;int name=\"minQueryLength\"&gt;4&lt;/int&gt;\n      &lt;!-- maximum threshold of documents a query term can appear to be considered for correction --&gt;\n      &lt;float name=\"maxQueryFrequency\"&gt;0.01&lt;/float&gt;\n      &lt;!-- uncomment this to require suggestions to occur in 
 1% of the documents\n        &lt;float name=\"thresholdTokenFrequency\"&gt;.01&lt;/float&gt;\n      --&gt;\n    &lt;/lst&gt;\n    \n    &lt;!-- a spellchecker that can break or combine words.  See \"/spell\" handler below for usage --&gt;\n    &lt;lst name=\"spellchecker\"&gt;\n      &lt;str name=\"name\"&gt;wordbreak&lt;/str&gt;\n      &lt;str name=\"classname\"&gt;solr.WordBreakSolrSpellChecker&lt;/str&gt;      \n      &lt;str name=\"field\"&gt;name&lt;/str&gt;\n      &lt;str name=\"combineWords\"&gt;true&lt;/str&gt;\n      &lt;str name=\"breakWords\"&gt;true&lt;/str&gt;\n      &lt;int name=\"maxChanges\"&gt;10&lt;/int&gt;\n    &lt;/lst&gt;\n\n    &lt;!-- a spellchecker that uses a different distance measure --&gt;\n    &lt;!--\n       &lt;lst name=\"spellchecker\"&gt;\n         &lt;str name=\"name\"&gt;jarowinkler&lt;/str&gt;\n         &lt;str name=\"field\"&gt;spell&lt;/str&gt;\n         &lt;str name=\"classname\"&gt;solr.DirectSolrSpellChecker&lt;/str&gt;\n         &lt;str name
 =\"distanceMeasure\"&gt;\n           org.apache.lucene.search.spell.JaroWinklerDistance\n         &lt;/str&gt;\n       &lt;/lst&gt;\n     --&gt;\n\n    &lt;!-- a spellchecker that use an alternate comparator \n\n         comparatorClass be one of:\n          1. score (default)\n          2. freq (Frequency first, then score)\n          3. A fully qualified class name\n      --&gt;\n    &lt;!--\n       &lt;lst name=\"spellchecker\"&gt;\n         &lt;str name=\"name\"&gt;freq&lt;/str&gt;\n         &lt;str name=\"field\"&gt;lowerfilt&lt;/str&gt;\n         &lt;str name=\"classname\"&gt;solr.DirectSolrSpellChecker&lt;/str&gt;\n         &lt;str name=\"comparatorClass\"&gt;freq&lt;/str&gt;\n      --&gt;\n\n    &lt;!-- A spellchecker that reads the list of words from a file --&gt;\n    &lt;!--\n       &lt;lst name=\"spellchecker\"&gt;\n         &lt;str name=\"classname\"&gt;solr.FileBasedSpellChecker&lt;/str&gt;\n         &lt;str name=\"name\"&gt;file&lt;/str&gt;\n         &lt;str name=\"so
 urceLocation\"&gt;spellings.txt&lt;/str&gt;\n         &lt;str name=\"characterEncoding\"&gt;UTF-8&lt;/str&gt;\n         &lt;str name=\"spellcheckIndexDir\"&gt;spellcheckerFile&lt;/str&gt;\n       &lt;/lst&gt;\n      --&gt;\n  &lt;/searchComponent&gt;\n  \n  &lt;!-- A request handler for demonstrating the spellcheck component.  \n\n       NOTE: This is purely as an example.  The whole purpose of the\n       SpellCheckComponent is to hook it into the request handler that\n       handles your normal user queries so that a separate request is\n       not needed to get suggestions.\n\n       IN OTHER WORDS, THERE IS REALLY GOOD CHANCE THE SETUP BELOW IS\n       NOT WHAT YOU WANT FOR YOUR PRODUCTION SYSTEM!\n       \n       See http://wiki.apache.org/solr/SpellCheckComponent for details\n       on the request parameters.\n    --&gt;\n  &lt;requestHandler name=\"/spell\" class=\"solr.SearchHandler\" startup=\"lazy\"&gt;\n    &lt;lst name=\"defaults\"&gt;\n      &lt;str name=\"df\"&gt;text&
 lt;/str&gt;\n      &lt;!-- Solr will use suggestions from both the 'default' spellchecker\n           and from the 'wordbreak' spellchecker and combine them.\n           collations (re-written queries) can include a combination of\n           corrections from both spellcheckers --&gt;\n      &lt;str name=\"spellcheck.dictionary\"&gt;default&lt;/str&gt;\n      &lt;str name=\"spellcheck.dictionary\"&gt;wordbreak&lt;/str&gt;\n      &lt;str name=\"spellcheck\"&gt;on&lt;/str&gt;\n      &lt;str name=\"spellcheck.extendedResults\"&gt;true&lt;/str&gt;       \n      &lt;str name=\"spellcheck.count\"&gt;10&lt;/str&gt;\n      &lt;str name=\"spellcheck.alternativeTermCount\"&gt;5&lt;/str&gt;\n      &lt;str name=\"spellcheck.maxResultsForSuggest\"&gt;5&lt;/str&gt;       \n      &lt;str name=\"spellcheck.collate\"&gt;true&lt;/str&gt;\n      &lt;str name=\"spellcheck.collateExtendedResults\"&gt;true&lt;/str&gt;  \n      &lt;str name=\"spellcheck.maxCollationTries\"&gt;10&lt;/str&gt;\n      &lt;str
  name=\"spellcheck.maxCollations\"&gt;5&lt;/str&gt;         \n    &lt;/lst&gt;\n    &lt;arr name=\"last-components\"&gt;\n      &lt;str&gt;spellcheck&lt;/str&gt;\n    &lt;/arr&gt;\n  &lt;/requestHandler&gt;\n\n  &lt;searchComponent name=\"suggest\" class=\"solr.SuggestComponent\"&gt;\n   &lt;lst name=\"suggester\"&gt;\n      &lt;str name=\"name\"&gt;mySuggester&lt;/str&gt;\n      &lt;str name=\"lookupImpl\"&gt;FuzzyLookupFactory&lt;/str&gt;      &lt;!-- org.apache.solr.spelling.suggest.fst --&gt;\n      &lt;str name=\"dictionaryImpl\"&gt;DocumentDictionaryFactory&lt;/str&gt;     &lt;!-- org.apache.solr.spelling.suggest.HighFrequencyDictionaryFactory --&gt; \n      &lt;str name=\"field\"&gt;cat&lt;/str&gt;\n      &lt;str name=\"weightField\"&gt;price&lt;/str&gt;\n      &lt;str name=\"suggestAnalyzerFieldType\"&gt;string&lt;/str&gt;\n    &lt;/lst&gt;\n  &lt;/searchComponent&gt;\n\n  &lt;requestHandler name=\"/suggest\" class=\"solr.SearchHandler\" startup=\"lazy\"&gt;\n    &lt;lst nam
 e=\"defaults\"&gt;\n      &lt;str name=\"suggest\"&gt;true&lt;/str&gt;\n      &lt;str name=\"suggest.count\"&gt;10&lt;/str&gt;\n    &lt;/lst&gt;\n    &lt;arr name=\"components\"&gt;\n      &lt;str&gt;suggest&lt;/str&gt;\n    &lt;/arr&gt;\n  &lt;/requestHandler&gt;\n  &lt;!-- Term Vector Component\n\n       http://wiki.apache.org/solr/TermVectorComponent\n    --&gt;\n  &lt;searchComponent name=\"tvComponent\" class=\"solr.TermVectorComponent\"/&gt;\n\n  &lt;!-- A request handler for demonstrating the term vector component\n\n       This is purely as an example.\n\n       In reality you will likely want to add the component to your \n       already specified request handlers. \n    --&gt;\n  &lt;requestHandler name=\"/tvrh\" class=\"solr.SearchHandler\" startup=\"lazy\"&gt;\n    &lt;lst name=\"defaults\"&gt;\n      &lt;str name=\"df\"&gt;text&lt;/str&gt;\n      &lt;bool name=\"tv\"&gt;true&lt;/bool&gt;\n    &lt;/lst&gt;\n    &lt;arr name=\"last-components\"&gt;\n      &lt;str&gt;tvCom
 ponent&lt;/str&gt;\n    &lt;/arr&gt;\n  &lt;/requestHandler&gt;\n\n  &lt;!-- Clustering Component\n\n       You'll need to set the solr.clustering.enabled system property\n       when running solr to run with clustering enabled:\n\n            java -Dsolr.clustering.enabled=true -jar start.jar\n\n       http://wiki.apache.org/solr/ClusteringComponent\n       http://carrot2.github.io/solr-integration-strategies/\n    --&gt;\n  &lt;searchComponent name=\"clustering\"\n                   enable=\"${solr.clustering.enabled:false}\"\n                   class=\"solr.clustering.ClusteringComponent\" &gt;\n    &lt;lst name=\"engine\"&gt;\n      &lt;str name=\"name\"&gt;lingo&lt;/str&gt;\n\n      &lt;!-- Class name of a clustering algorithm compatible with the Carrot2 framework.\n\n           Currently available open source algorithms are:\n           * org.carrot2.clustering.lingo.LingoClusteringAlgorithm\n           * org.carrot2.clustering.stc.STCClusteringAlgorithm\n           * org.carr
 ot2.clustering.kmeans.BisectingKMeansClusteringAlgorithm\n\n           See http://project.carrot2.org/algorithms.html for more information.\n\n           A commercial algorithm Lingo3G (needs to be installed separately) is defined as:\n           * com.carrotsearch.lingo3g.Lingo3GClusteringAlgorithm\n        --&gt;\n      &lt;str name=\"carrot.algorithm\"&gt;org.carrot2.clustering.lingo.LingoClusteringAlgorithm&lt;/str&gt;\n\n      &lt;!-- Override location of the clustering algorithm's resources \n           (attribute definitions and lexical resources).\n\n           A directory from which to load algorithm-specific stop words,\n           stop labels and attribute definition XMLs. \n\n           For an overview of Carrot2 lexical resources, see:\n           http://download.carrot2.org/head/manual/#chapter.lexical-resources\n\n           For an overview of Lingo3G lexical resources, see:\n           http://download.carrotsearch.com/lingo3g/manual/#chapter.lexical-resources\n      
  --&gt;\n      &lt;str name=\"carrot.resourcesDir\"&gt;clustering/carrot2&lt;/str&gt;\n    &lt;/lst&gt;\n\n    &lt;!-- An example definition for the STC clustering algorithm. --&gt;\n    &lt;lst name=\"engine\"&gt;\n      &lt;str name=\"name\"&gt;stc&lt;/str&gt;\n      &lt;str name=\"carrot.algorithm\"&gt;org.carrot2.clustering.stc.STCClusteringAlgorithm&lt;/str&gt;\n    &lt;/lst&gt;\n\n    &lt;!-- An example definition for the bisecting kmeans clustering algorithm. --&gt;\n    &lt;lst name=\"engine\"&gt;\n      &lt;str name=\"name\"&gt;kmeans&lt;/str&gt;\n      &lt;str name=\"carrot.algorithm\"&gt;org.carrot2.clustering.kmeans.BisectingKMeansClusteringAlgorithm&lt;/str&gt;\n    &lt;/lst&gt;\n  &lt;/searchComponent&gt;\n\n  &lt;!-- A request handler for demonstrating the clustering component\n\n       This is purely as an example.\n\n       In reality you will likely want to add the component to your \n       already specified request handlers. \n    --&gt;\n  &lt;requestHandler nam
 e=\"/clustering\"\n                  startup=\"lazy\"\n                  enable=\"${solr.clustering.enabled:false}\"\n                  class=\"solr.SearchHandler\"&gt;\n    &lt;lst name=\"defaults\"&gt;\n      &lt;bool name=\"clustering\"&gt;true&lt;/bool&gt;\n      &lt;bool name=\"clustering.results\"&gt;true&lt;/bool&gt;\n      &lt;!-- Field name with the logical \"title\" of a each document (optional) --&gt;\n      &lt;str name=\"carrot.title\"&gt;name&lt;/str&gt;\n      &lt;!-- Field name with the logical \"URL\" of a each document (optional) --&gt;\n      &lt;str name=\"carrot.url\"&gt;id&lt;/str&gt;\n      &lt;!-- Field name with the logical \"content\" of a each document (optional) --&gt;\n      &lt;str name=\"carrot.snippet\"&gt;features&lt;/str&gt;\n      &lt;!-- Apply highlighter to the title/ content and use this for clustering. --&gt;\n      &lt;bool name=\"carrot.produceSummary\"&gt;true&lt;/bool&gt;\n      &lt;!-- the maximum number of labels per cluster --&gt;\n     
  &lt;!--&lt;int name=\"carrot.numDescriptions\"&gt;5&lt;/int&gt;--&gt;\n      &lt;!-- produce sub clusters --&gt;\n      &lt;bool name=\"carrot.outputSubClusters\"&gt;false&lt;/bool&gt;\n\n      &lt;!-- Configure the remaining request handler parameters. --&gt;\n      &lt;str name=\"defType\"&gt;edismax&lt;/str&gt;\n      &lt;str name=\"qf\"&gt;\n        text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4\n      &lt;/str&gt;\n      &lt;str name=\"q.alt\"&gt;*:*&lt;/str&gt;\n      &lt;str name=\"rows\"&gt;10&lt;/str&gt;\n      &lt;str name=\"fl\"&gt;*,score&lt;/str&gt;\n    &lt;/lst&gt;\n    &lt;arr name=\"last-components\"&gt;\n      &lt;str&gt;clustering&lt;/str&gt;\n    &lt;/arr&gt;\n  &lt;/requestHandler&gt;\n  \n  &lt;!-- Terms Component\n\n       http://wiki.apache.org/solr/TermsComponent\n\n       A component to return terms and document frequency of those\n       terms\n    --&gt;\n  &lt;searchComponent name=\"terms\" class=\"solr.TermsComponent\"/&gt;\n\n  &lt;!-
 - A request handler for demonstrating the terms component --&gt;\n  &lt;requestHandler name=\"/terms\" class=\"solr.SearchHandler\" startup=\"lazy\"&gt;\n     &lt;lst name=\"defaults\"&gt;\n      &lt;bool name=\"terms\"&gt;true&lt;/bool&gt;\n      &lt;bool name=\"distrib\"&gt;false&lt;/bool&gt;\n    &lt;/lst&gt;     \n    &lt;arr name=\"components\"&gt;\n      &lt;str&gt;terms&lt;/str&gt;\n    &lt;/arr&gt;\n  &lt;/requestHandler&gt;\n\n\n  &lt;!-- Query Elevation Component\n\n       http://wiki.apache.org/solr/QueryElevationComponent\n\n       a search component that enables you to configure the top\n       results for a given query regardless of the normal lucene\n       scoring.\n    --&gt;\n  &lt;searchComponent name=\"elevator\" class=\"solr.QueryElevationComponent\" &gt;\n    &lt;!-- pick a fieldType to analyze queries --&gt;\n    &lt;str name=\"queryFieldType\"&gt;string&lt;/str&gt;\n    &lt;str name=\"config-file\"&gt;elevate.xml&lt;/str&gt;\n  &lt;/searchComponent&gt;\n\n  &
 lt;!-- A request handler for demonstrating the elevator component --&gt;\n  &lt;requestHandler name=\"/elevate\" class=\"solr.SearchHandler\" startup=\"lazy\"&gt;\n    &lt;lst name=\"defaults\"&gt;\n      &lt;str name=\"echoParams\"&gt;explicit&lt;/str&gt;\n      &lt;str name=\"df\"&gt;text&lt;/str&gt;\n    &lt;/lst&gt;\n    &lt;arr name=\"last-components\"&gt;\n      &lt;str&gt;elevator&lt;/str&gt;\n    &lt;/arr&gt;\n  &lt;/requestHandler&gt;\n\n  &lt;!-- Highlighting Component\n\n       http://wiki.apache.org/solr/HighlightingParameters\n    --&gt;\n  &lt;searchComponent class=\"solr.HighlightComponent\" name=\"highlight\"&gt;\n    &lt;highlighting&gt;\n      &lt;!-- Configure the standard fragmenter --&gt;\n      &lt;!-- This could most likely be commented out in the \"default\" case --&gt;\n      &lt;fragmenter name=\"gap\" \n                  default=\"true\"\n                  class=\"solr.highlight.GapFragmenter\"&gt;\n        &lt;lst name=\"defaults\"&gt;\n          &lt;int 
 name=\"hl.fragsize\"&gt;100&lt;/int&gt;\n        &lt;/lst&gt;\n      &lt;/fragmenter&gt;\n\n      &lt;!-- A regular-expression-based fragmenter \n           (for sentence extraction) \n        --&gt;\n      &lt;fragmenter name=\"regex\" \n                  class=\"solr.highlight.RegexFragmenter\"&gt;\n        &lt;lst name=\"defaults\"&gt;\n          &lt;!-- slightly smaller fragsizes work better because of slop --&gt;\n          &lt;int name=\"hl.fragsize\"&gt;70&lt;/int&gt;\n          &lt;!-- allow 50% slop on fragment sizes --&gt;\n          &lt;float name=\"hl.regex.slop\"&gt;0.5&lt;/float&gt;\n          &lt;!-- a basic sentence pattern --&gt;\n          &lt;str name=\"hl.regex.pattern\"&gt;[-\\w ,/\\n\\&amp;quot;&amp;apos;]{20,200}&lt;/str&gt;\n        &lt;/lst&gt;\n      &lt;/fragmenter&gt;\n\n      &lt;!-- Configure the standard formatter --&gt;\n      &lt;formatter name=\"html\" \n                 default=\"true\"\n                 class=\"solr.highlight.HtmlFormatter\"&gt;\n
         &lt;lst name=\"defaults\"&gt;\n          &lt;str name=\"hl.simple.pre\"&gt;&lt;![CDATA[&lt;em&gt;]]&gt;&lt;/str&gt;\n          &lt;str name=\"hl.simple.post\"&gt;&lt;![CDATA[&lt;/em&gt;]]&gt;&lt;/str&gt;\n        &lt;/lst&gt;\n      &lt;/formatter&gt;\n\n      &lt;!-- Configure the standard encoder --&gt;\n      &lt;encoder name=\"html\" \n               class=\"solr.highlight.HtmlEncoder\" /&gt;\n\n      &lt;!-- Configure the standard fragListBuilder --&gt;\n      &lt;fragListBuilder name=\"simple\" \n                       class=\"solr.highlight.SimpleFragListBuilder\"/&gt;\n      \n      &lt;!-- Configure the single fragListBuilder --&gt;\n      &lt;fragListBuilder name=\"single\" \n                       class=\"solr.highlight.SingleFragListBuilder\"/&gt;\n      \n      &lt;!-- Configure the weighted fragListBuilder --&gt;\n      &lt;fragListBuilder name=\"weighted\" \n                       default=\"true\"\n                       class=\"solr.highlight.WeightedFragList
 Builder\"/&gt;\n      \n      &lt;!-- default tag FragmentsBuilder --&gt;\n      &lt;fragmentsBuilder name=\"default\" \n                        default=\"true\"\n                        class=\"solr.highlight.ScoreOrderFragmentsBuilder\"&gt;\n        &lt;!-- \n        &lt;lst name=\"defaults\"&gt;\n          &lt;str name=\"hl.multiValuedSeparatorChar\"&gt;/&lt;/str&gt;\n        &lt;/lst&gt;\n        --&gt;\n      &lt;/fragmentsBuilder&gt;\n\n      &lt;!-- multi-colored tag FragmentsBuilder --&gt;\n      &lt;fragmentsBuilder name=\"colored\" \n                        class=\"solr.highlight.ScoreOrderFragmentsBuilder\"&gt;\n        &lt;lst name=\"defaults\"&gt;\n          &lt;str name=\"hl.tag.pre\"&gt;&lt;![CDATA[\n               &lt;b style=\"background:yellow\"&gt;,&lt;b style=\"background:lawgreen\"&gt;,\n               &lt;b style=\"background:aquamarine\"&gt;,&lt;b style=\"background:magenta\"&gt;,\n               &lt;b style=\"background:palegreen\"&gt;,&lt;b style=\"backgroun
 d:coral\"&gt;,\n               &lt;b style=\"background:wheat\"&gt;,&lt;b style=\"background:khaki\"&gt;,\n               &lt;b style=\"background:lime\"&gt;,&lt;b style=\"background:deepskyblue\"&gt;]]&gt;&lt;/str&gt;\n          &lt;str name=\"hl.tag.post\"&gt;&lt;![CDATA[&lt;/b&gt;]]&gt;&lt;/str&gt;\n        &lt;/lst&gt;\n      &lt;/fragmentsBuilder&gt;\n      \n      &lt;boundaryScanner name=\"default\" \n                       default=\"true\"\n                       class=\"solr.highlight.SimpleBoundaryScanner\"&gt;\n        &lt;lst name=\"defaults\"&gt;\n          &lt;str name=\"hl.bs.maxScan\"&gt;10&lt;/str&gt;\n          &lt;str name=\"hl.bs.chars\"&gt;.,!? &amp;#9;&amp;#10;&amp;#13;&lt;/str&gt;\n        &lt;/lst&gt;\n      &lt;/boundaryScanner&gt;\n      \n      &lt;boundaryScanner name=\"breakIterator\" \n                       class=\"solr.highlight.BreakIteratorBoundaryScanner\"&gt;\n        &lt;lst name=\"defaults\"&gt;\n          &lt;!-- type should be one of CHARACTER
 , WORD(default), LINE and SENTENCE --&gt;\n          &lt;str name=\"hl.bs.type\"&gt;WORD&lt;/str&gt;\n          &lt;!-- language and country are used when constructing Locale object.  --&gt;\n          &lt;!-- And the Locale object will be used when getting instance of BreakIterator --&gt;\n          &lt;str name=\"hl.bs.language\"&gt;en&lt;/str&gt;\n          &lt;str name=\"hl.bs.country\"&gt;US&lt;/str&gt;\n        &lt;/lst&gt;\n      &lt;/boundaryScanner&gt;\n    &lt;/highlighting&gt;\n  &lt;/searchComponent&gt;\n\n  &lt;!-- Update Processors\n\n       Chains of Update Processor Factories for dealing with Update\n       Requests can be declared, and then used by name in Update\n       Request Processors\n\n       http://wiki.apache.org/solr/UpdateRequestProcessor\n\n    --&gt; \n\n  &lt;!-- Add unknown fields to the schema \n  \n       An example field type guessing update processor that will\n       attempt to parse string-typed field values as Booleans, Longs,\n       Doubles, 
 or Dates, and then add schema fields with the guessed\n       field types.  \n       \n       This requires that the schema is both managed and mutable, by\n       declaring schemaFactory as ManagedIndexSchemaFactory, with\n       mutable specified as true. \n       \n       See http://wiki.apache.org/solr/GuessingFieldTypes\n    --&gt;\n  &lt;updateRequestProcessorChain name=\"add-unknown-fields-to-the-schema\"&gt;\n\n    &lt;processor class=\"solr.DefaultValueUpdateProcessorFactory\"&gt;\n        &lt;str name=\"fieldName\"&gt;_ttl_&lt;/str&gt;\n        &lt;str name=\"value\"&gt;+{{logsearch_service_logs_max_retention}}DAYS&lt;/str&gt;\n    &lt;/processor&gt;\n    &lt;processor class=\"solr.processor.DocExpirationUpdateProcessorFactory\"&gt;\n        &lt;int name=\"autoDeletePeriodSeconds\"&gt;30&lt;/int&gt;\n        &lt;str name=\"ttlFieldName\"&gt;_ttl_&lt;/str&gt;\n        &lt;str name=\"expirationFieldName\"&gt;_expire_at_&lt;/str&gt;\n    &lt;/processor&gt;\n    &lt;processor 
 class=\"solr.FirstFieldValueUpdateProcessorFactory\"&gt;\n      &lt;str name=\"fieldName\"&gt;_expire_at_&lt;/str&gt;\n    &lt;/processor&gt;\n\n\n    &lt;processor class=\"solr.RemoveBlankFieldUpdateProcessorFactory\"/&gt;\n    &lt;processor class=\"solr.ParseBooleanFieldUpdateProcessorFactory\"/&gt;\n    &lt;processor class=\"solr.ParseLongFieldUpdateProcessorFactory\"/&gt;\n    &lt;processor class=\"solr.ParseDoubleFieldUpdateProcessorFactory\"/&gt;\n    &lt;processor class=\"solr.ParseDateFieldUpdateProcessorFactory\"&gt;\n      &lt;arr name=\"format\"&gt;\n        &lt;str&gt;yyyy-MM-dd'T'HH:mm:ss.SSSZ&lt;/str&gt;\n        &lt;str&gt;yyyy-MM-dd'T'HH:mm:ss,SSSZ&lt;/str&gt;\n        &lt;str&gt;yyyy-MM-dd'T'HH:mm:ss.SSS&lt;/str&gt;\n        &lt;str&gt;yyyy-MM-dd'T'HH:mm:ss,SSS&lt;/str&gt;\n        &lt;str&gt;yyyy-MM-dd'T'HH:mm:ssZ&lt;/str&gt;\n        &lt;str&gt;yyyy-MM-dd'T'HH:mm:ss&lt;/str&gt;\n        &lt;str&gt;yyyy-MM-dd'T'HH:mmZ&lt;/str&gt;\n        &lt;str&gt;yyyy-MM-dd'T'HH
 :mm&lt;/str&gt;\n        &lt;str&gt;yyyy-MM-dd HH:mm:ss.SSSZ&lt;/str&gt;\n        &lt;str&gt;yyyy-MM-dd HH:mm:ss,SSSZ&lt;/str&gt;\n        &lt;str&gt;yyyy-MM-dd HH:mm:ss.SSS&lt;/str&gt;\n        &lt;str&gt;yyyy-MM-dd HH:mm:ss,SSS&lt;/str&gt;\n        &lt;str&gt;yyyy-MM-dd HH:mm:ssZ&lt;/str&gt;\n        &lt;str&gt;yyyy-MM-dd HH:mm:ss&lt;/str&gt;\n        &lt;str&gt;yyyy-MM-dd HH:mmZ&lt;/str&gt;\n        &lt;str&gt;yyyy-MM-dd HH:mm&lt;/str&gt;\n        &lt;str&gt;yyyy-MM-dd&lt;/str&gt;\n      &lt;/arr&gt;\n    &lt;/processor&gt;\n    &lt;processor class=\"solr.AddSchemaFieldsUpdateProcessorFactory\"&gt;\n      &lt;str name=\"defaultFieldType\"&gt;key_lower_case&lt;/str&gt;\n      &lt;lst name=\"typeMapping\"&gt;\n        &lt;str name=\"valueClass\"&gt;java.lang.Boolean&lt;/str&gt;\n        &lt;str name=\"fieldType\"&gt;booleans&lt;/str&gt;\n      &lt;/lst&gt;\n      &lt;lst name=\"typeMapping\"&gt;\n        &lt;str name=\"valueClass\"&gt;java.util.Date&lt;/str&gt;\n        &lt;str nam
 e=\"fieldType\"&gt;tdates&lt;/str&gt;\n      &lt;/lst&gt;\n      &lt;lst name=\"typeMapping\"&gt;\n        &lt;str name=\"valueClass\"&gt;java.lang.Long&lt;/str&gt;\n        &lt;str name=\"valueClass\"&gt;java.lang.Integer&lt;/str&gt;\n        &lt;str name=\"fieldType\"&gt;tlongs&lt;/str&gt;\n      &lt;/lst&gt;\n      &lt;lst name=\"typeMapping\"&gt;\n        &lt;str name=\"valueClass\"&gt;java.lang.Number&lt;/str&gt;\n        &lt;str name=\"fieldType\"&gt;tdoubles&lt;/str&gt;\n      &lt;/lst&gt;\n    &lt;/processor&gt;\n\n    &lt;processor class=\"solr.LogUpdateProcessorFactory\"/&gt;\n    &lt;processor class=\"solr.RunUpdateProcessorFactory\"/&gt;\n  &lt;/updateRequestProcessorChain&gt;\n\n\n  &lt;!-- Deduplication\n\n       An example dedup update processor that creates the \"id\" field\n       on the fly based on the hash code of some other fields.  This\n       example has overwriteDupes set to false since we are using the\n       id field as the signatureField and Solr will ma
 intain\n       uniqueness based on that anyway.  \n       \n    --&gt;\n  &lt;!--\n     &lt;updateRequestProcessorChain name=\"dedupe\"&gt;\n       &lt;processor class=\"solr.processor.SignatureUpdateProcessorFactory\"&gt;\n         &lt;bool name=\"enabled\"&gt;true&lt;/bool&gt;\n         &lt;str name=\"signatureField\"&gt;id&lt;/str&gt;\n         &lt;bool name=\"overwriteDupes\"&gt;false&lt;/bool&gt;\n         &lt;str name=\"fields\"&gt;name,features,cat&lt;/str&gt;\n         &lt;str name=\"signatureClass\"&gt;solr.processor.Lookup3Signature&lt;/str&gt;\n       &lt;/processor&gt;\n       &lt;processor class=\"solr.LogUpdateProcessorFactory\" /&gt;\n       &lt;processor class=\"solr.RunUpdateProcessorFactory\" /&gt;\n     &lt;/updateRequestProcessorChain&gt;\n    --&gt;\n  \n  &lt;!-- Language identification\n\n       This example update chain identifies the language of the incoming\n       documents using the langid contrib. The detected language is\n       written to field languag
 e_s. No field name mapping is done.\n       The fields used for detection are text, title, subject and description,\n       making this example suitable for detecting languages form full-text\n       rich documents injected via ExtractingRequestHandler.\n       See more about langId at http://wiki.apache.org/solr/LanguageDetection\n    --&gt;\n    &lt;!--\n     &lt;updateRequestProcessorChain name=\"langid\"&gt;\n       &lt;processor class=\"org.apache.solr.update.processor.TikaLanguageIdentifierUpdateProcessorFactory\"&gt;\n         &lt;str name=\"langid.fl\"&gt;text,title,subject,description&lt;/str&gt;\n         &lt;str name=\"langid.langField\"&gt;language_s&lt;/str&gt;\n         &lt;str name=\"langid.fallback\"&gt;en&lt;/str&gt;\n       &lt;/processor&gt;\n       &lt;processor class=\"solr.LogUpdateProcessorFactory\" /&gt;\n       &lt;processor class=\"solr.RunUpdateProcessorFactory\" /&gt;\n     &lt;/updateRequestProcessorChain&gt;\n    --&gt;\n\n  &lt;!-- Script update proces
 sor\n\n    This example hooks in an update processor implemented using JavaScript.\n\n    See more about the script update processor at http://wiki.apache.org/solr/ScriptUpdateProcessor\n  --&gt;\n  &lt;!--\n    &lt;updateRequestProcessorChain name=\"script\"&gt;\n      &lt;processor class=\"solr.StatelessScriptUpdateProcessorFactory\"&gt;\n        &lt;str name=\"script\"&gt;update-script.js&lt;/str&gt;\n        &lt;lst name=\"params\"&gt;\n          &lt;str name=\"config_param\"&gt;example config parameter&lt;/str&gt;\n        &lt;/lst&gt;\n      &lt;/processor&gt;\n      &lt;processor class=\"solr.RunUpdateProcessorFactory\" /&gt;\n    &lt;/updateRequestProcessorChain&gt;\n  --&gt;\n \n  &lt;!-- Response Writers\n\n       http://wiki.apache.org/solr/QueryResponseWriter\n\n       Request responses will be written using the writer specified by\n       the 'wt' request parameter matching the name of a registered\n       writer.\n\n       The \"default\" writer is the default and will
  be used if 'wt' is\n       not specified in the request.\n    --&gt;\n  &lt;!-- The following response writers are implicitly configured unless\n       overridden...\n    --&gt;\n  &lt;!--\n     &lt;queryResponseWriter name=\"xml\" \n                          default=\"true\"\n                          class=\"solr.XMLResponseWriter\" /&gt;\n     &lt;queryResponseWriter name=\"json\" class=\"solr.JSONResponseWriter\"/&gt;\n     &lt;queryResponseWriter name=\"python\" class=\"solr.PythonResponseWriter\"/&gt;\n     &lt;queryResponseWriter name=\"ruby\" class=\"solr.RubyResponseWriter\"/&gt;\n     &lt;queryResponseWriter name=\"php\" class=\"solr.PHPResponseWriter\"/&gt;\n     &lt;queryResponseWriter name=\"phps\" class=\"solr.PHPSerializedResponseWriter\"/&gt;\n     &lt;queryResponseWriter name=\"csv\" class=\"solr.CSVResponseWriter\"/&gt;\n     &lt;queryResponseWriter name=\"schema.xml\" class=\"solr.SchemaXmlResponseWriter\"/&gt;\n    --&gt;\n\n  &lt;queryResponseWriter name=\"json
 \" class=\"solr.JSONResponseWriter\"&gt;\n     &lt;!-- For the purposes of the tutorial, JSON responses are written as\n      plain text so that they are easy to read in *any* browser.\n      If you expect a MIME type of \"application/json\" just remove this override.\n     --&gt;\n    &lt;str name=\"content-type\"&gt;text/plain; charset=UTF-8&lt;/str&gt;\n  &lt;/queryResponseWriter&gt;\n  \n  &lt;!--\n     Custom response writers can be declared as needed...\n    --&gt;\n  &lt;queryResponseWriter name=\"velocity\" class=\"solr.VelocityResponseWriter\" startup=\"lazy\"&gt;\n    &lt;str name=\"template.base.dir\"&gt;${velocity.template.base.dir:}&lt;/str&gt;\n  &lt;/queryResponseWriter&gt;\n\n  &lt;!-- XSLT response writer transforms the XML output by any xslt file found\n       in Solr's conf/xslt directory.  Changes to xslt files are checked for\n       every xsltCacheLifetimeSeconds.  \n    --&gt;\n  &lt;queryResponseWriter name=\"xslt\" class=\"solr.XSLTResponseWriter\"&gt;\n    
 &lt;int name=\"xsltCacheLifetimeSeconds\"&gt;5&lt;/int&gt;\n  &lt;/queryResponseWriter&gt;\n\n  &lt;!-- Query Parsers\n\n       http://wiki.apache.org/solr/SolrQuerySyntax\n\n       Multiple QParserPlugins can be registered by name, and then\n       used in either the \"defType\" param for the QueryComponent (used\n       by SearchHandler) or in LocalParams\n    --&gt;\n  &lt;!-- example of registering a query parser --&gt;\n  &lt;!--\n     &lt;queryParser name=\"myparser\" class=\"com.mycompany.MyQParserPlugin\"/&gt;\n    --&gt;\n\n  &lt;!-- Function Parsers\n\n       http://wiki.apache.org/solr/FunctionQuery\n\n       Multiple ValueSourceParsers can be registered by name, and then\n       used as function names when using the \"func\" QParser.\n    --&gt;\n  &lt;!-- example of registering a custom function parser  --&gt;\n  &lt;!--\n     &lt;valueSourceParser name=\"myfunc\" \n                        class=\"com.mycompany.MyValueSourceParser\" /&gt;\n    --&gt;\n    \n  \n  &lt;!-
 - Document Transformers\n       http://wiki.apache.org/solr/DocTransformers\n    --&gt;\n  &lt;!--\n     Could be something like:\n     &lt;transformer name=\"db\" class=\"com.mycompany.LoadFromDatabaseTransformer\" &gt;\n       &lt;int name=\"connection\"&gt;jdbc://....&lt;/int&gt;\n     &lt;/transformer&gt;\n     \n     To add a constant value to all docs, use:\n     &lt;transformer name=\"mytrans2\" class=\"org.apache.solr.response.transform.ValueAugmenterFactory\" &gt;\n       &lt;int name=\"value\"&gt;5&lt;/int&gt;\n     &lt;/transformer&gt;\n     \n     If you want the user to still be able to change it with _value:something_ use this:\n     &lt;transformer name=\"mytrans3\" class=\"org.apache.solr.response.transform.ValueAugmenterFactory\" &gt;\n       &lt;double name=\"defaultValue\"&gt;5&lt;/double&gt;\n     &lt;/transformer&gt;\n\n      If you are using the QueryElevationComponent, you may wish to mark documents that get boosted.  The\n      EditorialMarkerFactory will do 
 exactly that:\n     &lt;transformer name=\"qecBooster\" class=\"org.apache.sol

<TRUNCATED>
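
For orientation, the request handlers declared in the solrconfig above can be
exercised with a few lines of Python once the core is loaded. The sketch below
is illustrative only and is not part of the commit; the host, port and core
name (localhost:8886, "hadoop_logs") are assumed placeholders rather than
values taken from this configuration, and it follows the Python 2 / urllib2
style used by the alert scripts elsewhere in these commits.

    import json
    import urllib2

    SOLR_CORE_URL = "http://localhost:8886/solr/hadoop_logs"  # assumed location of the core

    def query_select(q="*:*", rows=10):
      # /select applies the "defaults" above (echoParams=explicit, rows=10, df=text)
      url = "{0}/select?q={1}&rows={2}&wt=json".format(SOLR_CORE_URL, urllib2.quote(q), rows)
      return json.loads(urllib2.urlopen(url, timeout=5).read())

    def query_spell(q):
      # /spell combines suggestions from the "default" and "wordbreak" spellcheckers
      url = "{0}/spell?q={1}&wt=json".format(SOLR_CORE_URL, urllib2.quote(q))
      return json.loads(urllib2.urlopen(url, timeout=5).read())

    if __name__ == "__main__":
      print query_select()["response"]["numFound"]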

[37/51] [abbrv] ambari git commit: AMBARI-19137. HDP 3.0 TP - move ZK, HDFS, YARN/MR into new common-services version (alejandro)

Posted by sm...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/alerts/alert_checkpoint_time.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/alerts/alert_checkpoint_time.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/alerts/alert_checkpoint_time.py
new file mode 100644
index 0000000..26127c3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/alerts/alert_checkpoint_time.py
@@ -0,0 +1,255 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import time
+import urllib2
+import ambari_simplejson as json # simplejson is much faster than the Python 2.6 json module and provides the same set of functions.
+import logging
+import traceback
+
+from resource_management.libraries.functions.namenode_ha_utils import get_all_namenode_addresses
+from resource_management.libraries.functions.curl_krb_request import curl_krb_request
+from resource_management.libraries.functions.curl_krb_request import DEFAULT_KERBEROS_KINIT_TIMER_MS
+from resource_management.libraries.functions.curl_krb_request import KERBEROS_KINIT_TIMER_PARAMETER
+from resource_management.core.environment import Environment
+
+LABEL = 'Last Checkpoint: [{h} hours, {m} minutes, {tx} transactions]'
+HDFS_SITE_KEY = '{{hdfs-site}}'
+
+RESULT_STATE_UNKNOWN = 'UNKNOWN'
+RESULT_STATE_SKIPPED = 'SKIPPED'
+
+NN_HTTP_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.http-address}}'
+NN_HTTPS_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.https-address}}'
+NN_HTTP_POLICY_KEY = '{{hdfs-site/dfs.http.policy}}'
+NN_CHECKPOINT_TX_KEY = '{{hdfs-site/dfs.namenode.checkpoint.txns}}'
+NN_CHECKPOINT_PERIOD_KEY = '{{hdfs-site/dfs.namenode.checkpoint.period}}'
+
+PERCENT_WARNING_KEY = 'checkpoint.time.warning.threshold'
+PERCENT_WARNING_DEFAULT = 200
+
+PERCENT_CRITICAL_KEY = 'checkpoint.time.critical.threshold'
+PERCENT_CRITICAL_DEFAULT = 200
+
+CHECKPOINT_TX_MULTIPLIER_WARNING_KEY = 'checkpoint.txns.multiplier.warning.threshold'
+CHECKPOINT_TX_MULTIPLIER_WARNING_DEFAULT = 2
+
+CHECKPOINT_TX_MULTIPLIER_CRITICAL_KEY = 'checkpoint.txns.multiplier.critical.threshold'
+CHECKPOINT_TX_MULTIPLIER_CRITICAL_DEFAULT = 4
+
+CHECKPOINT_TX_DEFAULT = 1000000
+CHECKPOINT_PERIOD_DEFAULT = 21600
+
+CONNECTION_TIMEOUT_KEY = 'connection.timeout'
+CONNECTION_TIMEOUT_DEFAULT = 5.0
+
+KERBEROS_KEYTAB = '{{hdfs-site/dfs.web.authentication.kerberos.keytab}}'
+KERBEROS_PRINCIPAL = '{{hdfs-site/dfs.web.authentication.kerberos.principal}}'
+SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
+SMOKEUSER_KEY = "{{cluster-env/smokeuser}}"
+EXECUTABLE_SEARCH_PATHS = '{{kerberos-env/executable_search_paths}}'
+
+logger = logging.getLogger('ambari_alerts')
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (HDFS_SITE_KEY, NN_HTTP_ADDRESS_KEY, NN_HTTPS_ADDRESS_KEY, NN_HTTP_POLICY_KEY, EXECUTABLE_SEARCH_PATHS,
+      NN_CHECKPOINT_TX_KEY, NN_CHECKPOINT_PERIOD_KEY, KERBEROS_KEYTAB, KERBEROS_PRINCIPAL, SECURITY_ENABLED_KEY, SMOKEUSER_KEY)
+  
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  if configurations is None:
+    return (RESULT_STATE_UNKNOWN, ['There were no configurations supplied to the script.'])
+  
+  uri = None
+  scheme = 'http'  
+  http_uri = None
+  https_uri = None
+  http_policy = 'HTTP_ONLY'
+  checkpoint_tx = CHECKPOINT_TX_DEFAULT
+  checkpoint_period = CHECKPOINT_PERIOD_DEFAULT
+
+  # hdfs-site is required
+  if HDFS_SITE_KEY not in configurations:
+    return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(HDFS_SITE_KEY)])
+
+  if NN_HTTP_POLICY_KEY in configurations:
+    http_policy = configurations[NN_HTTP_POLICY_KEY]
+
+  if NN_CHECKPOINT_TX_KEY in configurations:
+    checkpoint_tx = configurations[NN_CHECKPOINT_TX_KEY]
+
+  if NN_CHECKPOINT_PERIOD_KEY in configurations:
+    checkpoint_period = configurations[NN_CHECKPOINT_PERIOD_KEY]
+    
+  smokeuser = None
+  if SMOKEUSER_KEY in configurations:
+    smokeuser = configurations[SMOKEUSER_KEY]
+
+  executable_paths = None
+  if EXECUTABLE_SEARCH_PATHS in configurations:
+    executable_paths = configurations[EXECUTABLE_SEARCH_PATHS]
+
+  security_enabled = False
+  if SECURITY_ENABLED_KEY in configurations:
+    security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
+
+  kerberos_keytab = None
+  if KERBEROS_KEYTAB in configurations:
+    kerberos_keytab = configurations[KERBEROS_KEYTAB]
+
+  kerberos_principal = None
+  if KERBEROS_PRINCIPAL in configurations:
+    kerberos_principal = configurations[KERBEROS_PRINCIPAL]
+    kerberos_principal = kerberos_principal.replace('_HOST', host_name)
+
+  # parse script arguments
+  connection_timeout = CONNECTION_TIMEOUT_DEFAULT
+  if CONNECTION_TIMEOUT_KEY in parameters:
+    connection_timeout = float(parameters[CONNECTION_TIMEOUT_KEY])
+
+  percent_warning = PERCENT_WARNING_DEFAULT
+  if PERCENT_WARNING_KEY in parameters:
+    percent_warning = float(parameters[PERCENT_WARNING_KEY])
+
+  percent_critical = PERCENT_CRITICAL_DEFAULT
+  if PERCENT_CRITICAL_KEY in parameters:
+    percent_critical = float(parameters[PERCENT_CRITICAL_KEY])
+
+  checkpoint_txn_multiplier_warning = CHECKPOINT_TX_MULTIPLIER_WARNING_DEFAULT
+  if CHECKPOINT_TX_MULTIPLIER_WARNING_KEY in parameters:
+    checkpoint_txn_multiplier_warning = float(parameters[CHECKPOINT_TX_MULTIPLIER_WARNING_KEY])
+
+  checkpoint_txn_multiplier_critical = CHECKPOINT_TX_MULTIPLIER_CRITICAL_DEFAULT
+  if CHECKPOINT_TX_MULTIPLIER_CRITICAL_KEY in parameters:
+    checkpoint_txn_multiplier_critical = float(parameters[CHECKPOINT_TX_MULTIPLIER_CRITICAL_KEY])
+
+  kinit_timer_ms = parameters.get(KERBEROS_KINIT_TIMER_PARAMETER, DEFAULT_KERBEROS_KINIT_TIMER_MS)
+
+  # determine the right URI and whether to use SSL
+  hdfs_site = configurations[HDFS_SITE_KEY]
+
+  scheme = "https" if http_policy == "HTTPS_ONLY" else "http"
+
+  nn_addresses = get_all_namenode_addresses(hdfs_site)
+  for nn_address in nn_addresses:
+    if nn_address.startswith(host_name + ":"):
+      uri = nn_address
+      break
+  if not uri:
+    return (RESULT_STATE_SKIPPED, ['NameNode on host {0} not found (namenode addresses = {1})'.format(host_name, ', '.join(nn_addresses))])
+
+  current_time = int(round(time.time() * 1000))
+
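+  # Build the two JMX queries: the FSNamesystem bean exposes LastCheckpointTime and
+  # the NameNodeInfo bean exposes JournalTransactionInfo (a JSON string parsed below).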
+  last_checkpoint_time_qry = "{0}://{1}/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem".format(scheme,uri)
+  journal_transaction_info_qry = "{0}://{1}/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo".format(scheme,uri)
+
+  # start out assuming an OK status
+  label = None
+  result_code = "OK"
+
+  try:
+    if kerberos_principal is not None and kerberos_keytab is not None and security_enabled:
+      env = Environment.get_instance()
+
+      # curl requires an integer timeout
+      curl_connection_timeout = int(connection_timeout)
+
+      last_checkpoint_time_response, error_msg, time_millis = curl_krb_request(env.tmp_dir, kerberos_keytab,
+        kerberos_principal, last_checkpoint_time_qry,"checkpoint_time_alert", executable_paths, False,
+        "NameNode Last Checkpoint", smokeuser, connection_timeout=curl_connection_timeout,
+        kinit_timer_ms = kinit_timer_ms)
+
+      last_checkpoint_time_response_json = json.loads(last_checkpoint_time_response)
+      last_checkpoint_time = int(last_checkpoint_time_response_json["beans"][0]["LastCheckpointTime"])
+
+      journal_transaction_info_response, error_msg, time_millis = curl_krb_request(env.tmp_dir, kerberos_keytab,
+        kerberos_principal, journal_transaction_info_qry,"checkpoint_time_alert", executable_paths,
+        False, "NameNode Last Checkpoint", smokeuser, connection_timeout=curl_connection_timeout,
+        kinit_timer_ms = kinit_timer_ms)
+
+      journal_transaction_info_response_json = json.loads(journal_transaction_info_response)
+      journal_transaction_info = journal_transaction_info_response_json["beans"][0]["JournalTransactionInfo"]
+    else:
+      last_checkpoint_time = int(get_value_from_jmx(last_checkpoint_time_qry,
+        "LastCheckpointTime", connection_timeout))
+
+      journal_transaction_info = get_value_from_jmx(journal_transaction_info_qry,
+        "JournalTransactionInfo", connection_timeout)
+
+    journal_transaction_info_dict = json.loads(journal_transaction_info)
+  
+    last_tx = int(journal_transaction_info_dict['LastAppliedOrWrittenTxId'])
+    most_recent_tx = int(journal_transaction_info_dict['MostRecentCheckpointTxId'])
+    transaction_difference = last_tx - most_recent_tx
+    
+    delta = (current_time - last_checkpoint_time)/1000
+
+    label = LABEL.format(h=get_time(delta)['h'], m=get_time(delta)['m'], tx=transaction_difference)
+
+    is_checkpoint_txn_warning = transaction_difference > checkpoint_txn_multiplier_warning * int(checkpoint_tx)
+    is_checkpoint_txn_critical = transaction_difference > checkpoint_txn_multiplier_critical * int(checkpoint_tx)
+
+    # Alert when either there are too many uncommitted transactions or the
+    # checkpoint has been missed for too long, as determined by the thresholds.
+    if is_checkpoint_txn_critical or (float(delta) / int(checkpoint_period)*100 >= int(percent_critical)):
+      logger.debug('Raising critical alert: transaction_difference = {0}, checkpoint_tx = {1}'.format(transaction_difference, checkpoint_tx))
+      result_code = 'CRITICAL'
+    elif is_checkpoint_txn_warning or (float(delta) / int(checkpoint_period)*100 >= int(percent_warning)):
+      logger.debug('Raising warning alert: transaction_difference = {0}, checkpoint_tx = {1}'.format(transaction_difference, checkpoint_tx))
+      result_code = 'WARNING'
+
+  except Exception:
+    label = traceback.format_exc()
+    result_code = 'UNKNOWN'
+        
+  return ((result_code, [label]))
+
+def get_time(delta):
+  h = int(delta/3600)
+  m = int((delta % 3600)/60)
+  return {'h':h, 'm':m}
+
+
+def get_value_from_jmx(query, jmx_property, connection_timeout):
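+  # Opens the JMX query URL, parses the JSON response and returns the requested
+  # property from the first bean; the connection is closed in the finally block.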
+  response = None
+  
+  try:
+    response = urllib2.urlopen(query, timeout=connection_timeout)
+    data = response.read()
+    data_dict = json.loads(data)
+    return data_dict["beans"][0][jmx_property]
+  finally:
+    if response is not None:
+      try:
+        response.close()
+      except:
+        pass

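In Ambari itself the agent's alert scheduler builds the configurations
dictionary from the {{site/property}} tokens returned by get_tokens() and
invokes execute(); the sketch below only illustrates the shape of those inputs
and of the (result_code, [label]) return value. It is a hypothetical driver,
not part of the commit: the import assumes the script and the resource_management
libraries it depends on are on the PYTHONPATH, and every property value shown
is a made-up example.

    import alert_checkpoint_time as alert

    configurations = {
      '{{hdfs-site}}': {'dfs.namenode.http-address': 'c6401.ambari.apache.org:50070'},
      '{{hdfs-site/dfs.namenode.http-address}}': 'c6401.ambari.apache.org:50070',
      '{{hdfs-site/dfs.http.policy}}': 'HTTP_ONLY',
      '{{hdfs-site/dfs.namenode.checkpoint.txns}}': '1000000',
      '{{hdfs-site/dfs.namenode.checkpoint.period}}': '21600',
      '{{cluster-env/security_enabled}}': 'false',
      '{{cluster-env/smokeuser}}': 'ambari-qa',
    }
    parameters = {
      'checkpoint.time.warning.threshold': 200,
      'checkpoint.time.critical.threshold': 200,
    }

    result_code, labels = alert.execute(configurations, parameters,
                                        host_name='c6401.ambari.apache.org')
    print result_code, labels[0]

Without a reachable NameNode the JMX call fails and the script falls back to
the UNKNOWN result code, which is the same behaviour the agent would report.
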
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/alerts/alert_datanode_unmounted_data_dir.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/alerts/alert_datanode_unmounted_data_dir.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/alerts/alert_datanode_unmounted_data_dir.py
new file mode 100644
index 0000000..765831d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/alerts/alert_datanode_unmounted_data_dir.py
@@ -0,0 +1,177 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import os
+import logging
+import urlparse
+
+from resource_management.libraries.functions import file_system
+from resource_management.libraries.functions import mounted_dirs_helper
+
+RESULT_STATE_OK = 'OK'
+RESULT_STATE_WARNING = 'WARNING'
+RESULT_STATE_CRITICAL = 'CRITICAL'
+RESULT_STATE_UNKNOWN = 'UNKNOWN'
+
+DFS_DATA_DIR = '{{hdfs-site/dfs.datanode.data.dir}}'
+DATA_STORAGE_TAGS = ['[DISK]','[SSD]','[RAM_DISK]','[ARCHIVE]']
+DATA_DIR_MOUNT_FILE = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist"
+
+logger = logging.getLogger()
+
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (DFS_DATA_DIR, DATA_DIR_MOUNT_FILE)
+
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+
+  DataNode directories can be of the following formats and each needs to be supported:
+    /grid/dn/archive0
+    [SSD]/grid/dn/archive0
+    [ARCHIVE]file:///grid/dn/archive0
+  """
+  warnings = []
+  errors = []
+
+  if configurations is None:
+    return (RESULT_STATE_UNKNOWN, ['There were no configurations supplied to the script.'])
+
+  # Check required properties
+  if DFS_DATA_DIR not in configurations:
+    return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(DFS_DATA_DIR)])
+
+  dfs_data_dir = configurations[DFS_DATA_DIR]
+
+  if dfs_data_dir is None:
+    return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script and the value is null'.format(DFS_DATA_DIR)])
+
+  # This follows symlinks and will return False for a broken link (even in the middle of a chain of links)
+  data_dir_mount_file_exists = True
+  if not os.path.exists(DATA_DIR_MOUNT_FILE):
+    data_dir_mount_file_exists = False
+    warnings.append("{0} was not found.".format(DATA_DIR_MOUNT_FILE))
+
+  normalized_data_dirs = set()            # data dirs that have been normalized
+  data_dirs_not_exist = set()        # data dirs that do not exist
+  data_dirs_unknown = set()          # data dirs for which could not determine mount
+  data_dirs_on_root = set()          # set of data dirs that are on root mount
+  data_dirs_on_mount = set()         # set of data dirs that are mounted on a device
+  data_dirs_unmounted = []           # list of data dirs that are known to have become unmounted
+
+  # transform each data directory into something that we can use
+  for data_dir in dfs_data_dir.split(","):
+    if data_dir is None or data_dir.strip() == "":
+      continue
+
+    data_dir = data_dir.strip()
+
+    # filter out data storage tags
+    for tag in DATA_STORAGE_TAGS:
+      if data_dir.startswith(tag):
+        data_dir = data_dir.replace(tag, "")
+        continue
+
+    # parse the path in case it contains a URI scheme
+    data_dir = urlparse.urlparse(data_dir).path
+
+    normalized_data_dirs.add(data_dir)
+
+  # Sort the data dirs, which is needed for deterministic behavior when running the unit tests.
+  normalized_data_dirs = sorted(normalized_data_dirs)
+  for data_dir in normalized_data_dirs:
+    # This follows symlinks and will return False for a broken link (even in the middle of a chain of links)
+    if os.path.isdir(data_dir):
+      curr_mount_point = file_system.get_mount_point_for_dir(data_dir)
+      curr_mount_point = curr_mount_point.strip() if curr_mount_point else curr_mount_point
+
+      if curr_mount_point is not None and curr_mount_point != "":
+        if curr_mount_point == "/":
+          data_dirs_on_root.add(data_dir)
+        else:
+          data_dirs_on_mount.add(data_dir)
+      else:
+        data_dirs_unknown.add(data_dir)
+    else:
+      data_dirs_not_exist.add(data_dir)
+
+  # To keep the messages consistent for all hosts, sort the sets into lists
+  normalized_data_dirs = sorted(normalized_data_dirs)
+  data_dirs_not_exist = sorted(data_dirs_not_exist)
+  data_dirs_unknown = sorted(data_dirs_unknown)
+  data_dirs_on_root = sorted(data_dirs_on_root)
+
+  if data_dirs_not_exist:
+    errors.append("The following data dir(s) were not found: {0}\n".format("\n".join(data_dirs_not_exist)))
+
+  if data_dirs_unknown:
+    errors.append("Cannot find the mount point for the following data dir(s):\n{0}".format("\n".join(data_dirs_unknown)))
+
+  if data_dir_mount_file_exists:
+    # This dictionary contains the expected values of <data_dir, mount_point>
+    # Hence, we only need to analyze the data dirs that are currently on the root partition
+    # and report an error if they were expected to be on a mount.
+    #
+    # If one of the data dirs is not present in the file, it means that DataNode has not been restarted after
+    # the configuration was changed on the server, so we cannot make any assertions about it.
+    expected_data_dir_to_mount = mounted_dirs_helper.get_dir_to_mount_from_file(DATA_DIR_MOUNT_FILE)
+    for data_dir in data_dirs_on_root:
+      if data_dir in expected_data_dir_to_mount and expected_data_dir_to_mount[data_dir] != "/":
+        data_dirs_unmounted.append(data_dir)
+
+    if len(data_dirs_unmounted) > 0:
+      errors.append("Detected data dir(s) that became unmounted and are now writing to the root partition:\n{0}".format("\n".join(data_dirs_unmounted)))
+  else:
+    # Couldn't make guarantees about the expected value of mount points, so rely on this strategy that is likely to work.
+    # It will report false positives (aka false alarms) if the user actually intended to have
+    # 1+ data dirs on a mount and 1+ data dirs on the root partition.
+    if len(data_dirs_on_mount) >= 1 and len(data_dirs_on_root) >= 1:
+      errors.append("Detected at least one data dir on a mount point, but these are writing to the root partition:\n{0}".format("\n".join(data_dirs_on_root)))
+
+  # Determine the status based on warnings and errors.
+  if len(errors) == 0:
+    status = RESULT_STATE_OK
+    messages = []
+
+    # Check for warnings
+    if len(warnings) > 0:
+      status = RESULT_STATE_WARNING
+      messages += warnings
+
+    if len(normalized_data_dirs) > 0:
+      messages.append("The following data dir(s) are valid:\n{0}".format("\n".join(normalized_data_dirs)))
+    else:
+      messages.append("There are no data directories to analyze.")
+
+    return (status, ["\n".join(messages)])
+  else:
+    # Report errors
+    return (RESULT_STATE_CRITICAL, ["\n".join(errors)])
\ No newline at end of file
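
As a rough sketch of the normalization loop above, each dfs.datanode.data.dir entry has its storage tag stripped and is then reduced to the path component of any URI; the sample entries mirror the formats listed in the docstring:

import urlparse  # Python 2, as in the alert script

DATA_STORAGE_TAGS = ['[DISK]', '[SSD]', '[RAM_DISK]', '[ARCHIVE]']

def normalize_data_dir(entry):
  entry = entry.strip()
  for tag in DATA_STORAGE_TAGS:
    if entry.startswith(tag):
      entry = entry.replace(tag, "")
  # keep only the path, dropping any scheme such as file://
  return urlparse.urlparse(entry).path

for sample in ["/grid/dn/archive0", "[SSD]/grid/dn/archive0", "[ARCHIVE]file:///grid/dn/archive0"]:
  print(normalize_data_dir(sample))  # each prints /grid/dn/archive0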

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/alerts/alert_ha_namenode_health.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/alerts/alert_ha_namenode_health.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/alerts/alert_ha_namenode_health.py
new file mode 100644
index 0000000..28b3f22
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/alerts/alert_ha_namenode_health.py
@@ -0,0 +1,243 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import urllib2
+import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same set of functions.
+import logging
+
+from resource_management.libraries.functions.curl_krb_request import curl_krb_request
+from resource_management.libraries.functions.curl_krb_request import DEFAULT_KERBEROS_KINIT_TIMER_MS
+from resource_management.libraries.functions.curl_krb_request import KERBEROS_KINIT_TIMER_PARAMETER
+from resource_management.core.environment import Environment
+
+RESULT_STATE_OK = 'OK'
+RESULT_STATE_CRITICAL = 'CRITICAL'
+RESULT_STATE_UNKNOWN = 'UNKNOWN'
+RESULT_STATE_SKIPPED = 'SKIPPED'
+
+HDFS_NN_STATE_ACTIVE = 'active'
+HDFS_NN_STATE_STANDBY = 'standby'
+
+HDFS_SITE_KEY = '{{hdfs-site}}'
+NAMESERVICE_KEY = '{{hdfs-site/dfs.internal.nameservices}}'
+NN_HTTP_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.http-address}}'
+NN_HTTPS_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.https-address}}'
+DFS_POLICY_KEY = '{{hdfs-site/dfs.http.policy}}'
+
+KERBEROS_KEYTAB = '{{hdfs-site/dfs.web.authentication.kerberos.keytab}}'
+KERBEROS_PRINCIPAL = '{{hdfs-site/dfs.web.authentication.kerberos.principal}}'
+SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
+SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
+EXECUTABLE_SEARCH_PATHS = '{{kerberos-env/executable_search_paths}}'
+INADDR_ANY = '0.0.0.0'
+NAMENODE_HTTP_FRAGMENT = 'dfs.namenode.http-address.{0}.{1}'
+NAMENODE_HTTPS_FRAGMENT = 'dfs.namenode.https-address.{0}.{1}'
+NAMENODE_RPC_FRAGMENT = 'dfs.namenode.rpc-address.{0}.{1}'
+
+CONNECTION_TIMEOUT_KEY = 'connection.timeout'
+CONNECTION_TIMEOUT_DEFAULT = 5.0
+
+LOGGER_EXCEPTION_MESSAGE = "[Alert] NameNode High Availability Health on {0} fails:"
+logger = logging.getLogger('ambari_alerts')
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (HDFS_SITE_KEY, NAMESERVICE_KEY, NN_HTTP_ADDRESS_KEY, EXECUTABLE_SEARCH_PATHS,
+  NN_HTTPS_ADDRESS_KEY, DFS_POLICY_KEY, SMOKEUSER_KEY, KERBEROS_KEYTAB, KERBEROS_PRINCIPAL, SECURITY_ENABLED_KEY)
+  
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+  if configurations is None:
+    return (RESULT_STATE_UNKNOWN, ['There were no configurations supplied to the script.'])
+
+  # if not in HA mode, then SKIP
+  if not NAMESERVICE_KEY in configurations:
+    return (RESULT_STATE_SKIPPED, ['NameNode HA is not enabled'])
+
+  # hdfs-site is required
+  if not HDFS_SITE_KEY in configurations:
+    return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(HDFS_SITE_KEY)])
+  
+  if SMOKEUSER_KEY in configurations:
+    smokeuser = configurations[SMOKEUSER_KEY]
+
+  executable_paths = None
+  if EXECUTABLE_SEARCH_PATHS in configurations:
+    executable_paths = configurations[EXECUTABLE_SEARCH_PATHS]
+
+  # parse script arguments
+  connection_timeout = CONNECTION_TIMEOUT_DEFAULT
+  if CONNECTION_TIMEOUT_KEY in parameters:
+    connection_timeout = float(parameters[CONNECTION_TIMEOUT_KEY])
+
+  security_enabled = False
+  if SECURITY_ENABLED_KEY in configurations:
+    security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
+
+  kerberos_keytab = None
+  if KERBEROS_KEYTAB in configurations:
+    kerberos_keytab = configurations[KERBEROS_KEYTAB]
+
+  kerberos_principal = None
+  if KERBEROS_PRINCIPAL in configurations:
+    kerberos_principal = configurations[KERBEROS_PRINCIPAL]
+    kerberos_principal = kerberos_principal.replace('_HOST', host_name)
+
+  kinit_timer_ms = parameters.get(KERBEROS_KINIT_TIMER_PARAMETER, DEFAULT_KERBEROS_KINIT_TIMER_MS)
+
+  # determine whether or not SSL is enabled
+  is_ssl_enabled = False
+  if DFS_POLICY_KEY in configurations:
+    dfs_policy = configurations[DFS_POLICY_KEY]
+    if dfs_policy == "HTTPS_ONLY":
+      is_ssl_enabled = True
+
+  name_service = configurations[NAMESERVICE_KEY]
+  hdfs_site = configurations[HDFS_SITE_KEY]
+
+  # look for dfs.ha.namenodes.foo
+  nn_unique_ids_key = 'dfs.ha.namenodes.' + name_service
+  if not nn_unique_ids_key in hdfs_site:
+    return (RESULT_STATE_UNKNOWN, ['Unable to find unique namenode alias key {0}'.format(nn_unique_ids_key)])
+
+  namenode_http_fragment = NAMENODE_HTTP_FRAGMENT
+  jmx_uri_fragment = "http://{0}/jmx?qry=Hadoop:service=NameNode,name=*"
+
+  if is_ssl_enabled:
+    namenode_http_fragment = NAMENODE_HTTPS_FRAGMENT
+    jmx_uri_fragment = "https://{0}/jmx?qry=Hadoop:service=NameNode,name=*"
+
+
+  active_namenodes = []
+  standby_namenodes = []
+  unknown_namenodes = []
+
+  # now we have something like 'nn1,nn2,nn3,nn4'
+  # turn it into dfs.namenode.[property].[dfs.nameservices].[nn_unique_id]
+  # ie dfs.namenode.http-address.hacluster.nn1
+  nn_unique_ids = hdfs_site[nn_unique_ids_key].split(',')
+  for nn_unique_id in nn_unique_ids:
+    key = namenode_http_fragment.format(name_service,nn_unique_id)
+    rpc_key = NAMENODE_RPC_FRAGMENT.format(name_service,nn_unique_id)
+
+    if key in hdfs_site:
+      # use str() to ensure that unicode strings do not have the u' in them
+      value = str(hdfs_site[key])
+      if INADDR_ANY in value and rpc_key in hdfs_site:
+        rpc_value = str(hdfs_site[rpc_key])
+        if INADDR_ANY not in rpc_value:
+          rpc_host = rpc_value.split(":")[0]
+          value = value.replace(INADDR_ANY, rpc_host)
+
+      try:
+        jmx_uri = jmx_uri_fragment.format(value)
+        if kerberos_principal is not None and kerberos_keytab is not None and security_enabled:
+          env = Environment.get_instance()
+
+          # curl requires an integer timeout
+          curl_connection_timeout = int(connection_timeout)
+
+          state_response, error_msg, time_millis  = curl_krb_request(env.tmp_dir,
+            kerberos_keytab, kerberos_principal, jmx_uri,"ha_nn_health", executable_paths, False,
+            "NameNode High Availability Health", smokeuser, connection_timeout=curl_connection_timeout,
+            kinit_timer_ms = kinit_timer_ms)
+
+          state = _get_ha_state_from_json(state_response)
+        else:
+          state_response = get_jmx(jmx_uri, connection_timeout)
+          state = _get_ha_state_from_json(state_response)
+
+        if state == HDFS_NN_STATE_ACTIVE:
+          active_namenodes.append(value)
+        elif state == HDFS_NN_STATE_STANDBY:
+          standby_namenodes.append(value)
+        else:
+          unknown_namenodes.append(value)
+      except:
+        logger.exception(LOGGER_EXCEPTION_MESSAGE.format(host_name))
+        unknown_namenodes.append(value)
+
+  # the topology is healthy only when there is exactly 1 active and 1 standby NameNode
+  is_topology_healthy = len(active_namenodes) == 1 and len(standby_namenodes) == 1
+
+  result_label = 'Active{0}, Standby{1}, Unknown{2}'.format(str(active_namenodes),
+    str(standby_namenodes), str(unknown_namenodes))
+
+  if is_topology_healthy:
+    # if there is exactly 1 active and 1 standby NN
+    return (RESULT_STATE_OK, [result_label])
+  else:
+    # other scenario
+    return (RESULT_STATE_CRITICAL, [result_label])
+
+
+def get_jmx(query, connection_timeout):
+  response = None
+  
+  try:
+    response = urllib2.urlopen(query, timeout=connection_timeout)
+    json_data = response.read()
+    return json_data
+  finally:
+    if response is not None:
+      try:
+        response.close()
+      except:
+        pass
+
+
+def _get_ha_state_from_json(string_json):
+  """
+  Searches through the specified JSON string looking for HA state
+  enumerations.
+  :param string_json: the string JSON
+  :return:  the value of the HA state (active, standby, etc)
+  """
+  json_data = json.loads(string_json)
+  jmx_beans = json_data["beans"]
+
+  # look for NameNodeStatus-State first
+  for jmx_bean in jmx_beans:
+    if "name" not in jmx_bean:
+      continue
+
+    jmx_bean_name = jmx_bean["name"]
+    if jmx_bean_name == "Hadoop:service=NameNode,name=NameNodeStatus" and "State" in jmx_bean:
+      return jmx_bean["State"]
+
+  # look for FSNamesystem-tag.HAState last
+  for jmx_bean in jmx_beans:
+    if "name" not in jmx_bean:
+      continue
+
+    jmx_bean_name = jmx_bean["name"]
+    if jmx_bean_name == "Hadoop:service=NameNode,name=FSNamesystem":
+      return jmx_bean["tag.HAState"]
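
To make the JMX lookup above concrete, here is a small, self-contained sketch; the hdfs-site values and the JSON payload are hand-written stand-ins, and the state extraction is collapsed into a single pass (the real helper scans twice, preferring NameNodeStatus over FSNamesystem):

import json

hdfs_site = {
  "dfs.ha.namenodes.hacluster": "nn1,nn2",
  "dfs.namenode.http-address.hacluster.nn1": "nn1.example.com:50070",
  "dfs.namenode.http-address.hacluster.nn2": "nn2.example.com:50070",
}

for nn_unique_id in hdfs_site["dfs.ha.namenodes.hacluster"].split(","):
  address = hdfs_site["dfs.namenode.http-address.hacluster." + nn_unique_id]
  print("http://{0}/jmx?qry=Hadoop:service=NameNode,name=*".format(address))

sample_response = json.dumps({"beans": [
  {"name": "Hadoop:service=NameNode,name=NameNodeStatus", "State": "active"}]})

for bean in json.loads(sample_response)["beans"]:
  if bean.get("name") == "Hadoop:service=NameNode,name=NameNodeStatus" and "State" in bean:
    print(bean["State"])  # -> active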

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/alerts/alert_metrics_deviation.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/alerts/alert_metrics_deviation.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/alerts/alert_metrics_deviation.py
new file mode 100644
index 0000000..8a06f56
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/alerts/alert_metrics_deviation.py
@@ -0,0 +1,470 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+import httplib
+import locale
+import json
+import logging
+import urllib
+import time
+import urllib2
+
+from resource_management import Environment
+from ambari_commons.aggregate_functions import sample_standard_deviation, mean
+
+from resource_management.libraries.functions.curl_krb_request import curl_krb_request
+from resource_management.libraries.functions.curl_krb_request import DEFAULT_KERBEROS_KINIT_TIMER_MS
+from resource_management.libraries.functions.curl_krb_request import KERBEROS_KINIT_TIMER_PARAMETER
+from ambari_commons.ambari_metrics_helper import select_metric_collector_for_sink
+
+
+RESULT_STATE_OK = 'OK'
+RESULT_STATE_CRITICAL = 'CRITICAL'
+RESULT_STATE_WARNING = 'WARNING'
+RESULT_STATE_UNKNOWN = 'UNKNOWN'
+RESULT_STATE_SKIPPED = 'SKIPPED'
+
+HDFS_NN_STATE_ACTIVE = 'active'
+HDFS_NN_STATE_STANDBY = 'standby'
+
+HDFS_SITE_KEY = '{{hdfs-site}}'
+NAMESERVICE_KEY = '{{hdfs-site/dfs.internal.nameservices}}'
+NN_HTTP_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.http-address}}'
+NN_HTTPS_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.https-address}}'
+DFS_POLICY_KEY = '{{hdfs-site/dfs.http.policy}}'
+
+KERBEROS_KEYTAB = '{{hdfs-site/dfs.web.authentication.kerberos.keytab}}'
+KERBEROS_PRINCIPAL = '{{hdfs-site/dfs.web.authentication.kerberos.principal}}'
+SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
+SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
+EXECUTABLE_SEARCH_PATHS = '{{kerberos-env/executable_search_paths}}'
+
+METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY = '{{ams-site/timeline.metrics.service.webapp.address}}'
+METRICS_COLLECTOR_VIP_HOST_KEY = '{{cluster-env/metrics_collector_vip_host}}'
+METRICS_COLLECTOR_VIP_PORT_KEY = '{{cluster-env/metrics_collector_vip_port}}'
+
+CONNECTION_TIMEOUT_KEY = 'connection.timeout'
+CONNECTION_TIMEOUT_DEFAULT = 5.0
+
+MERGE_HA_METRICS_PARAM_KEY = 'mergeHaMetrics'
+MERGE_HA_METRICS_PARAM_DEFAULT = False
+METRIC_NAME_PARAM_KEY = 'metricName'
+METRIC_NAME_PARAM_DEFAULT = ''
+METRIC_UNITS_PARAM_KEY = 'metric.units'
+METRIC_UNITS_DEFAULT = ''
+APP_ID_PARAM_KEY = 'appId'
+APP_ID_PARAM_DEFAULT = 'NAMENODE'
+
+# the interval to check the metric (should be cast to int but could be a float)
+INTERVAL_PARAM_KEY = 'interval'
+INTERVAL_PARAM_DEFAULT = 60
+
+# the default threshold to trigger a CRITICAL (should be cast to int but could be a float)
+DEVIATION_CRITICAL_THRESHOLD_KEY = 'metric.deviation.critical.threshold'
+DEVIATION_CRITICAL_THRESHOLD_DEFAULT = 10
+
+# the default threshold to trigger a WARNING (should be cast to int but could be a float)
+DEVIATION_WARNING_THRESHOLD_KEY = 'metric.deviation.warning.threshold'
+DEVIATION_WARNING_THRESHOLD_DEFAULT = 5
+NAMENODE_SERVICE_RPC_PORT_KEY = ''
+
+MINIMUM_VALUE_THRESHOLD_KEY = 'minimumValue'
+
+AMS_METRICS_GET_URL = "/ws/v1/timeline/metrics?%s"
+
+# The variance for this alert is 27MB which is 27% of the 100MB average (20MB is the limit)
+DEVIATION_THRESHOLD_MESSAGE = "The variance for this alert is {0}{1} which is {2:.0f}% of the {3}{4} average ({5}{6} is the limit)"
+
+# The variance for this alert is 15MB which is within 20% of the 904MB average (20MB is the limit)
+DEVIATION_OK_MESSAGE = "The variance for this alert is {0}{1} which is within {2:.0f}% of the {3}{4} average ({5}{6} is the limit)"
+
+logger = logging.getLogger()
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (HDFS_SITE_KEY, NAMESERVICE_KEY, NN_HTTP_ADDRESS_KEY, DFS_POLICY_KEY,
+          EXECUTABLE_SEARCH_PATHS, NN_HTTPS_ADDRESS_KEY, SMOKEUSER_KEY,
+          KERBEROS_KEYTAB, KERBEROS_PRINCIPAL, SECURITY_ENABLED_KEY,
+          METRICS_COLLECTOR_VIP_HOST_KEY, METRICS_COLLECTOR_VIP_PORT_KEY,
+          METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY)
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations : a mapping of configuration key to value
+  parameters : a mapping of script parameter key to value
+  host_name : the name of this host where the alert is running
+
+  :type configurations dict
+  :type parameters dict
+  :type host_name str
+  """
+  hostnames = host_name
+  current_time = int(time.time()) * 1000
+
+  # parse script arguments
+  connection_timeout = CONNECTION_TIMEOUT_DEFAULT
+  if CONNECTION_TIMEOUT_KEY in parameters:
+    connection_timeout = float(parameters[CONNECTION_TIMEOUT_KEY])
+
+  merge_ha_metrics = MERGE_HA_METRICS_PARAM_DEFAULT
+  if MERGE_HA_METRICS_PARAM_KEY in parameters:
+    merge_ha_metrics = parameters[MERGE_HA_METRICS_PARAM_KEY].lower() == 'true'
+
+  metric_name = METRIC_NAME_PARAM_DEFAULT
+  if METRIC_NAME_PARAM_KEY in parameters:
+    metric_name = parameters[METRIC_NAME_PARAM_KEY]
+
+  metric_units = METRIC_UNITS_DEFAULT
+  if METRIC_UNITS_PARAM_KEY in parameters:
+    metric_units = parameters[METRIC_UNITS_PARAM_KEY]
+
+  app_id = APP_ID_PARAM_DEFAULT
+  if APP_ID_PARAM_KEY in parameters:
+    app_id = parameters[APP_ID_PARAM_KEY]
+
+  interval = INTERVAL_PARAM_DEFAULT
+  if INTERVAL_PARAM_KEY in parameters:
+    interval = _coerce_to_integer(parameters[INTERVAL_PARAM_KEY])
+
+  warning_threshold = DEVIATION_WARNING_THRESHOLD_DEFAULT
+  if DEVIATION_WARNING_THRESHOLD_KEY in parameters:
+    warning_threshold = _coerce_to_integer(parameters[DEVIATION_WARNING_THRESHOLD_KEY])
+
+  critical_threshold = DEVIATION_CRITICAL_THRESHOLD_DEFAULT
+  if DEVIATION_CRITICAL_THRESHOLD_KEY in parameters:
+    critical_threshold = _coerce_to_integer(parameters[DEVIATION_CRITICAL_THRESHOLD_KEY])
+
+  minimum_value_threshold = None
+  if MINIMUM_VALUE_THRESHOLD_KEY in parameters:
+    minimum_value_threshold = _coerce_to_integer(parameters[MINIMUM_VALUE_THRESHOLD_KEY])
+
+  #parse configuration
+  if configurations is None:
+    return (RESULT_STATE_UNKNOWN, ['There were no configurations supplied to the script.'])
+
+  # hdfs-site is required
+  if not HDFS_SITE_KEY in configurations:
+    return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(HDFS_SITE_KEY)])
+
+  if METRICS_COLLECTOR_VIP_HOST_KEY in configurations and METRICS_COLLECTOR_VIP_PORT_KEY in configurations:
+    collector_host = configurations[METRICS_COLLECTOR_VIP_HOST_KEY]
+    collector_port = int(configurations[METRICS_COLLECTOR_VIP_PORT_KEY])
+  else:
+    # ams-site/timeline.metrics.service.webapp.address is required
+    if not METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY in configurations:
+      return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY)])
+    else:
+      collector_webapp_address = configurations[METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY].split(":")
+      if valid_collector_webapp_address(collector_webapp_address):
+        collector_host = select_metric_collector_for_sink(app_id.lower())
+        collector_port = int(collector_webapp_address[1])
+      else:
+        return (RESULT_STATE_UNKNOWN, ['{0} value should be set as "fqdn_hostname:port", but set to {1}'.format(
+          METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY, configurations[METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY])])
+
+  namenode_service_rpc_address = None
+  # hdfs-site is required
+  if not HDFS_SITE_KEY in configurations:
+    return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(HDFS_SITE_KEY)])
+
+  hdfs_site = configurations[HDFS_SITE_KEY]
+
+  if 'dfs.namenode.servicerpc-address' in hdfs_site:
+    namenode_service_rpc_address = hdfs_site['dfs.namenode.servicerpc-address']
+
+  # if namenode alert and HA mode
+  if NAMESERVICE_KEY in configurations and app_id.lower() == 'namenode':
+    # hdfs-site is required
+    if not HDFS_SITE_KEY in configurations:
+      return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(HDFS_SITE_KEY)])
+
+    if SMOKEUSER_KEY in configurations:
+      smokeuser = configurations[SMOKEUSER_KEY]
+
+    executable_paths = None
+    if EXECUTABLE_SEARCH_PATHS in configurations:
+      executable_paths = configurations[EXECUTABLE_SEARCH_PATHS]
+
+    # parse script arguments
+    security_enabled = False
+    if SECURITY_ENABLED_KEY in configurations:
+      security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
+
+    kerberos_keytab = None
+    if KERBEROS_KEYTAB in configurations:
+      kerberos_keytab = configurations[KERBEROS_KEYTAB]
+
+    kerberos_principal = None
+    if KERBEROS_PRINCIPAL in configurations:
+      kerberos_principal = configurations[KERBEROS_PRINCIPAL]
+      kerberos_principal = kerberos_principal.replace('_HOST', host_name)
+
+    # determine whether or not SSL is enabled
+    is_ssl_enabled = False
+    if DFS_POLICY_KEY in configurations:
+      dfs_policy = configurations[DFS_POLICY_KEY]
+      if dfs_policy == "HTTPS_ONLY":
+        is_ssl_enabled = True
+
+    kinit_timer_ms = parameters.get(KERBEROS_KINIT_TIMER_PARAMETER, DEFAULT_KERBEROS_KINIT_TIMER_MS)
+
+    name_service = configurations[NAMESERVICE_KEY]
+
+    # look for dfs.ha.namenodes.foo
+    nn_unique_ids_key = 'dfs.ha.namenodes.' + name_service
+    if not nn_unique_ids_key in hdfs_site:
+      return (RESULT_STATE_UNKNOWN, ['Unable to find unique NameNode alias key {0}'.format(nn_unique_ids_key)])
+
+    namenode_http_fragment = 'dfs.namenode.http-address.{0}.{1}'
+    jmx_uri_fragment = "http://{0}/jmx?qry=Hadoop:service=NameNode,name=*"
+
+    if is_ssl_enabled:
+      namenode_http_fragment = 'dfs.namenode.https-address.{0}.{1}'
+      jmx_uri_fragment = "https://{0}/jmx?qry=Hadoop:service=NameNode,name=*"
+
+    # now we have something like 'nn1,nn2,nn3,nn4'
+    # turn it into dfs.namenode.[property].[dfs.nameservices].[nn_unique_id]
+    # ie dfs.namenode.http-address.hacluster.nn1
+    namenodes = []
+    active_namenodes = []
+    nn_unique_ids = hdfs_site[nn_unique_ids_key].split(',')
+    for nn_unique_id in nn_unique_ids:
+      key = namenode_http_fragment.format(name_service, nn_unique_id)
+
+      if key in hdfs_site:
+        # use str() to ensure that unicode strings do not have the u' in them
+        value = str(hdfs_site[key])
+        namenode = str(hdfs_site[key]).split(":")[0]
+
+        namenodes.append(namenode)
+        try:
+          jmx_uri = jmx_uri_fragment.format(value)
+          if kerberos_principal is not None and kerberos_keytab is not None and security_enabled:
+            env = Environment.get_instance()
+
+            # curl requires an integer timeout
+            curl_connection_timeout = int(connection_timeout)
+            state_response, error_msg, time_millis = curl_krb_request(env.tmp_dir,
+              kerberos_keytab, kerberos_principal, jmx_uri,"ha_nn_health", executable_paths, False,
+              "NameNode High Availability Health", smokeuser, connection_timeout=curl_connection_timeout,
+              kinit_timer_ms = kinit_timer_ms)
+
+            state = _get_ha_state_from_json(state_response)
+          else:
+            state_response = get_jmx(jmx_uri, connection_timeout)
+            state = _get_ha_state_from_json(state_response)
+
+          if state == HDFS_NN_STATE_ACTIVE:
+            active_namenodes.append(namenode)
+
+            # Only check active NN
+            nn_service_rpc_address_key = 'dfs.namenode.servicerpc-address.{0}.{1}'.format(name_service, nn_unique_id)
+            if nn_service_rpc_address_key in hdfs_site:
+              namenode_service_rpc_address = hdfs_site[nn_service_rpc_address_key]
+          pass
+        except:
+          logger.exception("Unable to determine the active NameNode")
+    pass
+
+    if merge_ha_metrics:
+      hostnames = ",".join(namenodes)
+      # run only on active NN, no need to run the same requests from the standby
+      if host_name not in active_namenodes:
+        return (RESULT_STATE_SKIPPED, ['This alert will be reported by another host.'])
+    pass
+
+  # Skip service rpc alert if port is not enabled
+  if not namenode_service_rpc_address and 'rpc.rpc.datanode' in metric_name:
+    return (RESULT_STATE_SKIPPED, ['Service RPC port is not enabled.'])
+
+  get_metrics_parameters = {
+    "metricNames": metric_name,
+    "appId": app_id,
+    "hostname": hostnames,
+    "startTime": current_time - interval * 60 * 1000,
+    "endTime": current_time,
+    "grouped": "true",
+    }
+
+  encoded_get_metrics_parameters = urllib.urlencode(get_metrics_parameters)
+
+  try:
+    conn = httplib.HTTPConnection(collector_host, int(collector_port),
+                                  timeout=connection_timeout)
+    conn.request("GET", AMS_METRICS_GET_URL % encoded_get_metrics_parameters)
+    response = conn.getresponse()
+    data = response.read()
+    conn.close()
+  except Exception:
+    return (RESULT_STATE_UNKNOWN, ["Unable to retrieve metrics from the Ambari Metrics service."])
+
+  if response.status != 200:
+    return (RESULT_STATE_UNKNOWN, ["Unable to retrieve metrics from the Ambari Metrics service."])
+
+  data_json = json.loads(data)
+  metrics = []
+  # will get large standard deviation for multiple hosts,
+  # if host1 reports small local values, but host2 reports large local values
+  for metrics_data in data_json["metrics"]:
+    metrics += metrics_data["metrics"].values()
+  pass
+
+  if not metrics or len(metrics) < 2:
+    number_of_data_points = len(metrics) if metrics else 0
+    return (RESULT_STATE_SKIPPED, ["There are not enough data points to calculate the standard deviation ({0} sampled)".format(
+      number_of_data_points)])
+
+  minimum_value_multiplier = 1
+  if 'dfs.FSNamesystem.CapacityUsed' in metric_name:
+    minimum_value_multiplier = 1024 * 1024  # MB to bytes
+  elif 'rpc.rpc.datanode' in metric_name or 'rpc.rpc.client' in metric_name:
+    minimum_value_multiplier = 1000  # seconds to millis
+
+  if minimum_value_threshold:
+    # Filter out points below min threshold
+    metrics = [metric for metric in metrics if metric > (minimum_value_threshold * minimum_value_multiplier)]
+    if len(metrics) < 2:
+      return (RESULT_STATE_OK, ['There were no data points above the minimum threshold of {0} seconds'.format(minimum_value_threshold)])
+
+  mean_value = mean(metrics)
+  stddev = sample_standard_deviation(metrics)
+
+  try:
+    deviation_percent = stddev / float(mean_value) * 100
+  except ZeroDivisionError:
+    # this should not happen for this alert
+    return (RESULT_STATE_SKIPPED, ["Unable to calculate the standard deviation because the mean value is 0"])
+
+  # log the AMS request
+  if logger.isEnabledFor(logging.DEBUG):
+    logger.debug("""
+    AMS request parameters - {0}
+    AMS response - {1}
+    Mean - {2}
+    Standard deviation - {3}
+    Percentage standard deviation - {4}
+    """.format(encoded_get_metrics_parameters, data_json, mean_value, stddev, deviation_percent))
+
+  mean_value_localized = locale.format("%.0f", mean_value, grouping=True)
+
+  variance_value = (deviation_percent / 100.0) * mean_value
+  variance_value_localized = locale.format("%.0f", variance_value, grouping=True)
+
+  # check for CRITICAL status
+  if deviation_percent > critical_threshold:
+    threshold_value = ((critical_threshold / 100.0) * mean_value)
+    threshold_value_localized = locale.format("%.0f", threshold_value, grouping=True)
+
+    message = DEVIATION_THRESHOLD_MESSAGE.format(variance_value_localized, metric_units, deviation_percent,
+      mean_value_localized, metric_units, threshold_value_localized, metric_units)
+
+    return (RESULT_STATE_CRITICAL,[message])
+
+  # check for WARNING status
+  if deviation_percent > warning_threshold:
+    threshold_value = ((warning_threshold / 100.0) * mean_value)
+    threshold_value_localized = locale.format("%.0f", threshold_value, grouping = True)
+
+    message = DEVIATION_THRESHOLD_MESSAGE.format(variance_value_localized, metric_units, deviation_percent,
+      mean_value_localized, metric_units, threshold_value_localized, metric_units)
+
+    return (RESULT_STATE_WARNING, [message])
+
+  # return OK status; use the warning threshold as the value to compare against
+  threshold_value = ((warning_threshold / 100.0) * mean_value)
+  threshold_value_localized = locale.format("%.0f", threshold_value, grouping = True)
+
+  message = DEVIATION_OK_MESSAGE.format(variance_value_localized, metric_units, warning_threshold,
+    mean_value_localized, metric_units, threshold_value_localized, metric_units)
+
+  return (RESULT_STATE_OK,[message])
+
+
+def valid_collector_webapp_address(webapp_address):
+  if len(webapp_address) == 2 \
+    and webapp_address[0] != '127.0.0.1' \
+    and webapp_address[1].isdigit():
+    return True
+
+  return False
+
+
+def get_jmx(query, connection_timeout):
+  response = None
+
+  try:
+    response = urllib2.urlopen(query, timeout=connection_timeout)
+    json_data = response.read()
+    return json_data
+  except Exception:
+    return {"beans": {}}
+  finally:
+    if response is not None:
+      try:
+        response.close()
+      except:
+        pass
+
+def _get_ha_state_from_json(string_json):
+  """
+  Searches through the specified JSON string looking for HA state
+  enumerations.
+  :param string_json: the string JSON
+  :return:  the value of the HA state (active, standby, etc)
+  """
+  json_data = json.loads(string_json)
+  jmx_beans = json_data["beans"]
+
+  # look for NameNodeStatus-State  first
+  for jmx_bean in jmx_beans:
+    if "name" not in jmx_bean:
+      continue
+
+    jmx_bean_name = jmx_bean["name"]
+    if jmx_bean_name == "Hadoop:service=NameNode,name=NameNodeStatus" and "State" in jmx_bean:
+      return jmx_bean["State"]
+
+  # look for FSNamesystem-tag.HAState last
+  for jmx_bean in jmx_beans:
+    if "name" not in jmx_bean:
+      continue
+
+    jmx_bean_name = jmx_bean["name"]
+    if jmx_bean_name == "Hadoop:service=NameNode,name=FSNamesystem":
+      return jmx_bean["tag.HAState"]
+
+
+def _coerce_to_integer(value):
+  """
+  Attempts to correctly coerce a value to an integer. For the case of an integer or a float,
+  this will essentially either NOOP or return a truncated value. If the parameter is a string,
+  then it will first be coerced to an integer and, failing that, to a float.
+  :param value: the value to coerce
+  :return: the coerced value as an integer
+  """
+  try:
+    return int(value)
+  except ValueError:
+    return int(float(value))
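
For reference, a minimal sketch of the deviation check above with the mean and sample standard deviation written inline (the alert itself imports them from ambari_commons.aggregate_functions); the data points are made up, and the thresholds match the script defaults:

import math

def mean(values):
  return sum(values) / float(len(values))

def sample_standard_deviation(values):
  m = mean(values)
  return math.sqrt(sum((v - m) ** 2 for v in values) / float(len(values) - 1))

metrics = [100.0, 110.0, 90.0, 105.0, 95.0]    # made-up data points
warning_threshold, critical_threshold = 5, 10  # percent

deviation_percent = sample_standard_deviation(metrics) / mean(metrics) * 100

if deviation_percent > critical_threshold:
  print('CRITICAL')
elif deviation_percent > warning_threshold:
  print('WARNING')  # this sample lands here (about 7.9%)
else:
  print('OK')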

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/alerts/alert_upgrade_finalized.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/alerts/alert_upgrade_finalized.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/alerts/alert_upgrade_finalized.py
new file mode 100644
index 0000000..427f1d1
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/alerts/alert_upgrade_finalized.py
@@ -0,0 +1,179 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import urllib2
+import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same set of functions.
+import logging
+import traceback
+
+from resource_management.libraries.functions.curl_krb_request import curl_krb_request
+from resource_management.libraries.functions.curl_krb_request import DEFAULT_KERBEROS_KINIT_TIMER_MS
+from resource_management.libraries.functions.curl_krb_request import KERBEROS_KINIT_TIMER_PARAMETER
+from resource_management.libraries.functions.curl_krb_request import CONNECTION_TIMEOUT_DEFAULT
+from resource_management.core.environment import Environment
+from resource_management.libraries.functions.namenode_ha_utils import get_all_namenode_addresses
+
+NN_HTTP_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.http-address}}'
+NN_HTTPS_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.https-address}}'
+NN_HTTP_POLICY_KEY = '{{hdfs-site/dfs.http.policy}}'
+
+HDFS_SITE_KEY = '{{hdfs-site}}'
+KERBEROS_KEYTAB = '{{hdfs-site/dfs.web.authentication.kerberos.keytab}}'
+KERBEROS_PRINCIPAL = '{{hdfs-site/dfs.web.authentication.kerberos.principal}}'
+SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
+SMOKEUSER_KEY = "{{cluster-env/smokeuser}}"
+EXECUTABLE_SEARCH_PATHS = '{{kerberos-env/executable_search_paths}}'
+logger = logging.getLogger('ambari_alerts')
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+
+  :rtype tuple
+  """
+  return (HDFS_SITE_KEY, NN_HTTP_ADDRESS_KEY, NN_HTTPS_ADDRESS_KEY, NN_HTTP_POLICY_KEY, EXECUTABLE_SEARCH_PATHS,
+          KERBEROS_KEYTAB, KERBEROS_PRINCIPAL, SECURITY_ENABLED_KEY, SMOKEUSER_KEY)
+
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations : a mapping of configuration key to value
+  parameters : a mapping of script parameter key to value
+  host_name : the name of this host where the alert is running
+
+  :type configurations dict
+  :type parameters dict
+  :type host_name str
+  """
+
+  if configurations is None:
+    return (('UNKNOWN', ['There were no configurations supplied to the script.']))
+
+  uri = None
+  http_policy = 'HTTP_ONLY'
+
+  # hdfs-site is required
+  if not HDFS_SITE_KEY in configurations:
+    return 'SKIPPED', ['{0} is a required parameter for the script'.format(HDFS_SITE_KEY)]
+
+  if NN_HTTP_POLICY_KEY in configurations:
+    http_policy = configurations[NN_HTTP_POLICY_KEY]
+
+  if SMOKEUSER_KEY in configurations:
+    smokeuser = configurations[SMOKEUSER_KEY]
+
+  executable_paths = None
+  if EXECUTABLE_SEARCH_PATHS in configurations:
+    executable_paths = configurations[EXECUTABLE_SEARCH_PATHS]
+
+  security_enabled = False
+  if SECURITY_ENABLED_KEY in configurations:
+    security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
+
+  kerberos_keytab = None
+  if KERBEROS_KEYTAB in configurations:
+    kerberos_keytab = configurations[KERBEROS_KEYTAB]
+
+  kerberos_principal = None
+  if KERBEROS_PRINCIPAL in configurations:
+    kerberos_principal = configurations[KERBEROS_PRINCIPAL]
+    kerberos_principal = kerberos_principal.replace('_HOST', host_name)
+
+  kinit_timer_ms = parameters.get(KERBEROS_KINIT_TIMER_PARAMETER, DEFAULT_KERBEROS_KINIT_TIMER_MS)
+
+  # determine the right URI and whether to use SSL
+  hdfs_site = configurations[HDFS_SITE_KEY]
+
+  scheme = "https" if http_policy == "HTTPS_ONLY" else "http"
+
+  nn_addresses = get_all_namenode_addresses(hdfs_site)
+  for nn_address in nn_addresses:
+    if nn_address.startswith(host_name + ":") or nn_address == host_name:
+      uri = nn_address
+      break
+  if not uri:
+    return 'SKIPPED', [
+      'NameNode on host {0} not found (namenode addresses = {1})'.format(host_name, ', '.join(nn_addresses))]
+
+  upgrade_finalized_qry = "{0}://{1}/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo".format(scheme, uri)
+
+  # start out assuming an OK status
+  label = None
+  result_code = "OK"
+
+  try:
+    if kerberos_principal is not None and kerberos_keytab is not None and security_enabled:
+      env = Environment.get_instance()
+
+      last_checkpoint_time_response, error_msg, time_millis = curl_krb_request(
+        env.tmp_dir, kerberos_keytab,
+        kerberos_principal, upgrade_finalized_qry, "upgrade_finalized_state", executable_paths, False,
+        "HDFS Upgrade Finalized State", smokeuser, kinit_timer_ms = kinit_timer_ms
+       )
+
+      upgrade_finalized_response_json = json.loads(last_checkpoint_time_response)
+      upgrade_finalized = bool(upgrade_finalized_response_json["beans"][0]["UpgradeFinalized"])
+
+    else:
+      upgrade_finalized = bool(get_value_from_jmx(upgrade_finalized_qry,
+                                                    "UpgradeFinalized"))
+
+    if upgrade_finalized:
+      label = "HDFS cluster is not in the upgrade state"
+      result_code = 'OK'
+    else:
+      label = "HDFS cluster is not finalized"
+      result_code = 'CRITICAL'
+
+  except:
+    label = traceback.format_exc()
+    result_code = 'UNKNOWN'
+
+  return ((result_code, [label]))
+
+def get_value_from_jmx(query, jmx_property):
+  """
+   Read a property from the JMX endpoint
+
+  :param query: jmx uri path
+  :param jmx_property: property name to read
+  :return: jmx property value
+  
+  :type query str
+  :type jmx_property str
+  """
+  response = None
+
+  try:
+    response = urllib2.urlopen(query, timeout=int(CONNECTION_TIMEOUT_DEFAULT))
+    data = response.read()
+
+    data_dict = json.loads(data)
+    return data_dict["beans"][0][jmx_property]
+  finally:
+    if response is not None:
+      try:
+        response.close()
+      except:
+        pass
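
The unsecured branch above boils down to roughly the following; the NameNode address is a placeholder and the standard-library json module stands in for ambari_simplejson:

import json
import urllib2

uri = "namenode.example.com:50070"  # placeholder; the alert derives this from hdfs-site
query = "http://{0}/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo".format(uri)

response = urllib2.urlopen(query, timeout=5)
try:
  upgrade_finalized = bool(json.loads(response.read())["beans"][0]["UpgradeFinalized"])
finally:
  response.close()

print('OK' if upgrade_finalized else 'CRITICAL')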

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/files/checkWebUI.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/files/checkWebUI.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/files/checkWebUI.py
new file mode 100644
index 0000000..ddeb116
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/files/checkWebUI.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import optparse
+import httplib
+import socket
+import ssl
+
+class TLS1HTTPSConnection(httplib.HTTPSConnection):
+  """
+  Some Python implementations do not work correctly with SSLv3 but still try to use it, so we need to switch
+  the protocol to TLSv1.
+  """
+  def __init__(self, host, port, **kwargs):
+    httplib.HTTPSConnection.__init__(self, host, port, **kwargs)
+
+  def connect(self):
+    sock = socket.create_connection((self.host, self.port), self.timeout)
+    if getattr(self, '_tunnel_host', None):
+      self.sock = sock
+      self._tunnel()
+    self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1)
+
+def make_connection(host, port, https):
+  try:
+    conn = httplib.HTTPConnection(host, port) if not https else httplib.HTTPSConnection(host, port)
+    conn.request("GET", "/")
+    return conn.getresponse().status
+  except ssl.SSLError:
+    # got an SSL error; let's try the TLSv1 protocol, which may still work
+    try:
+      tls1_conn = TLS1HTTPSConnection(host, port)
+      tls1_conn.request("GET", "/")
+      return tls1_conn.getresponse().status
+    except Exception as e:
+      print e
+    finally:
+      tls1_conn.close()
+  except Exception as e:
+    print e
+  finally:
+    conn.close()
+#
+# Main.
+#
+def main():
+  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
+  parser.add_option("-m", "--hosts", dest="hosts", help="Comma-separated list of hosts whose web UI availability should be checked")
+  parser.add_option("-p", "--port", dest="port", help="Port of the web UI to check for availability")
+  parser.add_option("-s", "--https", dest="https", help="\"True\" if value of dfs.http.policy is \"HTTPS_ONLY\"")
+
+  (options, args) = parser.parse_args()
+  
+  hosts = options.hosts.split(',')
+  port = options.port
+  https = options.https
+
+  for host in hosts:
+    httpCode = make_connection(host, port, https.lower() == "true")
+
+    if httpCode != 200:
+      print "Cannot access WEB UI on: http://" + host + ":" + port if not https.lower() == "true" else "Cannot access WEB UI on: https://" + host + ":" + port
+      exit(1)
+
+if __name__ == "__main__":
+  main()
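
To see the TLSv1 workaround from TLS1HTTPSConnection in isolation, the same idea reduces to wrapping a plain socket with ssl_version=PROTOCOL_TLSv1; the host and port below are placeholders:

import socket
import ssl

host, port = "namenode.example.com", 50470  # placeholders

sock = socket.create_connection((host, port), 5)
tls1_sock = ssl.wrap_socket(sock, ssl_version=ssl.PROTOCOL_TLSv1)
tls1_sock.sendall("GET / HTTP/1.0\r\nHost: {0}\r\n\r\n".format(host))
print(tls1_sock.recv(64))  # first bytes of the response, e.g. "HTTP/1.1 200 OK"
tls1_sock.close()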

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/__init__.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/__init__.py
new file mode 100644
index 0000000..35de4bb
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/__init__.py
@@ -0,0 +1,20 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""


[18/51] [abbrv] ambari git commit: AMBARI-18450. Putting a service or process in Maintenance Mode does not take it off of the list.(vbrodetskyi)

Posted by sm...@apache.org.
AMBARI-18450. Putting a service or process in Maintenance Mode does not take it off of the <Restart All Required> list.(vbrodetskyi)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4f3a67d9
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4f3a67d9
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4f3a67d9

Branch: refs/heads/branch-feature-AMBARI-18901
Commit: 4f3a67d987d759f4ecf28f163cf994a373f4a2ff
Parents: a9a05f7
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Thu Dec 8 17:33:06 2016 +0200
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Thu Dec 8 17:33:06 2016 +0200

----------------------------------------------------------------------
 .../AmbariManagementControllerImpl.java         | 32 ++++++++-
 .../AmbariManagementControllerImplTest.java     | 74 ++++++++++++++++++++
 2 files changed, 104 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/4f3a67d9/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index c9a3e04..c3cd82e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -1247,7 +1247,12 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
               throw new HostNotFoundException(cluster.getClusterName(), sch.getHostName());
             }
 
-            r.setMaintenanceState(maintenanceStateHelper.getEffectiveState(sch, host).name());
+            MaintenanceState effectiveMaintenanceState = maintenanceStateHelper.getEffectiveState(sch, host);
+            if(filterByMaintenanceState(request, effectiveMaintenanceState)) {
+              continue;
+            }
+            r.setMaintenanceState(effectiveMaintenanceState.name());
+
             response.add(r);
           } catch (ServiceComponentHostNotFoundException e) {
             if (request.getServiceName() == null || request.getComponentName() == null) {
@@ -1298,7 +1303,12 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
               throw new HostNotFoundException(cluster.getClusterName(), sch.getHostName());
             }
 
-            r.setMaintenanceState(maintenanceStateHelper.getEffectiveState(sch, host).name());
+            MaintenanceState effectiveMaintenanceState = maintenanceStateHelper.getEffectiveState(sch, host);
+            if(filterByMaintenanceState(request, effectiveMaintenanceState)) {
+              continue;
+            }
+            r.setMaintenanceState(effectiveMaintenanceState.name());
+
             response.add(r);
           }
         }
@@ -1307,6 +1317,24 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     return response;
   }
 
+  private boolean filterByMaintenanceState(ServiceComponentHostRequest request, MaintenanceState effectiveMaintenanceState) {
+    if (request.getMaintenanceState() != null) {
+      MaintenanceState desiredMaintenanceState = MaintenanceState.valueOf(request.getMaintenanceState());
+      if (desiredMaintenanceState.equals(MaintenanceState.ON)) {
+        /*
+         * if we want components in the ON state, it can be any of IMPLIED_FROM_SERVICE,
+         * IMPLIED_FROM_SERVICE_AND_HOST, IMPLIED_FROM_HOST or ON - simply, anything that is not OFF
+         */
+        if (effectiveMaintenanceState.equals(MaintenanceState.OFF)) {
+          return true;
+        }
+      } else if (!desiredMaintenanceState.equals(effectiveMaintenanceState)){
+        return true;
+      }
+    }
+    return false;
+  }
+
   @Override
   public MaintenanceState getEffectiveMaintenanceState(ServiceComponentHost sch)
       throws AmbariException {
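
The filtering rule added above can be restated compactly; the sketch below uses Python only to stay consistent with the other sketches in this message, and the state names come from the comment in filterByMaintenanceState:

def filter_out(requested_state, effective_state):
  # True means the host component is dropped from the response
  if requested_state is None:
    return False
  if requested_state == 'ON':
    # any implied or explicit ON state matches; only OFF is filtered out
    return effective_state == 'OFF'
  return requested_state != effective_state

print(filter_out('ON', 'IMPLIED_FROM_SERVICE'))  # -> False, kept in the response
print(filter_out('ON', 'OFF'))                   # -> True, dropped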

http://git-wip-us.apache.org/repos/asf/ambari/blob/4f3a67d9/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
index 2507a46..78b804c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
@@ -1218,6 +1218,80 @@ public class AmbariManagementControllerImplTest {
   }
 
   @Test
+  public void testGetHostComponents___ServiceComponentHostFilteredByMaintenanceState() throws Exception {
+    // member state mocks
+    Injector injector = createStrictMock(Injector.class);
+    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
+    StackId stack = createNiceMock(StackId.class);
+
+    Cluster cluster = createNiceMock(Cluster.class);
+    final Host host = createNiceMock(Host.class);
+    Service service = createNiceMock(Service.class);
+    ServiceComponent component = createNiceMock(ServiceComponent.class);
+    MaintenanceStateHelper maintHelper = createNiceMock(MaintenanceStateHelper.class);
+    final ServiceComponentHost componentHost1 = createNiceMock(ServiceComponentHost.class);
+    ServiceComponentHostResponse response1 = createNiceMock(ServiceComponentHostResponse.class);
+
+    // requests
+    ServiceComponentHostRequest request1 = new ServiceComponentHostRequest(
+        "cluster1", null, "component1", "host1", null);
+    request1.setMaintenanceState("ON");
+
+
+    Set<ServiceComponentHostRequest> setRequests = new HashSet<ServiceComponentHostRequest>();
+    setRequests.add(request1);
+
+    // expectations
+    // constructor init
+    injector.injectMembers(capture(controllerCapture));
+    expect(injector.getInstance(Gson.class)).andReturn(null);
+    expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(maintHelper);
+    expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class));
+    expect(maintHelper.getEffectiveState(
+        anyObject(ServiceComponentHost.class),
+        anyObject(Host.class))).andReturn(MaintenanceState.IMPLIED_FROM_SERVICE).anyTimes();
+
+    // getHostComponent
+    expect(clusters.getCluster("cluster1")).andReturn(cluster);
+    expect(clusters.getClustersForHost("host1")).andReturn(Collections.singleton(cluster));
+    expect(clusters.getHostsForCluster((String) anyObject())).andReturn(
+        new HashMap<String, Host>() {{
+          put("host1", host);
+        }}).anyTimes();
+
+    expect(cluster.getDesiredStackVersion()).andReturn(stack);
+    expect(cluster.getClusterName()).andReturn("cl1");
+    expect(stack.getStackName()).andReturn("stackName");
+    expect(stack.getStackVersion()).andReturn("stackVersion");
+
+    expect(ambariMetaInfo.getComponentToService("stackName", "stackVersion", "component1")).andReturn("service1");
+    expect(cluster.getService("service1")).andReturn(service);
+    expect(service.getServiceComponent("component1")).andReturn(component);
+    expect(component.getName()).andReturn("component1").anyTimes();
+    expect(component.getServiceComponentHosts()).andReturn(new HashMap<String, ServiceComponentHost>() {{
+      put("host1", componentHost1);
+    }});
+
+    expect(componentHost1.convertToResponse(null)).andReturn(response1);
+    expect(componentHost1.getHostName()).andReturn("host1");
+
+    // replay mocks
+    replay(maintHelper, injector, clusters, cluster, host, stack, ambariMetaInfo,
+        service, component, componentHost1, response1);
+
+    //test
+    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
+    setAmbariMetaInfo(ambariMetaInfo, controller);
+
+    Set<ServiceComponentHostResponse> responses = controller.getHostComponents(setRequests);
+
+    // assert and verify
+    assertSame(controller, controllerCapture.getValue());
+    assertTrue(responses.size() == 1);
+    verify(injector, clusters, cluster, host, stack, ambariMetaInfo, service, component, componentHost1, response1);
+  }
+
+  @Test
   public void testGetHostComponents___OR_Predicate_ServiceComponentHostNotFoundException() throws Exception {
     // member state mocks
     Injector injector = createStrictMock(Injector.class);


[45/51] [abbrv] ambari git commit: AMBARI-18990. Auto-fix common issues found by the DB consistency checker (dlysnichenko)

Posted by sm...@apache.org.
AMBARI-18990. Auto-fix common issues found by the DB consistency checker (dlysnichenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6029846c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6029846c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6029846c

Branch: refs/heads/branch-feature-AMBARI-18901
Commit: 6029846c94d3b9aa00bee7282071270206781688
Parents: 54da8c2
Author: Lisnichenko Dmitro <dl...@hortonworks.com>
Authored: Fri Dec 9 15:29:20 2016 +0200
Committer: Lisnichenko Dmitro <dl...@hortonworks.com>
Committed: Fri Dec 9 15:29:20 2016 +0200

----------------------------------------------------------------------
 .../checks/DatabaseConsistencyCheckHelper.java  | 245 ++++++++++++++++++-
 .../checks/DatabaseConsistencyChecker.java      |   1 +
 .../ambari/server/controller/AmbariServer.java  |   3 +
 .../orm/entities/ClusterConfigEntity.java       |  17 ++
 ambari-server/src/main/python/ambari-server.py  |   2 +
 .../src/main/python/ambari_server_main.py       |   3 +
 .../server/controller/AmbariServerTest.java     |  26 ++
 7 files changed, 293 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/6029846c/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
index 9c3ae5c..d40dfd2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
@@ -20,23 +20,40 @@ package org.apache.ambari.server.checks;
 import java.io.File;
 import java.io.IOException;
 import java.sql.Connection;
+import java.sql.DatabaseMetaData;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
 import java.util.Map;
 import java.util.Scanner;
 import java.util.Set;
 
+import javax.inject.Provider;
+import javax.persistence.EntityManager;
+import javax.persistence.Query;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.orm.dao.ClusterDAO;
+import org.apache.ambari.server.orm.dao.HostComponentDesiredStateDAO;
+import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
 import org.apache.ambari.server.orm.dao.MetainfoDAO;
+import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
+import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
+import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
 import org.apache.ambari.server.orm.entities.MetainfoEntity;
+import org.apache.ambari.server.state.SecurityState;
 import org.apache.ambari.server.state.ServiceInfo;
+import org.apache.ambari.server.state.State;
+import org.apache.ambari.server.state.UpgradeState;
 import org.apache.ambari.server.utils.VersionUtils;
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
@@ -46,6 +63,7 @@ import com.google.common.collect.HashMultimap;
 import com.google.common.collect.Multimap;
 import com.google.inject.Inject;
 import com.google.inject.Injector;
+import com.google.inject.persist.Transactional;
 
 public class DatabaseConsistencyCheckHelper {
 
@@ -103,8 +121,17 @@ public class DatabaseConsistencyCheckHelper {
     }
   }
 
+
+  public static void fixDatabaseConsistency() {
+    fixHostComponentStatesCountEqualsHostComponentsDesiredStates();
+    fixClusterConfigsNotMappedToAnyService();
+  }
+
   public static void runAllDBChecks() {
     LOG.info("******************************* Check database started *******************************");
+    checkSchemaName();
+    checkMySQLEngine();
+    checkForConfigsNotMappedToService();
     checkForNotMappedConfigsToCluster();
     checkForConfigsSelectedMoreThanOnce();
     checkForHostsWithoutState();
@@ -200,7 +227,7 @@ public class DatabaseConsistencyCheckHelper {
     }
   }
 
-  /*
+  /**
   * This method checks if any config type in clusterconfigmapping table, has
   * more than one versions selected. If config version is selected(in selected column = 1),
   * it means that this version of config is actual. So, if any config type has more
@@ -260,7 +287,7 @@ public class DatabaseConsistencyCheckHelper {
     }
   }
 
-  /*
+  /**
   * This method checks if all hosts from hosts table
   * has related host state info in hoststate table.
   * If not then we are showing error.
@@ -315,7 +342,7 @@ public class DatabaseConsistencyCheckHelper {
     }
   }
 
-  /*
+  /**
   * This method checks if count of host component states equals count
   * of desired host component states. According to ambari logic these
   * two tables should have the same count of rows. If not then we are
@@ -392,8 +419,218 @@ public class DatabaseConsistencyCheckHelper {
 
   }
 
+  /**
+  * Remove configs that are not mapped to any service.
+  */
+  @Transactional
+  public static void fixClusterConfigsNotMappedToAnyService() {
+    LOG.info("Checking for configs not mapped to any Service");
+    ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
+    List<ClusterConfigEntity> notMappedClusterConfigs = getNotMappedClusterConfigsToService();
+
+    for (ClusterConfigEntity clusterConfigEntity : notMappedClusterConfigs){
+      List<String> types = new ArrayList<>();
+      String type = clusterConfigEntity.getType();
+      types.add(type);
+      LOG.error("Removing cluster config mapping of type {} that is not mapped to any service", type);
+      clusterDAO.removeClusterConfigMappingEntityByTypes(clusterConfigEntity.getClusterId(),types);
+      LOG.error("Removing config that is not mapped to any service", clusterConfigEntity);
+      clusterDAO.removeConfig(clusterConfigEntity);
+    }
+  }
 
-  /*
+
+  /**
+   * Find ClusterConfigs that are not mapped to Service
+   * @return ClusterConfigs that are not mapped to Service
+   */
+  private static List<ClusterConfigEntity> getNotMappedClusterConfigsToService() {
+    Provider<EntityManager> entityManagerProvider = injector.getProvider(EntityManager.class);
+    EntityManager entityManager = entityManagerProvider.get();
+
+    Query query = entityManager.createNamedQuery("ClusterConfigEntity.findNotMappedClusterConfigsToService",ClusterConfigEntity.class);
+
+    return (List<ClusterConfigEntity>) query.getResultList();
+  }
+
+  /**
+   * Look for configs that are not mapped to any service.
+   */
+  public static void checkForConfigsNotMappedToService() {
+    LOG.info("Checking for configs that are not mapped to any service");
+    List<ClusterConfigEntity> notMappedClusterConfigs = getNotMappedClusterConfigsToService();
+
+    if (!notMappedClusterConfigs.isEmpty()){
+      LOG.error("Found configs that are not mapped to any service!");
+      errorsFound = true;
+    }
+  }
+
+  /**
+  * This method checks whether the count of host component states equals the count
+  * of desired host component states. According to Ambari logic these
+  * two tables should have the same number of rows. If they do not, the missing
+  * host components are added.
+  */
+  @Transactional
+  public static void fixHostComponentStatesCountEqualsHostComponentsDesiredStates() {
+    LOG.info("Checking that there are the same number of actual and desired host components");
+
+    HostComponentStateDAO hostComponentStateDAO = injector.getInstance(HostComponentStateDAO.class);
+    HostComponentDesiredStateDAO hostComponentDesiredStateDAO = injector.getInstance(HostComponentDesiredStateDAO.class);
+
+    List<HostComponentDesiredStateEntity> hostComponentDesiredStates = hostComponentDesiredStateDAO.findAll();
+    List<HostComponentStateEntity> hostComponentStates = hostComponentStateDAO.findAll();
+
+    Set<HostComponentDesiredStateEntity> missedHostComponentDesiredStates = new HashSet<>();
+    missedHostComponentDesiredStates.addAll(hostComponentDesiredStates);
+    Set<HostComponentStateEntity> missedHostComponentStates = new HashSet<>();
+    missedHostComponentStates.addAll(hostComponentStates);
+
+    for (Iterator<HostComponentStateEntity> stateIterator = missedHostComponentStates.iterator(); stateIterator.hasNext();){
+      HostComponentStateEntity hostComponentStateEntity = stateIterator.next();
+      for (Iterator<HostComponentDesiredStateEntity> desiredStateIterator = missedHostComponentDesiredStates.iterator(); desiredStateIterator.hasNext();) {
+        HostComponentDesiredStateEntity hostComponentDesiredStateEntity = desiredStateIterator.next();
+        if (hostComponentStateEntity.getComponentName().equals(hostComponentDesiredStateEntity.getComponentName()) &&
+            hostComponentStateEntity.getServiceName().equals(hostComponentDesiredStateEntity.getServiceName()) &&
+            hostComponentStateEntity.getHostId().equals(hostComponentDesiredStateEntity.getHostId())){
+          desiredStateIterator.remove();
+          stateIterator.remove();
+          break;
+        }
+      }
+    }
+
+    for (HostComponentDesiredStateEntity hostComponentDesiredStateEntity : missedHostComponentDesiredStates) {
+      HostComponentStateEntity stateEntity = new HostComponentStateEntity();
+      stateEntity.setClusterId(hostComponentDesiredStateEntity.getClusterId());
+      stateEntity.setComponentName(hostComponentDesiredStateEntity.getComponentName());
+      stateEntity.setServiceName(hostComponentDesiredStateEntity.getServiceName());
+      stateEntity.setVersion(State.UNKNOWN.toString());
+      stateEntity.setHostEntity(hostComponentDesiredStateEntity.getHostEntity());
+      stateEntity.setCurrentState(State.UNKNOWN);
+      stateEntity.setUpgradeState(UpgradeState.NONE);
+      stateEntity.setCurrentStack(hostComponentDesiredStateEntity.getDesiredStack());
+      stateEntity.setSecurityState(SecurityState.UNKNOWN);
+      stateEntity.setServiceComponentDesiredStateEntity(hostComponentDesiredStateEntity.getServiceComponentDesiredStateEntity());
+
+      LOG.error("Trying to add missing record in hostcomponentstate: {}", stateEntity);
+      hostComponentStateDAO.create(stateEntity);
+    }
+
+    for (HostComponentStateEntity missedHostComponentState : missedHostComponentStates) {
+
+      HostComponentDesiredStateEntity stateEntity = new HostComponentDesiredStateEntity();
+      stateEntity.setClusterId(missedHostComponentState.getClusterId());
+      stateEntity.setComponentName(missedHostComponentState.getComponentName());
+      stateEntity.setServiceName(missedHostComponentState.getServiceName());
+      stateEntity.setHostEntity(missedHostComponentState.getHostEntity());
+      stateEntity.setDesiredState(State.UNKNOWN);
+      stateEntity.setDesiredStack(missedHostComponentState.getCurrentStack());
+      stateEntity.setServiceComponentDesiredStateEntity(missedHostComponentState.getServiceComponentDesiredStateEntity());
+
+      LOG.error("Trying to add missing record in hostcomponentdesiredstate: {}", stateEntity);
+      hostComponentDesiredStateDAO.create(stateEntity);
+    }
+  }
+
+  /**
+  * This method checks db schema name for Postgres.
+  * */
+  public static void checkSchemaName () {
+    Configuration conf = injector.getInstance(Configuration.class);
+    if(conf.getDatabaseType()!=Configuration.DatabaseType.POSTGRES) {
+      return;
+    }
+    LOG.info("Ensuring that the schema set for Postgres is correct");
+    if (connection == null) {
+      if (dbAccessor == null) {
+        dbAccessor = injector.getInstance(DBAccessor.class);
+      }
+      connection = dbAccessor.getConnection();
+    }
+    ResultSet rs = null;
+    try {
+      DatabaseMetaData databaseMetaData = connection.getMetaData();
+
+      rs = databaseMetaData.getSchemas();
+
+      boolean ambariSchemaPresent = false;
+      if (rs != null) {
+        while (rs.next()) {
+          if(StringUtils.equals(rs.getString("TABLE_SCHEM"),conf.getDatabaseSchema())){
+            ambariSchemaPresent = true;
+            break;
+          }
+        }
+      }
+      if (!ambariSchemaPresent){
+        LOG.error("The schema %s defined for Ambari from ambari.properties has not been found in the database. " +
+          "This means that the Ambari tables are stored under the public schema which can lead to problems.", conf.getDatabaseSchema());
+        warningsFound = true;
+      }
+
+    } catch (SQLException e) {
+      LOG.error("Exception occurred during checking db schema name.: ", e);
+    } finally {
+      if (rs != null) {
+        try {
+          rs.close();
+        } catch (SQLException e) {
+          LOG.error("Exception occurred during result set closing procedure: ", e);
+        }
+      }
+    }
+  }
+
+  /**
+  * This method checks that the table engine type is InnoDB for MySQL.
+  * */
+  public static void checkMySQLEngine () {
+    Configuration conf = injector.getInstance(Configuration.class);
+    if(conf.getDatabaseType()!=Configuration.DatabaseType.MYSQL) {
+      return;
+    }
+    LOG.info("Checking to ensure that the MySQL DB engine type is set to InnoDB");
+    if (connection == null) {
+      if (dbAccessor == null) {
+        dbAccessor = injector.getInstance(DBAccessor.class);
+      }
+      connection = dbAccessor.getConnection();
+    }
+
+    String GET_INNODB_ENGINE_SUPPORT = "select TABLE_NAME, ENGINE from information_schema.tables where TABLE_SCHEMA = '%s' and LOWER(ENGINE) != 'innodb';";
+
+    ResultSet rs = null;
+    Statement statement;
+
+    try {
+      statement = connection.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE);
+      rs = statement.executeQuery(String.format(GET_INNODB_ENGINE_SUPPORT, conf.getDatabaseSchema()));
+      if (rs != null) {
+        List<String> tablesInfo = new ArrayList<>();
+        while (rs.next()) {
+          errorsFound = true;
+          tablesInfo.add(rs.getString("TABLE_NAME"));
+        }
+        if (!tablesInfo.isEmpty()){
+          LOG.error("Found tables with engine type that is not InnoDB : %s", StringUtils.join(tablesInfo, ','));
+        }
+      }
+    } catch (SQLException e) {
+      LOG.error("Exception occurred during checking MySQL engine to be innodb: ", e);
+    } finally {
+      if (rs != null) {
+        try {
+          rs.close();
+        } catch (SQLException e) {
+          LOG.error("Exception occurred during result set closing procedure: ", e);
+        }
+      }
+    }
+  }
+
+  /**
   * This method checks several potential problems for services:
   * 1) Check if we have services in cluster which doesn't have service config id(not available in serviceconfig table).
   * 2) Check if service has no mapped configs to it's service config id.
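
For readers following the reconciliation in fixHostComponentStatesCountEqualsHostComponentsDesiredStates above: the
nested-iterator pairing can also be pictured as a set difference over a composite key. A minimal sketch (illustrative
only; the entity getters are the ones used in the patch, the two lists are assumed to be the findAll() results):

    // Index each table by (service, component, host) and diff the key sets.
    Set<String> actualKeys = new HashSet<>();
    for (HostComponentStateEntity s : hostComponentStates) {
      actualKeys.add(s.getServiceName() + "|" + s.getComponentName() + "|" + s.getHostId());
    }
    Set<String> desiredKeys = new HashSet<>();
    for (HostComponentDesiredStateEntity d : hostComponentDesiredStates) {
      desiredKeys.add(d.getServiceName() + "|" + d.getComponentName() + "|" + d.getHostId());
    }
    // Keys only in desiredKeys need a new hostcomponentstate row (created with State.UNKNOWN above);
    // keys only in actualKeys need a new hostcomponentdesiredstate row.
    Set<String> missingActual = new HashSet<>(desiredKeys);
    missingActual.removeAll(actualKeys);
    Set<String> missingDesired = new HashSet<>(actualKeys);
    missingDesired.removeAll(desiredKeys);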

http://git-wip-us.apache.org/repos/asf/ambari/blob/6029846c/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyChecker.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyChecker.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyChecker.java
index 6fa36d4..f12dd50 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyChecker.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyChecker.java
@@ -134,6 +134,7 @@ public class DatabaseConsistencyChecker {
 
           if (DatabaseConsistencyCheckHelper.ifErrorsFound()) {
             System.out.print(String.format("DB configs consistency check failed. Run \"ambari-server start --skip-database-check\" to skip. " +
+                  "You may try --auto-fix-database flag to attempt to fix issues automatically. " +
                   "If you use this \"--skip-database-check\" option, do not make any changes to your cluster topology " +
                   "or perform a cluster upgrade until you correct the database consistency issues. See \"%s\" " +
                   "for more details on the consistency issues.", ambariDBConsistencyCheckLog));

http://git-wip-us.apache.org/repos/asf/ambari/blob/6029846c/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
index 1f2a694..23f9dcc 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
@@ -663,6 +663,9 @@ public class AmbariServer {
       System.out.println("Database consistency check started");
       Logger DB_CHECK_LOG = LoggerFactory.getLogger(DatabaseConsistencyCheckHelper.class);
       try{
+        if (System.getProperty("fixDatabaseConsistency") != null ){
+          DatabaseConsistencyCheckHelper.fixDatabaseConsistency();
+        }
         DatabaseConsistencyCheckHelper.runAllDBChecks();
       } catch(Throwable e) {
         System.out.println("Database consistency check: failed");

http://git-wip-us.apache.org/repos/asf/ambari/blob/6029846c/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java
index 937e872..f96db60 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java
@@ -18,6 +18,8 @@
 
 package org.apache.ambari.server.orm.entities;
 
+import com.google.common.base.Objects;
+
 import java.util.Collection;
 
 import javax.persistence.Basic;
@@ -52,6 +54,7 @@ import javax.persistence.UniqueConstraint;
     @NamedQuery(name = "ClusterConfigEntity.findNextConfigVersion", query = "SELECT COALESCE(MAX(clusterConfig.version),0) + 1 as nextVersion FROM ClusterConfigEntity clusterConfig WHERE clusterConfig.type=:configType AND clusterConfig.clusterId=:clusterId"),
     @NamedQuery(name = "ClusterConfigEntity.findAllConfigsByStack", query = "SELECT clusterConfig FROM ClusterConfigEntity clusterConfig WHERE clusterConfig.clusterId=:clusterId AND clusterConfig.stack=:stack"),
     @NamedQuery(name = "ClusterConfigEntity.findLatestConfigsByStack", query = "SELECT clusterConfig FROM ClusterConfigEntity clusterConfig WHERE clusterConfig.clusterId=:clusterId AND clusterConfig.timestamp = (SELECT MAX(clusterConfig2.timestamp) FROM ClusterConfigEntity clusterConfig2 WHERE clusterConfig2.clusterId=:clusterId AND clusterConfig2.stack=:stack AND clusterConfig2.type = clusterConfig.type)"),
+    @NamedQuery(name = "ClusterConfigEntity.findNotMappedClusterConfigsToService", query = "SELECT clusterConfig FROM ClusterConfigEntity clusterConfig WHERE clusterConfig.serviceConfigEntities IS EMPTY AND clusterConfig.type != 'cluster-env'"),
     @NamedQuery(name = "ClusterConfigEntity.findClusterConfigMappingsByStack",
       query = "SELECT mapping FROM ClusterConfigMappingEntity mapping " +
         "JOIN ClusterConfigEntity config ON mapping.typeName = config.type AND mapping.tag = config.tag " +
@@ -266,4 +269,18 @@ public class ClusterConfigEntity {
   public void setServiceConfigEntities(Collection<ServiceConfigEntity> serviceConfigEntities) {
     this.serviceConfigEntities = serviceConfigEntities;
   }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public String toString() {
+    return Objects.toStringHelper(this)
+      .add("clusterId", clusterId)
+      .add("type", type)
+      .add("version", version)
+      .add("tag", tag)
+      .add("timestamp", timestamp)
+      .toString();
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/6029846c/ambari-server/src/main/python/ambari-server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari-server.py b/ambari-server/src/main/python/ambari-server.py
index ab26c0d..64962d3 100755
--- a/ambari-server/src/main/python/ambari-server.py
+++ b/ambari-server/src/main/python/ambari-server.py
@@ -397,6 +397,7 @@ def init_parser_options(parser):
                     help="Specifies the path to the JDBC driver JAR file")
   parser.add_option('--skip-properties-validation', action="store_true", default=False, help="Skip properties file validation", dest="skip_properties_validation")
   parser.add_option('--skip-database-check', action="store_true", default=False, help="Skip database consistency check", dest="skip_database_check")
+  parser.add_option('--auto-fix-database', action="store_true", default=False, help="Automatically fix database consistency issues", dest="fix_database_consistency")
   add_parser_options('--mpack',
       default=None,
       help="Specify the path for management pack to be installed/upgraded",
@@ -498,6 +499,7 @@ def init_parser_options(parser):
   parser.add_option('--version-display-name', default=None, help="Display name of desired repo version", dest="desired_repo_version")
   parser.add_option('--skip-properties-validation', action="store_true", default=False, help="Skip properties file validation", dest="skip_properties_validation")
   parser.add_option('--skip-database-check', action="store_true", default=False, help="Skip database consistency check", dest="skip_database_check")
+  parser.add_option('--auto-fix-database', action="store_true", default=False, help="Automatically fix database consistency issues", dest="fix_database_consistency")
   parser.add_option('--force-version', action="store_true", default=False, help="Force version to current", dest="force_repo_version")
   parser.add_option('--version', dest="stack_versions", default=None, action="append", type="string",
                     help="Specify stack version that needs to be enabled. All other stacks versions will be disabled")

http://git-wip-us.apache.org/repos/asf/ambari/blob/6029846c/ambari-server/src/main/python/ambari_server_main.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server_main.py b/ambari-server/src/main/python/ambari_server_main.py
index 6c77522..a3fb08d 100644
--- a/ambari-server/src/main/python/ambari_server_main.py
+++ b/ambari-server/src/main/python/ambari_server_main.py
@@ -225,6 +225,7 @@ def wait_for_server_start(pidFile, scmStatus):
 
   if 'Database consistency check: failed' in open(configDefaults.SERVER_OUT_FILE).read():
     print "DB configs consistency check failed. Run \"ambari-server start --skip-database-check\" to skip. " \
+    "You may try --auto-fix-database flag to attempt to fix issues automatically. " \
     "If you use this \"--skip-database-check\" option, do not make any changes to your cluster topology " \
     "or perform a cluster upgrade until you correct the database consistency issues. See " + \
           configDefaults.DB_CHECK_LOG + "for more details on the consistency issues."
@@ -337,6 +338,8 @@ def server_process_main(options, scmStatus=None):
     properties.process_pair(CHECK_DATABASE_SKIPPED_PROPERTY, "true")
   else:
     print "Ambari database consistency check started..."
+    if options.fix_database_consistency:
+      jvm_args += " -DfixDatabaseConsistency"
     properties.process_pair(CHECK_DATABASE_SKIPPED_PROPERTY, "false")
 
   update_properties(properties)
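
Taken together with the ambari-server.py and AmbariServer.java changes above, the new option is wired end to end:
running "ambari-server start --auto-fix-database" appends -DfixDatabaseConsistency to the JVM arguments, and when that
system property is present AmbariServer calls DatabaseConsistencyCheckHelper.fixDatabaseConsistency() before
runAllDBChecks() re-validates the result.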

http://git-wip-us.apache.org/repos/asf/ambari/blob/6029846c/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariServerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariServerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariServerTest.java
index 45e319a..d8af35a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariServerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariServerTest.java
@@ -19,12 +19,15 @@
 package org.apache.ambari.server.controller;
 
 import javax.persistence.EntityManager;
+import javax.persistence.Query;
+import javax.persistence.TypedQuery;
 import javax.servlet.DispatcherType;
 import javax.servlet.SessionCookieConfig;
 import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.anyString;
 import static org.easymock.EasyMock.createNiceMock;
 import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.partialMockBuilder;
 import static org.easymock.EasyMock.replay;
 import static org.easymock.EasyMock.verify;
 
@@ -34,6 +37,7 @@ import java.net.PasswordAuthentication;
 import java.sql.Connection;
 import java.sql.ResultSet;
 import java.sql.Statement;
+import java.util.ArrayList;
 import java.util.EnumSet;
 
 import org.apache.ambari.server.AmbariException;
@@ -220,6 +224,16 @@ public class AmbariServerTest {
     AmbariServer ambariServer = new AmbariServer();
 
 
+    final Configuration mockConfiguration = partialMockBuilder(Configuration.class).withConstructor()
+        .addMockedMethod("getDatabaseType").createMock();
+    final TypedQuery mockQuery = easyMockSupport.createNiceMock(TypedQuery.class);
+
+    expect(mockConfiguration.getDatabaseType()).andReturn(null).anyTimes();
+    expect(mockEntityManager.createNamedQuery(anyString(),anyObject(Class.class))).andReturn(mockQuery);
+    expect(mockQuery.getResultList()).andReturn(new ArrayList());
+
+    replay(mockConfiguration);
+
     final Injector mockInjector = Guice.createInjector(new AbstractModule() {
       @Override
       protected void configure() {
@@ -229,6 +243,7 @@ public class AmbariServerTest {
         bind(OsFamily.class).toInstance(mockOSFamily);
         bind(EntityManager.class).toInstance(mockEntityManager);
         bind(Clusters.class).toInstance(mockClusters);
+        bind(Configuration.class).toInstance(mockConfiguration);
       }
     });
 
@@ -281,6 +296,16 @@ public class AmbariServerTest {
     AmbariServer ambariServer = new AmbariServer();
 
 
+    final Configuration mockConfiguration = partialMockBuilder(Configuration.class).withConstructor()
+        .addMockedMethod("getDatabaseType").createMock();
+    final TypedQuery mockQuery = easyMockSupport.createNiceMock(TypedQuery.class);
+
+    expect(mockConfiguration.getDatabaseType()).andReturn(null).anyTimes();
+    expect(mockEntityManager.createNamedQuery(anyString(),anyObject(Class.class))).andReturn(mockQuery);
+    expect(mockQuery.getResultList()).andReturn(new ArrayList());
+
+    replay(mockConfiguration);
+
     final Injector mockInjector = Guice.createInjector(new AbstractModule() {
       @Override
       protected void configure() {
@@ -290,6 +315,7 @@ public class AmbariServerTest {
         bind(OsFamily.class).toInstance(mockOSFamily);
         bind(EntityManager.class).toInstance(mockEntityManager);
         bind(Clusters.class).toInstance(mockClusters);
+        bind(Configuration.class).toInstance(mockConfiguration);
       }
     });
 


[40/51] [abbrv] ambari git commit: AMBARI-19137. HDP 3.0 TP - move ZK, HDFS, YARN/MR into new common-services version (alejandro)

Posted by sm...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/core-site.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/core-site.xml
new file mode 100644
index 0000000..20b1930
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/core-site.xml
@@ -0,0 +1,224 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+ 
+        http://www.apache.org/licenses/LICENSE-2.0
+ 
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+ -->
+<!-- Put site-specific property overrides in this file. -->
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
+  <property>
+    <name>ha.failover-controller.active-standby-elector.zk.op.retries</name>
+    <value>120</value>
+    <description>ZooKeeper Failover Controller retries setting for your environment</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- i/o properties -->
+  <property>
+    <name>io.file.buffer.size</name>
+    <value>131072</value>
+    <description>The size of buffer for use in sequence files.
+  The size of this buffer should probably be a multiple of hardware
+  page size (4096 on Intel x86), and it determines how much data is
+  buffered during read and write operations.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>io.serializations</name>
+    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
+    <description> A list of comma-delimited serialization classes that can be used for obtaining serializers and deserializers.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>io.compression.codecs</name>
+    <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
+    <description>A list of the compression codec classes that can be used
+                 for compression/decompression.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- file system properties -->
+  <property>
+    <name>fs.defaultFS</name>
+    <!-- cluster variant -->
+    <value>hdfs://localhost:8020</value>
+    <description>The name of the default file system.  Either the
+  literal string "local" or a host:port for HDFS.</description>
+    <final>true</final>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>fs.trash.interval</name>
+    <value>360</value>
+    <description>Number of minutes after which the checkpoint gets deleted.
+        If zero, the trash feature is disabled.
+        This option may be configured both on the server and the client.
+        If trash is disabled server side then the client side configuration is checked.
+        If trash is enabled on the server side then the value configured on the server is used and the client configuration value is ignored.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- ipc properties: copied from kryptonite configuration -->
+  <property>
+    <name>ipc.client.idlethreshold</name>
+    <value>8000</value>
+    <description>Defines the threshold number of connections after which
+               connections will be inspected for idleness.
+  </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ipc.client.connection.maxidletime</name>
+    <value>30000</value>
+    <description>The maximum time after which a client will bring down the
+               connection to the server.
+  </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ipc.client.connect.max.retries</name>
+    <value>50</value>
+    <description>Defines the maximum number of retries for IPC connections.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ipc.server.tcpnodelay</name>
+    <value>true</value>
+    <description>Turn on/off Nagle's algorithm for the TCP socket
+      connection on
+      the server. Setting to true disables the algorithm and may
+      decrease latency
+      with a cost of more/smaller packets.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- Web Interface Configuration -->
+  <property>
+    <name>mapreduce.jobtracker.webinterface.trusted</name>
+    <value>false</value>
+    <description> If set to true, the web interfaces of JT and NN may contain
+                actions, such as kill job, delete file, etc., that should
+                not be exposed to public. Enable this option if the interfaces
+                are only reachable by those who have the right authorization.
+  </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hadoop.security.authentication</name>
+    <value>simple</value>
+    <description>
+   Set the authentication for the cluster. Valid values are: simple or
+   kerberos.
+   </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hadoop.security.authorization</name>
+    <value>false</value>
+    <description>
+     Enable authorization for different protocols.
+  </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hadoop.security.auth_to_local</name>
+    <value>DEFAULT</value>
+    <description>The mapping from kerberos principal names to local OS user names.
+  So the default rule is just "DEFAULT" which takes all principals in your default domain to their first component.
+  "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley", if your default domain is APACHE.ORG.
+The translations rules have 3 sections:
+      base     filter    substitution
+The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
+
+[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
+[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
+[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
+
+The filter is a regex in parens that must match the generated string for the rule to apply.
+
+"(.*%admin)" will take any string that ends in "%admin"
+"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
+
+Finally, the substitution is a sed rule to translate a regex into a fixed string.
+
+"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
+"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
+"s/X/Y/g" replaces all of the "X" in the name with "Y"
+
+So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
+
+RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
+DEFAULT
+
+To also translate the names with a second component, you'd make the rules:
+
+RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
+RULE:[2:$1@$0](.*@ACME.COM)s/@.*//
+DEFAULT
+
+If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
+
+RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
+DEFAULT
+    </description>
+    <value-attributes>
+      <type>multiLine</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>net.topology.script.file.name</name>
+    <value>/etc/hadoop/conf/topology_script.py</value>
+    <description>
+      Location of topology script used by Hadoop to determine the rack location of nodes.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- These configs were inherited from HDP 2.2 -->
+  <property>
+    <name>hadoop.http.authentication.simple.anonymous.allowed</name>
+    <value>true</value>
+    <description>
+      Indicates if anonymous requests are allowed when using &apos;simple&apos; authentication.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hadoop.security.key.provider.path</name>
+    <value/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hadoop-env</type>
+        <name>keyserver_host</name>
+      </property>
+      <property>
+        <type>hadoop-env</type>
+        <name>keyserver_port</name>
+      </property>
+      <property>
+        <type>kms-env</type>
+        <name>kms_port</name>
+      </property>
+      <property>
+        <type>ranger-kms-site</type>
+        <name>ranger.service.https.attrib.ssl.enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
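
A worked example of the auth_to_local syntax described in the hadoop.security.auth_to_local property above (realm and
hostnames are illustrative):

    RULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/
    DEFAULT

With the default realm EXAMPLE.COM, the two-component principal nn/namenode01.example.com@EXAMPLE.COM is first reduced
to the string nn@EXAMPLE.COM, which matches the filter, so the substitution maps it to the local user hdfs; a simple
principal such as jdoe@EXAMPLE.COM does not match the rule and falls through to DEFAULT, becoming jdoe.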

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hadoop-env.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hadoop-env.xml
new file mode 100644
index 0000000..24032fa
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hadoop-env.xml
@@ -0,0 +1,421 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+    <display-name>Hadoop Log Dir Prefix</display-name>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>/var/run/hadoop</value>
+    <display-name>Hadoop PID Dir Prefix</display-name>
+    <description>Hadoop PID Dir Prefix</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hadoop_root_logger</name>
+    <value>INFO,RFA</value>
+    <display-name>Hadoop Root Logger</display-name>
+    <description>Hadoop Root Logger</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+    <display-name>Hadoop maximum Java heap size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>NameNode Java heap size</description>
+    <display-name>NameNode Java heap size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>268435456</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hdfs-site</type>
+        <name>dfs.datanode.data.dir</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>namenode_opt_newsize</name>
+    <value>200</value>
+    <description>Default size of the Java new generation for the NameNode (Java option -XX:NewSize). The value should be 1/8 of the NameNode maximum heap size (-Xmx), i.e. set namenode_opt_newsize to 1/8 of namenode_heapsize.</description>
+    <display-name>NameNode new generation size</display-name>
+    <depends-on>
+      <property>
+        <type>hadoop-env</type>
+        <name>namenode_heapsize</name>
+      </property>
+    </depends-on>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>16384</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>namenode_opt_maxnewsize</name>
+    <value>200</value>
+    <description>NameNode maximum new generation size</description>
+    <display-name>NameNode maximum new generation size</display-name>
+    <depends-on>
+      <property>
+        <type>hadoop-env</type>
+        <name>namenode_heapsize</name>
+      </property>
+    </depends-on>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>16384</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>namenode_opt_permsize</name>
+    <value>128</value>
+    <description>NameNode permanent generation size</description>
+    <display-name>NameNode permanent generation size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>2096</maximum>
+      <unit>MB</unit>
+      <increment-step>128</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>namenode_opt_maxpermsize</name>
+    <value>256</value>
+    <description>NameNode maximum permanent generation size</description>
+    <display-name>NameNode maximum permanent generation size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>2096</maximum>
+      <unit>MB</unit>
+      <increment-step>128</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dtnode_heapsize</name>
+    <value>1024</value>
+    <description>DataNode maximum Java heap size</description>
+    <display-name>DataNode maximum Java heap size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>268435456</maximum>
+      <unit>MB</unit>
+      <increment-step>128</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>proxyuser_group</name>
+    <display-name>Proxy User Group</display-name>
+    <value>users</value>
+    <property-type>GROUP</property-type>
+    <description>Proxy user group.</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hdfs_user</name>
+    <display-name>HDFS User</display-name>
+    <value>hdfs</value>
+    <property-type>USER</property-type>
+    <description>User to run HDFS as</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hdfs_tmp_dir</name>
+    <value>/tmp</value>
+    <description>HDFS tmp Dir</description>
+    <display-name>HDFS tmp Dir</display-name>
+    <property-type>NOT_MANAGED_HDFS_PATH</property-type>
+    <value-attributes>
+      <read-only>true</read-only>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hdfs_user_nofile_limit</name>
+    <value>128000</value>
+    <description>Max open files limit setting for HDFS user.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hdfs_user_nproc_limit</name>
+    <value>65536</value>
+    <description>Max number of processes limit setting for HDFS user.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>namenode_backup_dir</name>
+    <description>Local directory for storing backup copy of NameNode images during upgrade</description>
+    <value>/tmp/upgrades</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hdfs_user_keytab</name>
+    <description>HDFS keytab path</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hdfs_principal_name</name>
+    <description>HDFS principal name</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- These configs were inherited from HDP 2.2 -->
+  <property>
+    <name>keyserver_host</name>
+    <value> </value>
+    <display-name>Key Server Host</display-name>
+    <description>Hostnames where Key Management Server is installed</description>
+    <value-attributes>
+      <type>string</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>keyserver_port</name>
+    <value/>
+    <display-name>Key Server Port</display-name>
+    <description>Port number where Key Management Server is available</description>
+    <value-attributes>
+      <type>int</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- These configs were inherited from HDP 2.3 -->
+  <!-- hadoop-env.sh -->
+  <property>
+    <name>content</name>
+    <display-name>hadoop-env template</display-name>
+    <description>This is the jinja template for hadoop-env.sh file</description>
+    <value>
+      # Set Hadoop-specific environment variables here.
+
+      # The only required environment variable is JAVA_HOME.  All others are
+      # optional.  When running a distributed configuration it is best to
+      # set JAVA_HOME in this file, so that it is correctly defined on
+      # remote nodes.
+
+      # The java implementation to use.  Required.
+      export JAVA_HOME={{java_home}}
+      export HADOOP_HOME_WARN_SUPPRESS=1
+
+      # Hadoop home directory
+      export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
+
+      # Hadoop Configuration Directory
+      #TODO: if env var set that can cause problems
+      export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+
+      # Path to jsvc required by secure datanode
+      export JSVC_HOME={{jsvc_path}}
+
+
+      # The maximum amount of heap to use, in MB. Default is 1000.
+      if [[ ("$SERVICE" = "hiveserver2") || ("$SERVICE" = "metastore") || ( "$SERVICE" = "cli") ]]; then
+      if [ "$HADOOP_HEAPSIZE" = "" ]; then
+      export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+      fi
+      else
+      export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+      fi
+
+
+      export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+      # Extra Java runtime options.  Empty by default.
+      export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+      # Command specific options appended to HADOOP_OPTS when specified
+
+      {% if java_version &lt; 8 %}
+      export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT -Dorg.mortbay.jetty.Request.maxFormContentSize=-1  ${HADOOP_NAMENODE_OPTS}"
+      export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+      # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+      export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS"
+      {% else %}
+      export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
+      export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+      # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+      export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+      {% endif %}
+      HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+      HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+      HADOOP_DATANODE_OPTS="-XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:ConcGCThreads=4 -XX:+UseConcMarkSweepGC -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
+      HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+      # On secure datanodes, user to run the datanode as after dropping privileges
+      export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}
+
+      # Extra ssh options.  Empty by default.
+      export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+      # Where log files are stored.  $HADOOP_HOME/logs by default.
+      export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+      # History server logs
+      export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+      # Where log files are stored in the secure data environment.
+      export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+      # File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+      # export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+      # host:path where hadoop code should be rsync'd from.  Unset by default.
+      # export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+      # Seconds to sleep between slave commands.  Unset by default.  This
+      # can be useful in large clusters, where, e.g., slave rsyncs can
+      # otherwise arrive faster than the master can service them.
+      # export HADOOP_SLAVE_SLEEP=0.1
+
+      # The directory where pid files are stored. /tmp by default.
+      export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+      export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+      # History server pid
+      export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+      YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY -Drm.audit.logger=INFO,RMAUDIT"
+
+      # A string representing this instance of hadoop. $USER by default.
+      export HADOOP_IDENT_STRING=$USER
+
+      # The scheduling priority for daemon processes.  See 'man nice'.
+
+      # export HADOOP_NICENESS=10
+
+      # Add database libraries
+      JAVA_JDBC_LIBS=""
+      if [ -d "/usr/share/java" ]; then
+      for jarFile in `ls /usr/share/java | grep -E "(mysql|ojdbc|postgresql|sqljdbc)" 2&gt;/dev/null`
+      do
+      JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+      done
+      fi
+
+      # Add libraries required by nodemanager
+      MAPREDUCE_LIBS={{mapreduce_libs_path}}
+
+      # Add libraries to the hadoop classpath - some may not need a colon as they already include it
+      export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
+
+      if [ -d "/usr/lib/tez" ]; then
+      export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
+      fi
+
+      # Setting path to hdfs command line
+      export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+      #Mostly required for hadoop 2.0
+      export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
+
+      {% if is_datanode_max_locked_memory_set %}
+      # Work around a temporary bug where the ulimit from conf files is not picked up without a full re-login.
+      # Makes sense to apply only when running the DataNode as root
+      if [ "$command" == "datanode" ] &amp;&amp; [ "$EUID" -eq 0 ] &amp;&amp; [ -n "$HADOOP_SECURE_DN_USER" ]; then
+      ulimit -l {{datanode_max_locked_memory}}
+      fi
+      {% endif %}
+    </value>
+    <value-attributes>
+      <type>content</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>nfsgateway_heapsize</name>
+    <display-name>NFSGateway maximum Java heap size</display-name>
+    <value>1024</value>
+    <description>Maximum Java heap size for NFSGateway (Java option -Xmx)</description>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
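
The hadoop-env content above is a Jinja template; Ambari fills the {{...}} placeholders and evaluates the {% if %}/{% else %} branch (whose condition appears earlier in the template) before writing hadoop-env.sh. As a rough, minimal sketch of that substitution only (the sample values are invented and this is not Ambari's actual rendering code), a jinja2 call behaves like this; note that $USER and ${HADOOP_NAMENODE_OPTS} are left untouched for the shell to expand later:

    # Minimal illustration: how the {{...}} placeholders in the template resolve.
    # Sample values are made up; Ambari supplies the real ones from cluster configs.
    from jinja2 import Template

    fragment = (
        'export HADOOP_NAMENODE_OPTS="-Xms{{namenode_heapsize}} '
        '-Xmx{{namenode_heapsize}} -XX:NewSize={{namenode_opt_newsize}} '
        '-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log ${HADOOP_NAMENODE_OPTS}"'
    )

    print(Template(fragment).render(
        namenode_heapsize="1024m",
        namenode_opt_newsize="128m",
        hdfs_log_dir_prefix="/var/log/hadoop",
    ))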

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hadoop-metrics2.properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hadoop-metrics2.properties.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hadoop-metrics2.properties.xml
new file mode 100644
index 0000000..6b45e84
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hadoop-metrics2.properties.xml
@@ -0,0 +1,125 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <!-- hadoop-metrics2.properties -->
+  <property>
+    <name>content</name>
+    <display-name>hadoop-metrics2.properties template</display-name>
+    <description>This is the jinja template for hadoop-metrics2.properties file</description>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# syntax: [prefix].[source|sink|jmx].[instance].[options]
+# See package.html for org.apache.hadoop.metrics2 for details
+
+{% if has_ganglia_server %}
+*.period=60
+
+*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+*.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+# Hook up to the server
+namenode.sink.ganglia.servers={{ganglia_server_host}}:8661
+datanode.sink.ganglia.servers={{ganglia_server_host}}:8659
+jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662
+tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658
+maptask.sink.ganglia.servers={{ganglia_server_host}}:8660
+reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660
+resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664
+nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657
+historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666
+journalnode.sink.ganglia.servers={{ganglia_server_host}}:8654
+nimbus.sink.ganglia.servers={{ganglia_server_host}}:8649
+supervisor.sink.ganglia.servers={{ganglia_server_host}}:8650
+
+resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
+
+{% endif %}
+
+{% if has_metric_collector %}
+
+*.period={{metrics_collection_period}}
+*.sink.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
+*.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+*.sink.timeline.period={{metrics_collection_period}}
+*.sink.timeline.sendInterval={{metrics_report_interval}}000
+*.sink.timeline.slave.host.name={{hostname}}
+*.sink.timeline.zookeeper.quorum={{zookeeper_quorum}}
+*.sink.timeline.protocol={{metric_collector_protocol}}
+*.sink.timeline.port={{metric_collector_port}}
+
+# HTTPS properties
+*.sink.timeline.truststore.path = {{metric_truststore_path}}
+*.sink.timeline.truststore.type = {{metric_truststore_type}}
+*.sink.timeline.truststore.password = {{metric_truststore_password}}
+
+datanode.sink.timeline.collector.hosts={{ams_collector_hosts}}
+namenode.sink.timeline.collector.hosts={{ams_collector_hosts}}
+resourcemanager.sink.timeline.collector.hosts={{ams_collector_hosts}}
+nodemanager.sink.timeline.collector.hosts={{ams_collector_hosts}}
+jobhistoryserver.sink.timeline.collector.hosts={{ams_collector_hosts}}
+journalnode.sink.timeline.collector.hosts={{ams_collector_hosts}}
+maptask.sink.timeline.collector.hosts={{ams_collector_hosts}}
+reducetask.sink.timeline.collector.hosts={{ams_collector_hosts}}
+applicationhistoryserver.sink.timeline.collector.hosts={{ams_collector_hosts}}
+
+resourcemanager.sink.timeline.tagsForPrefix.yarn=Queue
+
+{% if is_nn_client_port_configured %}
+# Namenode rpc ports customization
+namenode.sink.timeline.metric.rpc.client.port={{nn_rpc_client_port}}
+{% endif %}
+{% if is_nn_dn_port_configured %}
+namenode.sink.timeline.metric.rpc.datanode.port={{nn_rpc_dn_port}}
+{% endif %}
+{% if is_nn_healthcheck_port_configured %}
+namenode.sink.timeline.metric.rpc.healthcheck.port={{nn_rpc_healthcheck_port}}
+{% endif %}
+
+{% endif %}
+    </value>
+    <value-attributes>
+      <type>content</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
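
The property keys in the template above follow the [prefix].[source|sink].[instance].[options] syntax noted in its header, where the prefix selects a daemon (namenode, datanode, ...) or * for all. Purely as an illustration of that naming scheme (this is not the Hadoop metrics2 loader, and the sample lines stand in for a rendered file):

    # Group sink settings by (daemon prefix, sink instance) to show how the
    # [prefix].[sink].[instance].[option] keys are structured. Sample lines only.
    sample = [
        "*.sink.timeline.period=10",
        "namenode.sink.timeline.collector.hosts=collector1",
        "datanode.sink.ganglia.servers=gserver:8659",
    ]

    sinks = {}
    for line in sample:
        key, _, value = line.partition("=")
        prefix, kind, instance, option = key.split(".", 3)
        if kind == "sink":
            sinks.setdefault((prefix, instance), {})[option] = value

    for (prefix, instance), options in sorted(sinks.items()):
        print(prefix, instance, options)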

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hadoop-policy.xml
new file mode 100644
index 0000000..8e9486d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hadoop-policy.xml
@@ -0,0 +1,130 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<!-- Put site-specific property overrides in this file. -->
+<configuration supports_final="true">
+  <property>
+    <name>security.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientProtocol, which is used by user code
+    via the DistributedFileSystem.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>security.client.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
+    for block recovery.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>security.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for DatanodeProtocol, which is used by datanodes to
+    communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>security.inter.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
+    for updating generation timestamp.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>security.namenode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for NamenodeProtocol, the protocol used by the secondary
+    namenode to communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>security.inter.tracker.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for InterTrackerProtocol, used by the tasktrackers to
+    communicate with the jobtracker.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>security.job.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for JobSubmissionProtocol, used by job clients to
+    communicate with the jobtracker for job submission, querying job status etc.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>security.job.task.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
+    tasks to communicate with the parent tasktracker.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>security.admin.operations.protocol.acl</name>
+    <value>hadoop</value>
+    <description>ACL for AdminOperationsProtocol. Used for admin commands.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
+    <value>hadoop</value>
+    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
+    users mappings. The ACL is a comma-separated list of user and
+    group names. The user and group list is separated by a blank. For
+    e.g. "alice,bob users,wheel".  A special value of "*" means all
+    users are allowed.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>security.refresh.policy.protocol.acl</name>
+    <value>hadoop</value>
+    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
+    dfsadmin and mradmin commands to refresh the security policy in-effect.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
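
Every ACL above uses the same format: a comma-separated user list and a comma-separated group list separated by a single blank, with "*" meaning everyone. Below is a minimal sketch of that format check, illustrative only; it assumes exact name matching and caller-supplied group membership, and is not Hadoop's AccessControlList class:

    # Illustrative check of the "user1,user2 group1,group2" ACL format,
    # where "*" allows all users. Group membership is passed in by the caller.
    def acl_allows(acl, user, user_groups):
        acl = acl.strip()
        if acl == "*":
            return True
        users_part, _, groups_part = acl.partition(" ")
        users = set(filter(None, users_part.split(",")))
        groups = set(filter(None, groups_part.split(",")))
        return user in users or bool(groups & set(user_groups))

    print(acl_allows("*", "alice", []))                             # True
    print(acl_allows("alice,bob users,wheel", "carol", ["wheel"]))  # True
    print(acl_allows("alice,bob users,wheel", "dave", ["staff"]))   # False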

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hdfs-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hdfs-log4j.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hdfs-log4j.xml
new file mode 100644
index 0000000..37b339e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hdfs-log4j.xml
@@ -0,0 +1,226 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <!-- These configs were inherited from HDP 2.2 -->
+  <property>
+    <name>content</name>
+    <display-name>hdfs-log4j template</display-name>
+    <description>Custom log4j.properties</description>
+    <value>
+      #
+      # Licensed to the Apache Software Foundation (ASF) under one
+      # or more contributor license agreements.  See the NOTICE file
+      # distributed with this work for additional information
+      # regarding copyright ownership.  The ASF licenses this file
+      # to you under the Apache License, Version 2.0 (the
+      # "License"); you may not use this file except in compliance
+      # with the License.  You may obtain a copy of the License at
+      #
+      #  http://www.apache.org/licenses/LICENSE-2.0
+      #
+      # Unless required by applicable law or agreed to in writing,
+      # software distributed under the License is distributed on an
+      # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+      # KIND, either express or implied.  See the License for the
+      # specific language governing permissions and limitations
+      # under the License.
+      #
+
+
+      # Define some default values that can be overridden by system properties
+      # To change daemon root logger use hadoop_root_logger in hadoop-env
+      hadoop.root.logger=INFO,console
+      hadoop.log.dir=.
+      hadoop.log.file=hadoop.log
+
+
+      # Define the root logger to the system property "hadoop.root.logger".
+      log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+      # Logging Threshold
+      log4j.threshhold=ALL
+
+      #
+      # Daily Rolling File Appender
+      #
+
+      log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+      log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+      # Rollover at midnight
+      log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+      # 30-day backup
+      #log4j.appender.DRFA.MaxBackupIndex=30
+      log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+      # Pattern format: Date LogLevel LoggerName LogMessage
+      log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+      # Debugging Pattern format
+      #log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+      #
+      # console
+      # Add "console" to rootlogger above if you want to use this
+      #
+
+      log4j.appender.console=org.apache.log4j.ConsoleAppender
+      log4j.appender.console.target=System.err
+      log4j.appender.console.layout=org.apache.log4j.PatternLayout
+      log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+      #
+      # TaskLog Appender
+      #
+
+      #Default values
+      hadoop.tasklog.taskid=null
+      hadoop.tasklog.iscleanup=false
+      hadoop.tasklog.noKeepSplits=4
+      hadoop.tasklog.totalLogFileSize=100
+      hadoop.tasklog.purgeLogSplits=true
+      hadoop.tasklog.logsRetainHours=12
+
+      log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+      log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+      log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+      log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+      log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+      log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+      #
+      #Security audit appender
+      #
+      hadoop.security.logger=INFO,console
+      hadoop.security.log.maxfilesize=256MB
+      hadoop.security.log.maxbackupindex=20
+      log4j.category.SecurityLogger=${hadoop.security.logger}
+      hadoop.security.log.file=SecurityAuth.audit
+      log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
+      log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+      log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+      log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+      log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+      log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+      log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+      log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+      log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+      log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+      log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+      #
+      # hdfs audit logging
+      #
+      hdfs.audit.logger=INFO,console
+      log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+      log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+      log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
+      log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+      log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
+      log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+      log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
+
+      #
+      # NameNode metrics logging.
+      # The default is to retain two namenode-metrics.log files up to 64MB each.
+      #
+      namenode.metrics.logger=INFO,NullAppender
+      log4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}
+      log4j.additivity.NameNodeMetricsLog=false
+      log4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender
+      log4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log
+      log4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout
+      log4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n
+      log4j.appender.NNMETRICSRFA.MaxBackupIndex=1
+      log4j.appender.NNMETRICSRFA.MaxFileSize=64MB
+
+      #
+      # mapred audit logging
+      #
+      mapred.audit.logger=INFO,console
+      log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+      log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+      log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
+      log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+      log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+      log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+      log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
+
+      #
+      # Rolling File Appender
+      #
+
+      log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+      log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+      # Logfile size and 30-day backups
+      log4j.appender.RFA.MaxFileSize=256MB
+      log4j.appender.RFA.MaxBackupIndex=10
+
+      log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+      log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+      log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+      # Custom Logging levels
+
+      hadoop.metrics.log.level=INFO
+      #log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+      #log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+      #log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+      log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+      # Jets3t library
+      log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+      #
+      # Null Appender
+      # Trap security logger on the hadoop client side
+      #
+      log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+      #
+      # Event Counter Appender
+      # Sends counts of logging messages at different severity levels to Hadoop Metrics.
+      #
+      log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+      # Removes "deprecated" messages
+      log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
+
+      #
+      # HDFS block state change log from block manager
+      #
+      # Uncomment the following to suppress normal block state change
+      # messages from BlockManager in NameNode.
+      #log4j.logger.BlockStateChange=WARN
+    </value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
\ No newline at end of file
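
Most appenders defined above (DRFA, DRFAS, DRFAAUDIT, MRAUDIT) are DailyRollingFileAppenders with DatePattern ".yyyy-MM-dd", so on rollover the date is appended to the log file name. A tiny sketch of the resulting names, for illustration only (the path is an example, not a fixed location):

    # Name that a DailyRollingFileAppender with DatePattern ".yyyy-MM-dd"
    # gives a rolled-over file; log4j does this on the Java side.
    from datetime import date

    def rolled_name(base, day):
        return "{}.{}".format(base, day.strftime("%Y-%m-%d"))

    print(rolled_name("/var/log/hadoop/hdfs/hdfs-audit.log", date(2016, 12, 8)))
    # -> /var/log/hadoop/hdfs/hdfs-audit.log.2016-12-08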

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hdfs-logsearch-conf.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hdfs-logsearch-conf.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hdfs-logsearch-conf.xml
new file mode 100644
index 0000000..d85a028
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hdfs-logsearch-conf.xml
@@ -0,0 +1,248 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <property>
+    <name>service_name</name>
+    <display-name>Service name</display-name>
+    <description>Service name for Logsearch Portal (label)</description>
+    <value>HDFS</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>component_mappings</name>
+    <display-name>Component mapping</display-name>
+    <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
+    <value>NAMENODE:hdfs_namenode;DATANODE:hdfs_datanode;SECONDARY_NAMENODE:hdfs_secondarynamenode;JOURNALNODE:hdfs_journalnode;ZKFC:hdfs_zkfc;NFS_GATEWAY:hdfs_nfs3</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>Logfeeder Config</display-name>
+    <description>Metadata jinja template for Logfeeder which contains grok patterns for reading service specific logs.</description>
+    <value>
+{
+  "input":[
+    {
+      "type":"hdfs_datanode",
+      "rowtype":"service",
+      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-datanode-*.log"
+    },
+    {
+      "type":"hdfs_namenode",
+      "rowtype":"service",
+      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-namenode-*.log"
+    },
+    {
+      "type":"hdfs_journalnode",
+      "rowtype":"service",
+      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-journalnode-*.log"
+    },
+    {
+      "type":"hdfs_secondarynamenode",
+      "rowtype":"service",
+      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-secondarynamenode-*.log"
+    },
+    {
+      "type":"hdfs_zkfc",
+      "rowtype":"service",
+      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-zkfc-*.log"
+    },
+    {
+      "type":"hdfs_nfs3",
+      "rowtype":"service",
+      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-nfs3-*.log"
+    },
+    {
+      "type":"hdfs_audit",
+      "rowtype":"audit",
+      "is_enabled":"true",
+      "add_fields":{
+        "logType":"HDFSAudit",
+        "enforcer":"hadoop-acl",
+        "repoType":"1",
+        "repo":"hdfs"
+      },
+      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hdfs-audit.log"
+    }
+   ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "hdfs_datanode",
+            "hdfs_journalnode",
+            "hdfs_secondarynamenode",
+            "hdfs_namenode",
+            "hdfs_zkfc",
+            "hdfs_nfs3"
+          ]
+         }
+       },
+      "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\(%{JAVAFILE:file}:%{JAVAMETHOD:method}\\(%{INT:line_number}\\)\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+       }
+     },
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "hdfs_audit"
+          ]
+         }
+       },
+      "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:evtTime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:evtTime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}:%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "evtTime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+         }
+       }
+     },
+    {
+      "filter":"keyvalue",
+      "sort_order":1,
+      "conditions":{
+        "fields":{
+          "type":[
+            "hdfs_audit"
+          ]
+         }
+       },
+      "source_field":"log_message",
+      "value_split":"=",
+      "field_split":"\t",
+      "post_map_values":{
+        "src":{
+          "map_fieldname":{
+            "new_fieldname":"resource"
+          }
+         },
+        "ip":{
+          "map_fieldname":{
+            "new_fieldname":"cliIP"
+          }
+         },
+        "allowed":[
+          {
+            "map_fieldvalue":{
+              "pre_value":"true",
+              "post_value":"1"
+            }
+           },
+          {
+            "map_fieldvalue":{
+              "pre_value":"false",
+              "post_value":"0"
+            }
+           },
+          {
+            "map_fieldname":{
+              "new_fieldname":"result"
+            }
+           }
+         ],
+        "cmd":{
+          "map_fieldname":{
+            "new_fieldname":"action"
+          }
+         },
+        "proto":{
+          "map_fieldname":{
+            "new_fieldname":"cliType"
+          }
+         },
+        "callerContext":{
+          "map_fieldname":{
+            "new_fieldname":"req_caller_id"
+          }
+         }
+       }
+     },
+    {
+      "filter":"grok",
+      "sort_order":2,
+      "source_field":"ugi",
+      "remove_source_field":"false",
+      "conditions":{
+        "fields":{
+          "type":[
+            "hdfs_audit"
+          ]
+         }
+       },
+      "message_pattern":"%{USERNAME:p_user}.+auth:%{USERNAME:p_authType}.+via %{USERNAME:k_user}.+auth:%{USERNAME:k_authType}|%{USERNAME:user}.+auth:%{USERNAME:authType}|%{USERNAME:x_user}",
+      "post_map_values":{
+        "user":{
+          "map_fieldname":{
+            "new_fieldname":"reqUser"
+          }
+         },
+        "x_user":{
+          "map_fieldname":{
+            "new_fieldname":"reqUser"
+          }
+         },
+        "p_user":{
+          "map_fieldname":{
+            "new_fieldname":"reqUser"
+          }
+         },
+        "k_user":{
+          "map_fieldname":{
+            "new_fieldname":"proxyUsers"
+          }
+         },
+        "p_authType":{
+          "map_fieldname":{
+            "new_fieldname":"authType"
+          }
+         },
+        "k_authType":{
+          "map_fieldname":{
+            "new_fieldname":"proxyAuthType"
+          }
+         }
+       }
+     }
+   ]
+ }
+    </value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
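
The "keyvalue" filter above splits the audit log_message on tabs into key=value pairs (field_split "\t", value_split "="), then renames selected fields via post_map_values (src to resource, ip to cliIP, cmd to action, and so on). The sketch below imitates only that split-and-rename step; it is not Logfeeder code, and the sample line is an invented stand-in for a NameNode audit record:

    # Imitate the keyvalue filter: split on tabs, split each pair on "=",
    # then rename fields roughly as post_map_values does. Sample line is invented.
    sample = ("allowed=true\tugi=alice (auth:SIMPLE)\tip=/10.0.0.5\t"
              "cmd=listStatus\tsrc=/tmp\tdst=null\tperm=null\tproto=rpc")

    renames = {"src": "resource", "ip": "cliIP", "cmd": "action", "proto": "cliType"}

    fields = {}
    for pair in sample.split("\t"):
        key, _, value = pair.partition("=")
        fields[renames.get(key, key)] = value

    print(fields)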

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hdfs-site.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hdfs-site.xml
new file mode 100644
index 0000000..689b6d08
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hdfs-site.xml
@@ -0,0 +1,632 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<!-- Put site-specific property overrides in this file. -->
+<configuration supports_final="true">
+  <!-- file system properties -->
+  <property>
+    <name>dfs.namenode.name.dir</name>
+    <!-- cluster variant -->
+    <value>/hadoop/hdfs/namenode</value>
+    <display-name>NameNode directories</display-name>
+    <description>Determines where on the local filesystem the DFS name node
+      should store the name table.  If this is a comma-delimited list
+      of directories then the name table is replicated in all of the
+      directories, for redundancy. </description>
+    <final>true</final>
+    <value-attributes>
+      <type>directories</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.support.append</name>
+    <value>true</value>
+    <description>to enable dfs append</description>
+    <final>true</final>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.webhdfs.enabled</name>
+    <value>true</value>
+    <display-name>WebHDFS enabled</display-name>
+    <description>Whether to enable WebHDFS feature</description>
+    <final>true</final>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.datanode.failed.volumes.tolerated</name>
+    <value>0</value>
+    <description> Number of failed disks a DataNode would tolerate before it stops offering service</description>
+    <final>true</final>
+    <display-name>DataNode failed disk tolerance</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>2</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hdfs-site</type>
+        <name>dfs.datanode.data.dir</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.datanode.data.dir</name>
+    <value>/hadoop/hdfs/data</value>
+    <display-name>DataNode directories</display-name>
+    <description>Determines where on the local filesystem a DFS data node
+      should store its blocks.  If this is a comma-delimited
+      list of directories, then data will be stored in all named
+      directories, typically on different devices.
+      Directories that do not exist are ignored.
+    </description>
+    <final>true</final>
+    <value-attributes>
+      <type>directories</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.hosts.exclude</name>
+    <value>/etc/hadoop/conf/dfs.exclude</value>
+    <description>Names a file that contains a list of hosts that are
+      not permitted to connect to the namenode.  The full pathname of the
+      file must be specified.  If the value is empty, no hosts are
+      excluded.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!--
+    <property>
+      <name>dfs.hosts</name>
+      <value>/etc/hadoop/conf/dfs.include</value>
+      <description>Names a file that contains a list of hosts that are
+      permitted to connect to the namenode. The full pathname of the file
+      must be specified.  If the value is empty, all hosts are
+      permitted.</description>
+    </property>
+  -->
+  <property>
+    <name>dfs.namenode.checkpoint.dir</name>
+    <value>/hadoop/hdfs/namesecondary</value>
+    <display-name>SecondaryNameNode Checkpoint directories</display-name>
+    <description>Determines where on the local filesystem the DFS secondary
+      name node should store the temporary images to merge.
+      If this is a comma-delimited list of directories then the image is
+      replicated in all of the directories for redundancy.
+    </description>
+    <value-attributes>
+      <type>directories</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.namenode.checkpoint.edits.dir</name>
+    <value>${dfs.namenode.checkpoint.dir}</value>
+    <description>Determines where on the local filesystem the DFS secondary
+      name node should store the temporary edits to merge.
+      If this is a comma-delimited list of directories then the edits are
+      replicated in all of the directories for redundancy.
+      Default value is the same as dfs.namenode.checkpoint.dir
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.namenode.checkpoint.period</name>
+    <value>21600</value>
+    <display-name>HDFS Maximum Checkpoint Delay</display-name>
+    <description>The number of seconds between two periodic checkpoints.</description>
+    <value-attributes>
+      <type>int</type>
+      <unit>seconds</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.namenode.checkpoint.txns</name>
+    <value>1000000</value>
+    <description>The Secondary NameNode or CheckpointNode will create a checkpoint
+      of the namespace every 'dfs.namenode.checkpoint.txns' transactions,
+      regardless of whether 'dfs.namenode.checkpoint.period' has expired.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.replication.max</name>
+    <value>50</value>
+    <description>Maximal block replication.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.replication</name>
+    <value>3</value>
+    <display-name>Block replication</display-name>
+    <description>Default block replication.
+    </description>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.heartbeat.interval</name>
+    <value>3</value>
+    <description>Determines datanode heartbeat interval in seconds.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.namenode.safemode.threshold-pct</name>
+    <value>0.999</value>
+    <description>
+      Specifies the percentage of blocks that should satisfy
+      the minimal replication requirement defined by dfs.namenode.replication.min.
+      Values less than or equal to 0 mean not to start in safe mode.
+      Values greater than 1 will make safe mode permanent.
+    </description>
+    <display-name>Minimum replicated blocks %</display-name>
+    <value-attributes>
+      <type>float</type>
+      <minimum>0.990</minimum>
+      <maximum>1.000</maximum>
+      <increment-step>0.001</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.datanode.balance.bandwidthPerSec</name>
+    <value>6250000</value>
+    <description>
+      Specifies the maximum amount of bandwidth that each datanode
+      can utilize for the balancing purpose in terms of
+      the number of bytes per second.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.https.port</name>
+    <value>50470</value>
+    <description>
+      This property is used by HftpFileSystem.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.datanode.address</name>
+    <value>0.0.0.0:50010</value>
+    <description>
+      The datanode server address and port for data transfer.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.datanode.http.address</name>
+    <value>0.0.0.0:50075</value>
+    <description>
+      The datanode http server address and port.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.datanode.https.address</name>
+    <value>0.0.0.0:50475</value>
+    <description>
+      The datanode https server address and port.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.blocksize</name>
+    <value>134217728</value>
+    <description>The default block size for new files.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.namenode.http-address</name>
+    <value>localhost:50070</value>
+    <description>The address and the base port on which the NameNode
+      web UI listens.</description>
+    <final>true</final>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>dfs.namenode.rpc-address</name>
+    <value>localhost:8020</value>
+    <description>RPC address that handles all clients requests.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>dfs.datanode.du.reserved</name>
+    <!-- cluster variant -->
+    <value>1073741824</value>
+    <display-name>Reserved space for HDFS</display-name>
+    <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+    </description>
+    <value-attributes>
+      <type>int</type>
+      <unit>bytes</unit>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hdfs-site</type>
+        <name>dfs.datanode.data.dir</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.datanode.ipc.address</name>
+    <value>0.0.0.0:8010</value>
+    <description>
+      The datanode ipc server address and port.
+      If the port is 0 then the server will start on a free port.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.blockreport.initialDelay</name>
+    <value>120</value>
+    <description>Delay for first block report in seconds.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.datanode.max.transfer.threads</name>
+    <value>1024</value>
+    <description>Specifies the maximum number of threads to use for transferring data in and out of the datanode.</description>
+    <display-name>DataNode max data transfer threads</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>48000</maximum>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- Permissions configuration -->
+  <property>
+    <name>fs.permissions.umask-mode</name>
+    <value>022</value>
+    <description>
+      The octal umask used when creating files and directories.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.permissions.enabled</name>
+    <value>true</value>
+    <description>
+      If "true", enable permission checking in HDFS.
+      If "false", permission checking is turned off,
+      but all other behavior is unchanged.
+      Switching from one parameter value to the other does not change the mode,
+      owner or group of files or directories.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.permissions.superusergroup</name>
+    <value>hdfs</value>
+    <description>The name of the group of super-users.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.namenode.handler.count</name>
+    <value>100</value>
+    <description>Added to grow Queue size so that more client connections are allowed</description>
+    <display-name>NameNode Server threads</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>1</minimum>
+      <maximum>200</maximum>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.block.access.token.enable</name>
+    <value>true</value>
+    <description>
+      If "true", access tokens are used as capabilities for accessing datanodes.
+      If "false", no access tokens are checked on accessing datanodes.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <!-- cluster variant -->
+    <name>dfs.namenode.secondary.http-address</name>
+    <value>localhost:50090</value>
+    <description>Address of secondary namenode web server</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>dfs.namenode.https-address</name>
+    <value>localhost:50470</value>
+    <description>The https address where namenode binds</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>dfs.datanode.data.dir.perm</name>
+    <value>750</value>
+    <display-name>DataNode directories permission</display-name>
+    <description>The permissions that should be there on dfs.datanode.data.dir
+      directories. The datanode will not come up if the permissions are
+      different on existing dfs.datanode.data.dir directories. If the directories
+      don't exist, they will be created with this permission.</description>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.namenode.accesstime.precision</name>
+    <value>0</value>
+    <display-name>Access time precision</display-name>
+    <description>The access time for an HDFS file is precise up to this value.
+      The default value is 1 hour. Setting a value of 0 disables
+      access times for HDFS.
+    </description>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.cluster.administrators</name>
+    <value> hdfs</value>
+    <description>ACL for who all can view the default servlets in the HDFS</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.namenode.avoid.read.stale.datanode</name>
+    <value>true</value>
+    <description>
+      Indicate whether or not to avoid reading from stale datanodes whose
+      heartbeat messages have not been received by the namenode for more than a
+      specified time interval.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.namenode.avoid.write.stale.datanode</name>
+    <value>true</value>
+    <description>
+      Indicate whether or not to avoid writing to stale datanodes whose
+      heartbeat messages have not been received by the namenode for more than a
+      specified time interval.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.namenode.write.stale.datanode.ratio</name>
+    <value>1.0f</value>
+    <description>When the ratio of stale datanodes to total datanodes is greater
+      than this ratio, stop avoiding writes to stale nodes so as to prevent causing hotspots.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.namenode.stale.datanode.interval</name>
+    <value>30000</value>
+    <description>Datanode is stale after not getting a heartbeat in this interval in ms</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.journalnode.http-address</name>
+    <value>0.0.0.0:8480</value>
+    <description>The address and port the JournalNode web UI listens on.
+      If the port is 0 then the server will start on a free port. </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.journalnode.https-address</name>
+    <value>0.0.0.0:8481</value>
+    <description>The address and port the JournalNode HTTPS server listens on.
+      If the port is 0 then the server will start on a free port. </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.journalnode.edits.dir</name>
+    <value>/hadoop/hdfs/journalnode</value>
+    <description>The path where the JournalNode daemon will store its local state. </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- HDFS Short-Circuit Local Reads -->
+  <property>
+    <name>dfs.client.read.shortcircuit</name>
+    <value>true</value>
+    <display-name>HDFS Short-circuit read</display-name>
+    <description>
+      This configuration parameter turns on short-circuit local reads.
+    </description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.domain.socket.path</name>
+    <value>/var/lib/hadoop-hdfs/dn_socket</value>
+    <description>
+      This is a path to a UNIX domain socket that will be used for communication between the DataNode and local HDFS clients.
+      If the string "_PORT" is present in this path, it will be replaced by the TCP port of the DataNode.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.client.read.shortcircuit.streams.cache.size</name>
+    <value>4096</value>
+    <description>
+      The DFSClient maintains a cache of recently opened file descriptors. This
+      parameter controls the size of that cache. Setting this higher will use
+      more file descriptors, but potentially provide better performance on
+      workloads involving lots of seeks.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.namenode.name.dir.restore</name>
+    <value>true</value>
+    <description>Set to true to enable NameNode to attempt recovering a previously failed dfs.namenode.name.dir.
+      When enabled, a recovery of any failed directory is attempted during checkpoint.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.http.policy</name>
+    <value>HTTP_ONLY</value>
+    <description>
+      Decide if HTTPS (SSL) is supported on HDFS. This configures the HTTP endpoint for HDFS daemons.
+      The following values are supported: HTTP_ONLY (service is provided only on http), HTTPS_ONLY
+      (service is provided only on https), HTTP_AND_HTTPS (service is provided on both http and https).
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- These configs were inherited from HDP 2.1 -->
+  <property>
+    <name>dfs.namenode.audit.log.async</name>
+    <value>true</value>
+    <description>Whether to enable async auditlog</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.namenode.fslock.fair</name>
+    <value>false</value>
+    <description>Whether fsLock is fair</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- These configs were inherited from HDP 2.2 -->
+  <property>
+    <name>dfs.namenode.startup.delay.block.deletion.sec</name>
+    <value>3600</value>
+    <description>
+      The delay in seconds for which block deletion is paused
+      after NameNode startup. By default it is disabled.
+      In the case where a directory with a large number of directories and files is
+      deleted, the suggested delay is one hour, to give the administrator enough time
+      to notice the large number of pending deletion blocks and take corrective
+      action.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.client.retry.policy.enabled</name>
+    <value>false</value>
+    <description>Enables HDFS client retry in the event of a NameNode failure.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.content-summary.limit</name>
+    <value>5000</value>
+    <description>Dfs content summary limit.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.encryption.key.provider.uri</name>
+    <description>
+      The KeyProvider to use when interacting with encryption keys used
+      when reading and writing to an encryption zone.
+    </description>
+    <value/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hadoop-env</type>
+        <name>keyserver_host</name>
+      </property>
+      <property>
+        <type>hadoop-env</type>
+        <name>keyserver_port</name>
+      </property>
+      <property>
+        <type>kms-env</type>
+        <name>kms_port</name>
+      </property>
+      <property>
+        <type>ranger-kms-site</type>
+        <name>ranger.service.https.attrib.ssl.enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- These configs were inherited from HDP 2.3 -->
+  <property>
+    <name>nfs.file.dump.dir</name>
+    <value>/tmp/.hdfs-nfs</value>
+    <display-name>NFSGateway dump directory</display-name>
+    <description>
+      This directory is used to temporarily save out-of-order writes before
+      writing to HDFS. For each file, the out-of-order writes are dumped to disk once
+      they accumulate beyond a certain threshold (e.g., 1MB) in memory.
+      Make sure the directory has enough space.
+    </description>
+    <value-attributes>
+      <type>directory</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>nfs.exports.allowed.hosts</name>
+    <value>* rw</value>
+    <description>
+      By default, the export can be mounted by any client. To better control access,
+      users can update this property. The value string contains a machine name and an access privilege,
+      separated by whitespace characters. The machine name can be a single host, a wildcard, or an IPv4
+      network. The access privilege uses rw or ro to specify read-write or read-only access of the machines
+      to the exports. If the access privilege is not provided, the default is read-only. Entries are separated
+      by &quot;;&quot;. For example: &quot;192.168.0.0/22 rw ; host*.example.com ; host1.test.org ro;&quot;.
+    </description>
+    <display-name>Allowed hosts</display-name>
+    <on-ambari-upgrade add="true"/>
+  </property>
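
The export format described above (whitespace between the host pattern and the
privilege, entries separated by ";", read-only when no privilege is given) is
easy to mis-type, so a small parser sketch may help when reviewing values; this
is illustrative only, not the NFS gateway's own parsing code:

    # Illustrative parser for the nfs.exports.allowed.hosts format.
    # Each ';'-separated entry is "<host-pattern> [rw|ro]"; a missing
    # privilege defaults to read-only.
    def parse_nfs_exports(value):
        exports = []
        for entry in value.split(";"):
            parts = entry.split()
            if not parts:
                continue
            privilege = parts[1].lower() if len(parts) > 1 else "ro"
            exports.append((parts[0], privilege))
        return exports

    print(parse_nfs_exports("192.168.0.0/22 rw ; host*.example.com ; host1.test.org ro;"))
    # [('192.168.0.0/22', 'rw'), ('host*.example.com', 'ro'), ('host1.test.org', 'ro')]
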
+  <property>
+    <name>dfs.encrypt.data.transfer.cipher.suites</name>
+    <value>AES/CTR/NoPadding</value>
+    <description>
+      This value may be either undefined or AES/CTR/NoPadding. If defined, then
+      dfs.encrypt.data.transfer uses the specified cipher suite for data encryption.
+      If not defined, then only the algorithm specified in dfs.encrypt.data.transfer.algorithm
+      is used. By default, the property is not defined.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.namenode.inode.attributes.provider.class</name>
+    <description>INode attributes provider class used to enable the Ranger HDFS plugin.</description>
+    <depends-on>
+      <property>
+        <type>ranger-hdfs-plugin-properties</type>
+        <name>ranger-hdfs-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
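
For review purposes, the name/value pairs in an Ambari configuration file such
as the hdfs-site.xml above can be listed with a few lines of Python; a minimal
sketch (the file path is a placeholder):

    # Minimal sketch: dump the <name>/<value> pairs from an Ambari
    # configuration XML file like the one above.
    import xml.etree.ElementTree as ET

    def load_properties(path):
        props = {}
        for prop in ET.parse(path).getroot().findall("property"):
            name = prop.findtext("name")
            if name:
                props[name] = prop.findtext("value", default="")
        return props

    for name, value in sorted(load_properties("hdfs-site.xml").items()):
        print("{0} = {1}".format(name, value))
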


[41/51] [abbrv] ambari git commit: AMBARI-19137. HDP 3.0 TP - move ZK, HDFS, YARN/MR into new common-services version (alejandro)

Posted by sm...@apache.org.
AMBARI-19137. HDP 3.0 TP - move ZK, HDFS, YARN/MR into new common-services version (alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d845449a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d845449a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d845449a

Branch: refs/heads/branch-feature-AMBARI-18901
Commit: d845449afaa2f64c6b06f48159248c0538bd493c
Parents: facfa8c
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Thu Dec 8 15:16:23 2016 -0800
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Thu Dec 8 15:16:23 2016 -0800

----------------------------------------------------------------------
 .../common-services/HDFS/3.0.0/alerts.json      | 1786 ++++
 .../HDFS/3.0.0/configuration/core-site.xml      |  224 +
 .../HDFS/3.0.0/configuration/hadoop-env.xml     |  421 +
 .../hadoop-metrics2.properties.xml              |  125 +
 .../HDFS/3.0.0/configuration/hadoop-policy.xml  |  130 +
 .../HDFS/3.0.0/configuration/hdfs-log4j.xml     |  226 +
 .../3.0.0/configuration/hdfs-logsearch-conf.xml |  248 +
 .../HDFS/3.0.0/configuration/hdfs-site.xml      |  632 ++
 .../3.0.0/configuration/ranger-hdfs-audit.xml   |  217 +
 .../ranger-hdfs-plugin-properties.xml           |   98 +
 .../configuration/ranger-hdfs-policymgr-ssl.xml |   67 +
 .../configuration/ranger-hdfs-security.xml      |   65 +
 .../HDFS/3.0.0/configuration/ssl-client.xml     |   70 +
 .../HDFS/3.0.0/configuration/ssl-server.xml     |   80 +
 .../common-services/HDFS/3.0.0/kerberos.json    |  246 +
 .../common-services/HDFS/3.0.0/metainfo.xml     |  405 +
 .../common-services/HDFS/3.0.0/metrics.json     | 7905 ++++++++++++++++++
 .../package/alerts/alert_checkpoint_time.py     |  255 +
 .../alerts/alert_datanode_unmounted_data_dir.py |  177 +
 .../package/alerts/alert_ha_namenode_health.py  |  243 +
 .../package/alerts/alert_metrics_deviation.py   |  470 ++
 .../package/alerts/alert_upgrade_finalized.py   |  179 +
 .../HDFS/3.0.0/package/files/checkWebUI.py      |   83 +
 .../HDFS/3.0.0/package/scripts/__init__.py      |   20 +
 .../scripts/balancer-emulator/balancer-err.log  | 1032 +++
 .../scripts/balancer-emulator/balancer.log      |   29 +
 .../scripts/balancer-emulator/hdfs-command.py   |   45 +
 .../HDFS/3.0.0/package/scripts/datanode.py      |  178 +
 .../3.0.0/package/scripts/datanode_upgrade.py   |  156 +
 .../HDFS/3.0.0/package/scripts/hdfs.py          |  178 +
 .../HDFS/3.0.0/package/scripts/hdfs_client.py   |  122 +
 .../HDFS/3.0.0/package/scripts/hdfs_datanode.py |   85 +
 .../HDFS/3.0.0/package/scripts/hdfs_namenode.py |  562 ++
 .../3.0.0/package/scripts/hdfs_nfsgateway.py    |   75 +
 .../3.0.0/package/scripts/hdfs_rebalance.py     |  130 +
 .../3.0.0/package/scripts/hdfs_snamenode.py     |   66 +
 .../3.0.0/package/scripts/install_params.py     |   39 +
 .../HDFS/3.0.0/package/scripts/journalnode.py   |  203 +
 .../package/scripts/journalnode_upgrade.py      |  152 +
 .../HDFS/3.0.0/package/scripts/namenode.py      |  424 +
 .../3.0.0/package/scripts/namenode_ha_state.py  |  219 +
 .../3.0.0/package/scripts/namenode_upgrade.py   |  322 +
 .../HDFS/3.0.0/package/scripts/nfsgateway.py    |  151 +
 .../HDFS/3.0.0/package/scripts/params.py        |   28 +
 .../HDFS/3.0.0/package/scripts/params_linux.py  |  527 ++
 .../3.0.0/package/scripts/params_windows.py     |   79 +
 .../HDFS/3.0.0/package/scripts/service_check.py |  152 +
 .../3.0.0/package/scripts/setup_ranger_hdfs.py  |  121 +
 .../HDFS/3.0.0/package/scripts/snamenode.py     |  155 +
 .../HDFS/3.0.0/package/scripts/status_params.py |   58 +
 .../HDFS/3.0.0/package/scripts/utils.py         |  383 +
 .../HDFS/3.0.0/package/scripts/zkfc_slave.py    |  225 +
 .../package/templates/exclude_hosts_list.j2     |   21 +
 .../HDFS/3.0.0/package/templates/hdfs.conf.j2   |   35 +
 .../HDFS/3.0.0/package/templates/slaves.j2      |   21 +
 .../HDFS/3.0.0/quicklinks/quicklinks.json       |   80 +
 .../HDFS/3.0.0/themes/theme.json                |  179 +
 .../common-services/HDFS/3.0.0/widgets.json     |  649 ++
 .../YARN/3.0.0/MAPREDUCE2_metrics.json          | 2596 ++++++
 .../YARN/3.0.0/YARN_metrics.json                | 3486 ++++++++
 .../YARN/3.0.0/YARN_widgets.json                |  670 ++
 .../common-services/YARN/3.0.0/alerts.json      |  392 +
 .../3.0.0/configuration-mapred/mapred-env.xml   |  104 +
 .../mapred-logsearch-conf.xml                   |   80 +
 .../3.0.0/configuration-mapred/mapred-site.xml  |  540 ++
 .../3.0.0/configuration/capacity-scheduler.xml  |  183 +
 .../3.0.0/configuration/ranger-yarn-audit.xml   |  177 +
 .../ranger-yarn-plugin-properties.xml           |   82 +
 .../configuration/ranger-yarn-policymgr-ssl.xml |   66 +
 .../configuration/ranger-yarn-security.xml      |   58 +
 .../YARN/3.0.0/configuration/yarn-env.xml       |  306 +
 .../YARN/3.0.0/configuration/yarn-log4j.xml     |  103 +
 .../3.0.0/configuration/yarn-logsearch-conf.xml |  104 +
 .../YARN/3.0.0/configuration/yarn-site.xml      | 1151 +++
 .../common-services/YARN/3.0.0/kerberos.json    |  278 +
 .../common-services/YARN/3.0.0/metainfo.xml     |  383 +
 .../package/alerts/alert_nodemanager_health.py  |  209 +
 .../alerts/alert_nodemanagers_summary.py        |  219 +
 .../files/validateYarnComponentStatusWindows.py |  161 +
 .../YARN/3.0.0/package/scripts/__init__.py      |   20 +
 .../scripts/application_timeline_server.py      |  162 +
 .../YARN/3.0.0/package/scripts/historyserver.py |  192 +
 .../YARN/3.0.0/package/scripts/install_jars.py  |   99 +
 .../package/scripts/mapred_service_check.py     |  172 +
 .../3.0.0/package/scripts/mapreduce2_client.py  |   98 +
 .../YARN/3.0.0/package/scripts/nodemanager.py   |  166 +
 .../package/scripts/nodemanager_upgrade.py      |   74 +
 .../YARN/3.0.0/package/scripts/params.py        |   32 +
 .../YARN/3.0.0/package/scripts/params_linux.py  |  476 ++
 .../3.0.0/package/scripts/params_windows.py     |   62 +
 .../3.0.0/package/scripts/resourcemanager.py    |  293 +
 .../YARN/3.0.0/package/scripts/service.py       |  106 +
 .../YARN/3.0.0/package/scripts/service_check.py |  185 +
 .../3.0.0/package/scripts/setup_ranger_yarn.py  |   71 +
 .../YARN/3.0.0/package/scripts/status_params.py |   61 +
 .../YARN/3.0.0/package/scripts/yarn.py          |  498 ++
 .../YARN/3.0.0/package/scripts/yarn_client.py   |   67 +
 .../package/templates/container-executor.cfg.j2 |   40 +
 .../package/templates/exclude_hosts_list.j2     |   21 +
 .../3.0.0/package/templates/mapreduce.conf.j2   |   35 +
 .../package/templates/taskcontroller.cfg.j2     |   38 +
 .../YARN/3.0.0/package/templates/yarn.conf.j2   |   35 +
 .../3.0.0/quicklinks-mapred/quicklinks.json     |   80 +
 .../YARN/3.0.0/quicklinks/quicklinks.json       |   80 +
 .../YARN/3.0.0/themes-mapred/theme.json         |  132 +
 .../YARN/3.0.0/themes/theme.json                |  250 +
 .../ZOOKEEPER/3.4.9/metainfo.xml                |   51 +
 .../services/HDFS/configuration/core-site.xml   |   56 -
 .../services/HDFS/configuration/hadoop-env.xml  |  214 +-
 .../services/HDFS/configuration/hdfs-log4j.xml  |  226 -
 .../services/HDFS/configuration/hdfs-site.xml   |  153 -
 .../HDFS/configuration/ranger-hdfs-audit.xml    |  217 -
 .../ranger-hdfs-plugin-properties.xml           |   98 -
 .../configuration/ranger-hdfs-policymgr-ssl.xml |   67 -
 .../HDFS/configuration/ranger-hdfs-security.xml |   65 -
 .../services/HDFS/configuration/widgets.json    |  649 --
 .../stacks/HDP/3.0/services/HDFS/kerberos.json  |  246 -
 .../stacks/HDP/3.0/services/HDFS/metainfo.xml   |   49 +-
 .../services/HDFS/quicklinks/quicklinks.json    |   80 -
 .../HDP/3.0/services/HDFS/themes/theme.json     |  179 -
 .../HDP/3.0/services/YARN/YARN_widgets.json     |  670 --
 .../YARN/configuration-mapred/mapred-env.xml    |   58 +-
 .../YARN/configuration-mapred/mapred-site.xml   |   76 +-
 .../YARN/configuration/capacity-scheduler.xml   |   71 -
 .../YARN/configuration/ranger-yarn-audit.xml    |  177 -
 .../ranger-yarn-plugin-properties.xml           |   82 -
 .../configuration/ranger-yarn-policymgr-ssl.xml |   66 -
 .../YARN/configuration/ranger-yarn-security.xml |   58 -
 .../services/YARN/configuration/yarn-env.xml    |  200 -
 .../services/YARN/configuration/yarn-log4j.xml  |  103 -
 .../services/YARN/configuration/yarn-site.xml   |  783 +-
 .../stacks/HDP/3.0/services/YARN/kerberos.json  |  278 -
 .../stacks/HDP/3.0/services/YARN/metainfo.xml   |   96 +-
 .../YARN/quicklinks-mapred/quicklinks.json      |   80 -
 .../services/YARN/quicklinks/quicklinks.json    |   80 -
 .../3.0/services/YARN/themes-mapred/theme.json  |  132 -
 .../HDP/3.0/services/YARN/themes/theme.json     |  250 -
 .../HDP/3.0/services/ZOOKEEPER/metainfo.xml     |    6 +-
 138 files changed, 36333 insertions(+), 5429 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/alerts.json b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/alerts.json
new file mode 100644
index 0000000..8ccfa47
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/alerts.json
@@ -0,0 +1,1786 @@
+{
+  "HDFS":{
+    "service": [
+      {
+        "name": "datanode_process_percent",
+        "label": "Percent DataNodes Available",
+        "description": "This alert is triggered if the number of down DataNodes in the cluster is greater than the configured critical threshold. It aggregates the results of DataNode process checks.",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "AGGREGATE",
+          "alert_name": "datanode_process",
+          "reporting": {
+            "ok": {
+              "text": "affected: [{1}], total: [{0}]"
+            },
+            "warning": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 10
+            },
+            "critical": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 30
+            },
+            "units" : "%",
+            "type": "PERCENT"
+          }
+        }
+      },
+      {
+        "name": "datanode_storage_percent",
+        "label": "Percent DataNodes With Available Space",
+        "description": "This service-level alert is triggered if the storage on a certain percentage of DataNodes exceeds either the warning or critical threshold values.",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "AGGREGATE",
+          "alert_name": "datanode_storage",
+          "reporting": {
+            "ok": {
+              "text": "affected: [{1}], total: [{0}]"
+            },
+            "warning": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 10
+            },
+            "critical": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 30
+            },
+            "units" : "%",
+            "type": "PERCENT"
+          }
+        }
+      },
+      {
+        "name": "journalnode_process_percent",
+        "label": "Percent JournalNodes Available",
+        "description": "This alert is triggered if the number of down JournalNodes in the cluster is greater than the configured critical threshold. It aggregates the results of JournalNode process checks.",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "AGGREGATE",
+          "alert_name": "journalnode_process",
+          "reporting": {
+            "ok": {
+              "text": "affected: [{1}], total: [{0}]"
+            },
+            "warning": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 33
+            },
+            "critical": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 50
+            },
+            "units" : "%",
+            "type": "PERCENT"
+          }
+        }
+      }
+    ],
+    "NAMENODE": [
+      {
+        "name": "namenode_webui",
+        "label": "NameNode Web UI",
+        "description": "This host-level alert is triggered if the NameNode Web UI is unreachable.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "WEB",
+          "uri": {
+            "http": "{{hdfs-site/dfs.namenode.http-address}}",
+            "https": "{{hdfs-site/dfs.namenode.https-address}}",
+            "https_property": "{{hdfs-site/dfs.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
+            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
+            "connection_timeout": 5.0,
+            "high_availability": {
+              "nameservice": "{{hdfs-site/dfs.internal.nameservices}}",
+              "alias_key" : "{{hdfs-site/dfs.ha.namenodes.{{ha-nameservice}}}}",
+              "http_pattern" : "{{hdfs-site/dfs.namenode.http-address.{{ha-nameservice}}.{{alias}}}}",
+              "https_pattern" : "{{hdfs-site/dfs.namenode.https-address.{{ha-nameservice}}.{{alias}}}}"
+            }
+          },
+          "reporting": {
+            "ok": {
+              "text": "HTTP {0} response in {2:.3f}s"
+            },
+            "warning":{
+              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
+            },
+            "critical": {
+              "text": "Connection failed to {1} ({3})"
+            }
+          }
+        }
+      },
+      {
+        "name": "upgrade_finalized_state",
+        "label": "HDFS Upgrade Finalized State",
+        "description": "This service-level alert is triggered if HDFS is not in the finalized state",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "HDFS/2.1.0.2.0/package/alerts/alert_upgrade_finalized.py",
+          "parameters": []
+        }
+      },
+      {
+        "name": "namenode_cpu",
+        "label": "NameNode Host CPU Utilization",
+        "description": "This host-level alert is triggered if CPU utilization of the NameNode exceeds certain warning and critical thresholds. It checks the NameNode JMX Servlet for the SystemCPULoad property. The threshold values are in percent.",
+        "interval": 5,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{hdfs-site/dfs.namenode.http-address}}",
+            "https": "{{hdfs-site/dfs.namenode.https-address}}",
+            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
+            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
+            "https_property": "{{hdfs-site/dfs.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "connection_timeout": 5.0,
+            "high_availability": {
+              "nameservice": "{{hdfs-site/dfs.internal.nameservices}}",
+              "alias_key" : "{{hdfs-site/dfs.ha.namenodes.{{ha-nameservice}}}}",
+              "http_pattern" : "{{hdfs-site/dfs.namenode.http-address.{{ha-nameservice}}.{{alias}}}}",
+              "https_pattern" : "{{hdfs-site/dfs.namenode.https-address.{{ha-nameservice}}.{{alias}}}}"
+            }
+          },
+          "reporting": {
+            "ok": {
+              "text": "{1} CPU, load {0:.1%}"
+            },
+            "warning": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 200
+            },
+            "critical": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 250
+            },
+            "units" : "%",
+            "type": "PERCENT"
+          },
+          "jmx": {
+            "property_list": [
+              "java.lang:type=OperatingSystem/SystemCpuLoad",
+              "java.lang:type=OperatingSystem/AvailableProcessors"
+            ],
+            "value": "{0} * 100"
+          }
+        }
+      },
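
The METRIC alerts in this file fetch each JMX attribute listed in property_list
and substitute the readings into the "value" expression ({0}, {1}, ...) before
comparing the result with the warning and critical thresholds. A simplified
sketch of that idea (not Ambari's actual alert runner):

    # Simplified sketch: substitute JMX readings into a METRIC alert's "value"
    # expression and compare against its thresholds. Illustrative only.
    def evaluate_metric_alert(jmx_values, expression, warning, critical):
        value = eval(expression.format(*jmx_values))  # trusted, static expression
        if value >= critical:
            return "CRITICAL", value
        if value >= warning:
            return "WARNING", value
        return "OK", value

    # namenode_cpu example: SystemCpuLoad=0.42, AvailableProcessors=8
    print(evaluate_metric_alert([0.42, 8], "{0} * 100", warning=200, critical=250))
    # ('OK', 42.0)
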
+      {
+        "name": "namenode_hdfs_blocks_health",
+        "label": "NameNode Blocks Health",
+        "description": "This service-level alert is triggered if the number of corrupt or missing blocks exceeds the configured critical threshold. The threshold values are in blocks.",
+        "interval": 2,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{hdfs-site/dfs.namenode.http-address}}",
+            "https": "{{hdfs-site/dfs.namenode.https-address}}",
+            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
+            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
+            "https_property": "{{hdfs-site/dfs.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "connection_timeout": 5.0,
+            "high_availability": {
+              "nameservice": "{{hdfs-site/dfs.internal.nameservices}}",
+              "alias_key" : "{{hdfs-site/dfs.ha.namenodes.{{ha-nameservice}}}}",
+              "http_pattern" : "{{hdfs-site/dfs.namenode.http-address.{{ha-nameservice}}.{{alias}}}}",
+              "https_pattern" : "{{hdfs-site/dfs.namenode.https-address.{{ha-nameservice}}.{{alias}}}}"
+            }
+          },
+          "reporting": {
+            "ok": {
+              "text": "Total Blocks:[{1}], Missing Blocks:[{0}]"
+            },
+            "warning": {
+              "text": "Total Blocks:[{1}], Missing Blocks:[{0}]",
+              "value": 1
+            },          
+            "critical": {
+              "text": "Total Blocks:[{1}], Missing Blocks:[{0}]",
+              "value": 1
+            },
+            "units" : "Blocks"
+          },
+          "jmx": {
+            "property_list": [
+              "Hadoop:service=NameNode,name=FSNamesystem/MissingBlocks",
+              "Hadoop:service=NameNode,name=FSNamesystem/BlocksTotal"
+            ],
+            "value": "{0}"
+          }
+        }
+      },
+      {
+        "name": "namenode_hdfs_pending_deletion_blocks",
+        "label": "HDFS Pending Deletion Blocks",
+        "description": "This service-level alert is triggered if the number of blocks pending deletion in HDFS exceeds the configured warning and critical thresholds. It checks the NameNode JMX Servlet for the PendingDeletionBlock property.",
+        "interval": 2,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{hdfs-site/dfs.namenode.http-address}}",
+            "https": "{{hdfs-site/dfs.namenode.https-address}}",
+            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
+            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
+            "https_property": "{{hdfs-site/dfs.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "connection_timeout": 5.0,
+            "high_availability": {
+              "nameservice": "{{hdfs-site/dfs.internal.nameservices}}",
+              "alias_key" : "{{hdfs-site/dfs.ha.namenodes.{{ha-nameservice}}}}",
+              "http_pattern" : "{{hdfs-site/dfs.namenode.http-address.{{ha-nameservice}}.{{alias}}}}",
+              "https_pattern" : "{{hdfs-site/dfs.namenode.https-address.{{ha-nameservice}}.{{alias}}}}"
+            }
+          },
+          "reporting": {
+            "ok": {
+              "text": "Pending Deletion Blocks:[{0}]"
+            },
+            "warning": {
+              "text": "Pending Deletion Blocks:[{0}]",
+              "value": 100000
+            },
+            "critical": {
+              "text": "Pending Deletion Blocks:[{0}]",
+              "value": 100000
+            },
+            "units" : "Blocks"
+          },
+          "jmx": {
+            "property_list": [
+              "Hadoop:service=NameNode,name=FSNamesystem/PendingDeletionBlocks"
+            ],
+            "value": "{0}"
+          }
+        }
+      },
+      {
+        "name": "namenode_hdfs_capacity_utilization",
+        "label": "HDFS Capacity Utilization",
+        "description": "This service-level alert is triggered if the HDFS capacity utilization exceeds the configured warning and critical thresholds. It checks the NameNode JMX Servlet for the CapacityUsed and CapacityRemaining properties. The threshold values are in percent.",
+        "interval": 2,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{hdfs-site/dfs.namenode.http-address}}",
+            "https": "{{hdfs-site/dfs.namenode.https-address}}",
+            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
+            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
+            "https_property": "{{hdfs-site/dfs.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "connection_timeout": 5.0,
+            "high_availability": {
+              "nameservice": "{{hdfs-site/dfs.internal.nameservices}}",
+              "alias_key" : "{{hdfs-site/dfs.ha.namenodes.{{ha-nameservice}}}}",
+              "http_pattern" : "{{hdfs-site/dfs.namenode.http-address.{{ha-nameservice}}.{{alias}}}}",
+              "https_pattern" : "{{hdfs-site/dfs.namenode.https-address.{{ha-nameservice}}.{{alias}}}}"
+            }
+          },
+          "reporting": {
+            "ok": {
+              "text": "Capacity Used:[{2:.0f}%, {0}], Capacity Remaining:[{1}]"
+            },
+            "warning": {
+              "text": "Capacity Used:[{2:.0f}%, {0}], Capacity Remaining:[{1}]",
+              "value": 75
+            },          
+            "critical": {
+              "text": "Capacity Used:[{2:.0f}%, {0}], Capacity Remaining:[{1}]",
+              "value": 80
+            },
+            "units" : "%",
+            "type": "PERCENT"
+          },
+          "jmx": {
+            "property_list": [
+              "Hadoop:service=NameNode,name=FSNamesystemState/CapacityUsed",
+              "Hadoop:service=NameNode,name=FSNamesystemState/CapacityRemaining"
+            ],
+            "value": "{0}/({0} + {1}) * 100.0"
+          }
+        }
+      },
+      {
+        "name": "namenode_rpc_latency",
+        "label": "NameNode RPC Latency",
+        "description": "This host-level alert is triggered if the NameNode RPC latency exceeds the configured critical threshold. Typically an increase in the RPC processing time increases the RPC queue length, causing the average queue wait time to increase for NameNode operations. The threshold values are in milliseconds.",
+        "interval": 2,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{hdfs-site/dfs.namenode.http-address}}",
+            "https": "{{hdfs-site/dfs.namenode.https-address}}",
+            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
+            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
+            "https_property": "{{hdfs-site/dfs.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "connection_timeout": 5.0,
+            "high_availability": {
+              "nameservice": "{{hdfs-site/dfs.internal.nameservices}}",
+              "alias_key" : "{{hdfs-site/dfs.ha.namenodes.{{ha-nameservice}}}}",
+              "http_pattern" : "{{hdfs-site/dfs.namenode.http-address.{{ha-nameservice}}.{{alias}}}}",
+              "https_pattern" : "{{hdfs-site/dfs.namenode.https-address.{{ha-nameservice}}.{{alias}}}}"
+            }
+          },
+          "reporting": {
+            "ok": {
+              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]"
+            },
+            "warning": {
+              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
+              "value": 3000
+            },          
+            "critical": {
+              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
+              "value": 5000
+            },
+            "units" : "ms"
+          },
+          "jmx": {
+            "property_list": [
+              "Hadoop:service=NameNode,name=RpcActivityForPort*/RpcQueueTimeAvgTime",
+              "Hadoop:service=NameNode,name=RpcActivityForPort*/RpcProcessingTimeAvgTime"
+            ],
+            "value": "{0}"
+          }
+        }
+      },
+      {
+        "name": "namenode_directory_status",
+        "label": "NameNode Directory Status",
+        "description": "This host-level alert is triggered if the NameNode NameDirStatuses metric (name=NameNodeInfo/NameDirStatuses) reports a failed directory. The threshold values are in the number of directories that are not healthy.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{hdfs-site/dfs.namenode.http-address}}",
+            "https": "{{hdfs-site/dfs.namenode.https-address}}",
+            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
+            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
+            "https_property": "{{hdfs-site/dfs.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "connection_timeout": 5.0,
+            "high_availability": {
+              "nameservice": "{{hdfs-site/dfs.internal.nameservices}}",
+              "alias_key" : "{{hdfs-site/dfs.ha.namenodes.{{ha-nameservice}}}}",
+              "http_pattern" : "{{hdfs-site/dfs.namenode.http-address.{{ha-nameservice}}.{{alias}}}}",
+              "https_pattern" : "{{hdfs-site/dfs.namenode.https-address.{{ha-nameservice}}.{{alias}}}}"
+            }
+          },
+          "reporting": {
+            "ok": {
+              "text": "Directories are healthy"
+            },
+            "warning": {
+              "text": "Failed directory count: {1}",
+              "value": 1
+            },          
+            "critical": {
+              "text": "Failed directory count: {1}",
+              "value": 1
+            },
+            "units" : "Dirs"
+          },
+          "jmx": {
+            "property_list": [
+              "Hadoop:service=NameNode,name=NameNodeInfo/NameDirStatuses"
+            ],
+            "value": "calculate(args)\ndef calculate(args):\n  import json\n  json_statuses = json.loads({0})\n  return len(json_statuses['failed']) if 'failed' in json_statuses else 0"
+          }
+        }
+      },
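
The "value" field of the namenode_directory_status alert above carries a small
Python snippet as an escaped string; what it computes is simply the number of
entries under "failed" in the NameDirStatuses JSON. A standalone, runnable
equivalent (the sample payload below is illustrative):

    # Runnable equivalent of the escaped snippet embedded in the alert above:
    # count the failed name directories reported by NameNodeInfo/NameDirStatuses.
    import json

    def failed_name_dirs(name_dir_statuses_json):
        statuses = json.loads(name_dir_statuses_json)
        return len(statuses["failed"]) if "failed" in statuses else 0

    sample = '{"active": {"/hadoop/hdfs/namenode": "IMAGE_AND_EDITS"}, "failed": {}}'
    print(failed_name_dirs(sample))  # 0
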
+      {
+        "name": "datanode_health_summary",
+        "label": "DataNode Health Summary",
+        "description": "This service-level alert is triggered if there are unhealthy DataNodes",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{hdfs-site/dfs.namenode.http-address}}",
+            "https": "{{hdfs-site/dfs.namenode.https-address}}",
+            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
+            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
+            "https_property": "{{hdfs-site/dfs.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "connection_timeout": 5.0,
+            "high_availability": {
+              "nameservice": "{{hdfs-site/dfs.internal.nameservices}}",
+              "alias_key": "{{hdfs-site/dfs.ha.namenodes.{{ha-nameservice}}}}",
+              "http_pattern": "{{hdfs-site/dfs.namenode.http-address.{{ha-nameservice}}.{{alias}}}}",
+              "https_pattern": "{{hdfs-site/dfs.namenode.https-address.{{ha-nameservice}}.{{alias}}}}"
+            }
+          },
+          "reporting": {
+            "ok": {
+              "text": "All {2} DataNode(s) are healthy"
+            },
+            "warning": {
+              "text": "DataNode Health: [Live={2}, Stale={1}, Dead={0}]",
+              "value": 1
+            },
+            "critical": {
+              "text": "DataNode Health: [Live={2}, Stale={1}, Dead={0}]",
+              "value": 1
+            },
+            "units": "DNs"
+          },
+          "jmx": {
+            "property_list": [
+              "Hadoop:service=NameNode,name=FSNamesystemState/NumDeadDataNodes",
+              "Hadoop:service=NameNode,name=FSNamesystemState/NumStaleDataNodes",
+              "Hadoop:service=NameNode,name=FSNamesystemState/NumLiveDataNodes"
+            ],
+            "value": "{0} + {1}"
+          }
+        }
+      },
+      {
+        "name": "namenode_last_checkpoint",
+        "label": "NameNode Last Checkpoint",
+        "description": "This service-level alert will trigger if the last time that the NameNode performed a checkpoint was too long ago. It will also trigger if the number of uncommitted transactions is beyond a certain threshold.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "HDFS/2.1.0.2.0/package/alerts/alert_checkpoint_time.py",
+          "parameters": [
+            {
+              "name": "connection.timeout",
+              "display_name": "Connection Timeout",
+              "value": 5.0,
+              "type": "NUMERIC",
+              "description": "The maximum time before this alert is considered to be CRITICAL",
+              "units": "seconds",
+              "threshold": "CRITICAL"
+            },
+            {
+              "name": "checkpoint.time.warning.threshold",
+              "display_name": "Checkpoint Warning",
+              "value": 200,
+              "type": "PERCENT",
+              "description": "The percentage of the last checkpoint time greater than the interval in order to trigger a warning alert.",
+              "units": "%",
+              "threshold": "WARNING"
+            },
+            {
+              "name": "checkpoint.time.critical.threshold",
+              "display_name": "Checkpoint Critical",
+              "value": 200,
+              "type": "PERCENT",
+              "description": "The percentage of the last checkpoint time greater than the interval in order to trigger a critical alert.",
+              "units": "%",
+              "threshold": "CRITICAL"
+            },
+            {
+              "name": "checkpoint.txns.multiplier.warning.threshold",
+              "display_name": "Uncommitted transactions Warning",
+              "value": 2.0,
+              "type": "NUMERIC",
+              "description": "The multiplier to use against dfs.namenode.checkpoint.period compared to the difference between last transaction id and most recent transaction id beyond which to trigger a warning alert.",
+              "threshold": "WARNING"
+            },
+            {
+              "name": "checkpoint.txns.multiplier.critical.threshold",
+              "display_name": "Uncommitted transactions Critical",
+              "value": 4.0,
+              "type": "NUMERIC",
+              "description": "The multiplier to use against dfs.namenode.checkpoint.period compared to the difference between last transaction id and most recent transaction id beyond which to trigger a critical alert.",
+              "threshold": "CRITICAL"
+            }
+          ]
+        }
+      },
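
The checkpoint thresholds above are expressed as percentages of the configured
checkpoint interval: with the 200% values and, say, a six-hour
dfs.namenode.checkpoint.period, the alert would fire once roughly twelve hours
had passed without a checkpoint. A hedged sketch of that arithmetic (one
plausible reading of the parameters, not a copy of alert_checkpoint_time.py):

    # Hedged sketch: flag the alert when the time since the last checkpoint
    # exceeds the given percentage of the checkpoint period.
    def checkpoint_state(seconds_since_last_checkpoint, checkpoint_period_seconds,
                         warning_pct=200, critical_pct=200):
        if seconds_since_last_checkpoint > checkpoint_period_seconds * critical_pct / 100.0:
            return "CRITICAL"
        if seconds_since_last_checkpoint > checkpoint_period_seconds * warning_pct / 100.0:
            return "WARNING"
        return "OK"

    # dfs.namenode.checkpoint.period = 21600 s (6 h); last checkpoint 13 h ago
    print(checkpoint_state(13 * 3600, 21600))  # CRITICAL
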
+      {
+        "name": "namenode_ha_health",
+        "label": "NameNode High Availability Health",
+        "description": "This service-level alert is triggered if either the Active NameNode or Standby NameNode are not running.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "ignore_host": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "HDFS/2.1.0.2.0/package/alerts/alert_ha_namenode_health.py",
+          "parameters": [
+            {
+              "name": "connection.timeout",
+              "display_name": "Connection Timeout",
+              "value": 5.0,
+              "type": "NUMERIC",
+              "description": "The maximum time before this alert is considered to be CRITICAL",
+              "units": "seconds",
+              "threshold": "CRITICAL"
+            }
+          ]
+        }
+      },
+      {
+        "name": "namenode_service_rpc_queue_latency_hourly",
+        "label": "NameNode Service RPC Queue Latency (Hourly)",
+        "description": "This service-level alert is triggered if the deviation of RPC queue latency on datanode port has grown beyond the specified threshold within an hour period.",
+        "interval": 5,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
+          "parameters": [
+            {
+              "name": "mergeHaMetrics",
+              "display_name": "Whether active and stanby NameNodes metrics should be merged",
+              "value": "false",
+              "type": "STRING",
+              "description": "Whether active and stanby NameNodes metrics should be merged.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "interval",
+              "display_name": "Time interval in minutes",
+              "value": 60,
+              "type": "NUMERIC",
+              "description": "Time interval in minutes.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "appId",
+              "display_name": "AMS application id",
+              "value": "NAMENODE",
+              "type": "STRING",
+              "description": "The application id used to retrieve the metric.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "metricName",
+              "display_name": "Metric Name",
+              "value": "rpc.rpc.datanode.RpcQueueTimeAvgTime",
+              "type": "STRING",
+              "description": "The metric to monitor.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "metric.deviation.warning.threshold",
+              "display_name": "Growth Rate",
+              "type": "PERCENT",
+              "units": "%",
+              "value": 100,
+              "description": "The percentage of RPC queue latency growth.",
+              "threshold": "WARNING"
+            },
+            {
+              "name": "metric.deviation.critical.threshold",
+              "display_name": "Growth Rate",
+              "type": "PERCENT",
+              "units": "%",
+              "value": 200,
+              "description": "The percentage of RPC queue latency growth.",
+              "threshold": "CRITICAL"
+            },
+            {
+              "name": "minimumValue",
+              "display_name": "Minimum Latency",
+              "value": 30,
+              "type": "NUMERIC",
+              "units": "seconds",
+              "description": "The minimum latency to measure growth."
+            },
+            {
+              "name": "metric.units",
+              "display_name": "Metric Units",
+              "type": "STRING",
+              "value": "ms",
+              "description": "The units that the metric data points are reported in.",
+              "visibility": "HIDDEN"
+            }
+          ]
+        }
+      },
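
The remaining deviation alerts in this file share the same shape: sample the
named metric over the configured interval, measure how much it has deviated as
a percentage, compare that against the warning and critical thresholds, and
ignore windows where the metric stays below minimumValue. A deliberately
simplified illustration of that kind of thresholding (not the logic of
alert_metrics_deviation.py; the spread-over-mean measure below is just one way
to express deviation):

    # Deliberately simplified: compare the spread of a latency series to its
    # mean, as a percentage, ignoring series whose mean is below a minimum.
    def deviation_state(series_ms, warning_pct=100, critical_pct=200, minimum=30):
        mean = sum(series_ms) / float(len(series_ms))
        if mean < minimum:
            return "OK", 0.0
        deviation_pct = (max(series_ms) - min(series_ms)) / mean * 100.0
        if deviation_pct >= critical_pct:
            return "CRITICAL", deviation_pct
        if deviation_pct >= warning_pct:
            return "WARNING", deviation_pct
        return "OK", deviation_pct

    print(deviation_state([40, 45, 50, 300]))  # CRITICAL: spread is ~239% of the mean
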
+      {
+        "name": "namenode_client_rpc_queue_latency_hourly",
+        "label": "NameNode Client RPC Queue Latency (Hourly)",
+        "description": "This service-level alert is triggered if the deviation of RPC queue latency on client port has grown beyond the specified threshold within an hour period.",
+        "interval": 5,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
+          "parameters": [
+            {
+              "name": "mergeHaMetrics",
+              "display_name": "Whether active and stanby NameNodes metrics should be merged",
+              "value": "false",
+              "type": "STRING",
+              "description": "Whether active and stanby NameNodes metrics should be merged.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "interval",
+              "display_name": "Time interval in minutes",
+              "value": 60,
+              "type": "NUMERIC",
+              "description": "Time interval in minutes.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "appId",
+              "display_name": "AMS application id",
+              "value": "NAMENODE",
+              "type": "STRING",
+              "description": "The application id used to retrieve the metric.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "metricName",
+              "display_name": "Metric Name",
+              "value": "rpc.rpc.client.RpcQueueTimeAvgTime",
+              "type": "STRING",
+              "description": "The metric to monitor.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "metric.deviation.warning.threshold",
+              "display_name": "Growth Rate",
+              "type": "PERCENT",
+              "units": "%",
+              "value": 100,
+              "description": "The percentage of RPC queue latency growth.",
+              "threshold": "WARNING"
+            },
+            {
+              "name": "metric.deviation.critical.threshold",
+              "display_name": "Growth Rate",
+              "type": "PERCENT",
+              "units": "%",
+              "value": 200,
+              "description": "The percentage of RPC queue latency growth.",
+              "threshold": "CRITICAL"
+            },
+            {
+              "name": "minimumValue",
+              "display_name": "Minimum Latency",
+              "value": 30,
+              "type": "NUMERIC",
+              "units": "seconds",
+              "description": "The minimum latency to measure growth."
+            },
+            {
+              "name": "metric.units",
+              "display_name": "Metric Units",
+              "type": "STRING",
+              "value": "ms",
+              "description": "The units that the metric data points are reported in.",
+              "visibility": "HIDDEN"
+            }
+          ]
+        }
+      },
+      {
+        "name": "namenode_service_rpc_processing_latency_hourly",
+        "label": "NameNode Service RPC Processing Latency (Hourly)",
+        "description": "This service-level alert is triggered if the deviation of RPC latency on datanode port has grown beyond the specified threshold within an hour period.",
+        "interval": 5,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
+          "parameters": [
+            {
+              "name": "mergeHaMetrics",
+              "display_name": "Whether active and stanby NameNodes metrics should be merged",
+              "value": "false",
+              "type": "STRING",
+              "description": "Whether active and stanby NameNodes metrics should be merged.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "interval",
+              "display_name": "Time interval in minutes",
+              "value": 60,
+              "type": "NUMERIC",
+              "description": "Time interval in minutes.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "appId",
+              "display_name": "AMS application id",
+              "value": "NAMENODE",
+              "type": "STRING",
+              "description": "The application id used to retrieve the metric.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "metricName",
+              "display_name": "Metric Name",
+              "value": "rpc.rpc.datanode.RpcProcessingTimeAvgTime",
+              "type": "STRING",
+              "description": "The metric to monitor.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "metric.deviation.warning.threshold",
+              "display_name": "Growth Rate",
+              "type": "PERCENT",
+              "units": "%",
+              "value": 100,
+              "description": "The percentage of RPC processing latency growth.",
+              "threshold": "WARNING"
+            },
+            {
+              "name": "metric.deviation.critical.threshold",
+              "display_name": "Growth Rate",
+              "type": "PERCENT",
+              "units": "%",
+              "value": 200,
+              "description": "The percentage of RPC processing latency growth.",
+              "threshold": "CRITICAL"
+            },
+            {
+              "name": "minimumValue",
+              "display_name": "Minimum Latency",
+              "value": 30,
+              "type": "NUMERIC",
+              "units": "seconds",
+              "description": "The minimum latency to measure growth."
+            },
+            {
+              "name": "metric.units",
+              "display_name": "Metric Units",
+              "type": "STRING",
+              "value": "ms",
+              "description": "The units that the metric data points are reported in.",
+              "visibility": "HIDDEN"
+            }
+          ]
+        }
+      },
+      {
+        "name": "namenode_client_rpc_processing_latency_hourly",
+        "label": "NameNode Client RPC Processing Latency (Hourly)",
+        "description": "This service-level alert is triggered if the deviation of RPC latency on client port has grown beyond the specified threshold within an hour period.",
+        "interval": 5,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
+          "parameters": [
+            {
+              "name": "mergeHaMetrics",
+              "display_name": "Whether active and stanby NameNodes metrics should be merged",
+              "value": "false",
+              "type": "STRING",
+              "description": "Whether active and stanby NameNodes metrics should be merged.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "interval",
+              "display_name": "Time interval in minutes",
+              "value": 60,
+              "type": "NUMERIC",
+              "description": "Time interval in minutes.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "appId",
+              "display_name": "AMS application id",
+              "value": "NAMENODE",
+              "type": "STRING",
+              "description": "The application id used to retrieve the metric.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "metricName",
+              "display_name": "Metric Name",
+              "value": "rpc.rpc.client.RpcProcessingTimeAvgTime",
+              "type": "STRING",
+              "description": "The metric to monitor.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "metric.deviation.warning.threshold",
+              "display_name": "Growth Rate",
+              "type": "PERCENT",
+              "units": "%",
+              "value": 100,
+              "description": "The percentage of RPC processing latency growth.",
+              "threshold": "WARNING"
+            },
+            {
+              "name": "metric.deviation.critical.threshold",
+              "display_name": "Growth Rate",
+              "type": "PERCENT",
+              "units": "%",
+              "value": 200,
+              "description": "The percentage of RPC processing latency growth.",
+              "threshold": "CRITICAL"
+            },
+            {
+              "name": "minimumValue",
+              "display_name": "Minimum Latency",
+              "value": 30,
+              "type": "NUMERIC",
+              "units": "seconds",
+              "description": "The minimum latency to measure growth."
+            },
+            {
+              "name": "metric.units",
+              "display_name": "Metric Units",
+              "type": "STRING",
+              "value": "ms",
+              "description": "The units that the metric data points are reported in.",
+              "visibility": "HIDDEN"
+            }
+          ]
+        }
+      },
+      {
+        "name": "increase_nn_heap_usage_daily",
+        "label": "NameNode Heap Usage (Daily)",
+        "description": "This service-level alert is triggered if the NameNode heap usage deviation has grown beyond the specified threshold within a day period.",
+        "interval": 480,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
+          "parameters": [
+            {
+              "name": "mergeHaMetrics",
+              "display_name": "Whether active and stanby NameNodes metrics should be merged",
+              "value": "false",
+              "type": "STRING",
+              "description": "Whether active and stanby NameNodes metrics should be merged.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "interval",
+              "display_name": "Time interval in minutes",
+              "value": 1440,
+              "type": "NUMERIC",
+              "description": "Time interval in minutes.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "appId",
+              "display_name": "AMS application id",
+              "value": "NAMENODE",
+              "type": "STRING",
+              "description": "The application id used to retrieve the metric.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "metricName",
+              "display_name": "Metric Name",
+              "value": "jvm.JvmMetrics.MemHeapUsedM",
+              "type": "STRING",
+              "description": "The metric to monitor.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "metric.deviation.warning.threshold",
+              "display_name": "Growth Rate",
+              "type": "PERCENT",
+              "units": "%",
+              "value": 20,
+              "description": "The percentage of NameNode heap usage growth.",
+              "threshold": "WARNING"
+            },
+            {
+              "name": "metric.deviation.critical.threshold",
+              "display_name": "Growth Rate",
+              "type": "PERCENT",
+              "units": "%",
+              "value": 50,
+              "description": "The percentage of NameNode heap usage growth.",
+              "threshold": "CRITICAL"
+            },
+            {
+              "name": "metric.units",
+              "display_name": "Metric Units",
+              "type": "STRING",
+              "value": "MB",
+              "description": "The units that the metric data points are reported in.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "minimumValue",
+              "display_name": "Minimum Heap",
+              "value": 100,
+              "type": "NUMERIC",
+              "units": "MB",
+              "description": "The minimum heap increase in a day."
+            }
+          ]
+        }
+      },
+      {
+        "name": "namenode_service_rpc_processing_latency_daily",
+        "label": "NameNode Service RPC Processing Latency (Daily)",
+        "description": "This service-level alert is triggered if the deviation of RPC latency on datanode port has grown beyond the specified threshold within a day period.",
+        "interval": 480,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
+          "parameters": [
+            {
+              "name": "mergeHaMetrics",
+              "display_name": "Whether active and stanby NameNodes metrics should be merged",
+              "value": "false",
+              "type": "STRING",
+              "description": "Whether active and stanby NameNodes metrics should be merged.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "interval",
+              "display_name": "Time interval in minutes",
+              "value": 1440,
+              "type": "NUMERIC",
+              "description": "Time interval in minutes.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "appId",
+              "display_name": "AMS application id",
+              "value": "NAMENODE",
+              "type": "STRING",
+              "description": "The application id used to retrieve the metric.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "metricName",
+              "display_name": "Metric Name",
+              "value": "rpc.rpc.datanode.RpcProcessingTimeAvgTime",
+              "type": "STRING",
+              "description": "The metric to monitor.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "metric.deviation.warning.threshold",
+              "display_name": "Growth Rate",
+              "type": "PERCENT",
+              "units": "%",
+              "value": 100,
+              "description": "The percentage of RPC processing latency growth.",
+              "threshold": "WARNING"
+            },
+            {
+              "name": "metric.deviation.critical.threshold",
+              "display_name": "Growth Rate",
+              "type": "PERCENT",
+              "units": "%",
+              "value": 200,
+              "description": "The percentage of RPC processing latency growth.",
+              "threshold": "CRITICAL"
+            },
+            {
+              "name": "minimumValue",
+              "display_name": "Minimum Latency",
+              "value": 30,
+              "type": "NUMERIC",
+              "units": "seconds",
+              "description": "The minimum latency to measure growth."
+            },
+            {
+              "name": "metric.units",
+              "display_name": "Metric Units",
+              "type": "STRING",
+              "value": "ms",
+              "description": "The units that the metric data points are reported in.",
+              "visibility": "HIDDEN"
+            }
+          ]
+        }
+      },
+      {
+        "name": "namenode_client_rpc_processing_latency_daily",
+        "label": "NameNode Client RPC Processing Latency (Daily)",
+        "description": "This service-level alert is triggered if the deviation of RPC latency on client port has grown beyond the specified threshold within a day period.",
+        "interval": 480,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
+          "parameters": [
+            {
+              "name": "mergeHaMetrics",
+              "display_name": "Whether active and stanby NameNodes metrics should be merged",
+              "value": "false",
+              "type": "STRING",
+              "description": "Whether active and stanby NameNodes metrics should be merged.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "interval",
+              "display_name": "Time interval in minutes",
+              "value": 1440,
+              "type": "NUMERIC",
+              "description": "Time interval in minutes.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "appId",
+              "display_name": "AMS application id",
+              "value": "NAMENODE",
+              "type": "STRING",
+              "description": "The application id used to retrieve the metric.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "metricName",
+              "display_name": "Metric Name",
+              "value": "rpc.rpc.client.RpcProcessingTimeAvgTime",
+              "type": "STRING",
+              "description": "The metric to monitor.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "metric.deviation.warning.threshold",
+              "display_name": "Growth Rate",
+              "type": "PERCENT",
+              "units": "%",
+              "value": 100,
+              "description": "The percentage of RPC processing latency growth.",
+              "threshold": "WARNING"
+            },
+            {
+              "name": "metric.deviation.critical.threshold",
+              "display_name": "Growth Rate",
+              "type": "PERCENT",
+              "units": "%",
+              "value": 200,
+              "description": "The percentage of RPC processing latency growth.",
+              "threshold": "CRITICAL"
+            },
+            {
+              "name": "minimumValue",
+              "display_name": "Minimum Latency",
+              "value": 30,
+              "type": "NUMERIC",
+              "units": "seconds",
+              "description": "The minimum latency to measure growth."
+            },
+            {
+              "name": "metric.units",
+              "display_name": "Metric Units",
+              "type": "STRING",
+              "value": "ms",
+              "description": "The units that the metric data points are reported in.",
+              "visibility": "HIDDEN"
+            }
+          ]
+        }
+      },
+      {
+        "name": "namenode_service_rpc_queue_latency_daily",
+        "label": "NameNode Service RPC Queue Latency (Daily)",
+        "description": "This service-level alert is triggered if the deviation of RPC latency on datanode port has grown beyond the specified threshold within a day period.",
+        "interval": 480,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
+          "parameters": [
+            {
+              "name": "mergeHaMetrics",
+              "display_name": "Whether active and stanby NameNodes metrics should be merged",
+              "value": "false",
+              "type": "STRING",
+              "description": "Whether active and stanby NameNodes metrics should be merged.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "interval",
+              "display_name": "Time interval in minutes",
+              "value": 1440,
+              "type": "NUMERIC",
+              "description": "Time interval in minutes.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "appId",
+              "display_name": "AMS application id",
+              "value": "NAMENODE",
+              "type": "STRING",
+              "description": "The application id used to retrieve the metric.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "metricName",
+              "display_name": "Metric Name",
+              "value": "rpc.rpc.datanode.RpcQueueTimeAvgTime",
+              "type": "STRING",
+              "description": "The metric to monitor.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "metric.deviation.warning.threshold",
+              "display_name": "Growth Rate",
+              "type": "PERCENT",
+              "units": "%",
+              "value": 100,
+              "description": "The percentage of RPC queue latency growth.",
+              "threshold": "WARNING"
+            },
+            {
+              "name": "metric.deviation.critical.threshold",
+              "display_name": "Growth Rate",
+              "type": "PERCENT",
+              "units": "%",
+              "value": 200,
+              "description": "The percentage of RPC queue latency growth.",
+              "threshold": "CRITICAL"
+            },
+            {
+              "name": "minimumValue",
+              "display_name": "Minimum Latency",
+              "value": 30,
+              "type": "NUMERIC",
+              "units": "seconds",
+              "description": "The minimum latency to measure growth."
+            },
+            {
+              "name": "metric.units",
+              "display_name": "Metric Units",
+              "type": "STRING",
+              "value": "MB",
+              "description": "The units that the metric data points are reported in.",
+              "visibility": "HIDDEN"
+            }
+          ]
+        }
+      },
+      {
+        "name": "namenode_client_rpc_queue_latency_daily",
+        "label": "NameNode Client RPC Queue Latency (Daily)",
+        "description": "This service-level alert is triggered if the deviation of RPC latency on client port has grown beyond the specified threshold within a day period.",
+        "interval": 480,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
+          "parameters": [
+            {
+              "name": "mergeHaMetrics",
+              "display_name": "Whether active and stanby NameNodes metrics should be merged",
+              "value": "false",
+              "type": "STRING",
+              "description": "Whether active and stanby NameNodes metrics should be merged.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "interval",
+              "display_name": "Time interval in minutes",
+              "value": 1440,
+              "type": "NUMERIC",
+              "description": "Time interval in minutes.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "appId",
+              "display_name": "AMS application id",
+              "value": "NAMENODE",
+              "type": "STRING",
+              "description": "The application id used to retrieve the metric.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "metricName",
+              "display_name": "Metric Name",
+              "value": "rpc.rpc.client.RpcQueueTimeAvgTime",
+              "type": "STRING",
+              "description": "The metric to monitor.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "metric.deviation.warning.threshold",
+              "display_name": "Growth Rate",
+              "type": "PERCENT",
+              "units": "%",
+              "value": 100,
+              "description": "The percentage of RPC queue latency growth.",
+              "threshold": "WARNING"
+            },
+            {
+              "name": "metric.deviation.critical.threshold",
+              "display_name": "Growth Rate",
+              "type": "PERCENT",
+              "units": "%",
+              "value": 200,
+              "description": "The percentage of RPC queue latency growth.",
+              "threshold": "CRITICAL"
+            },
+            {
+              "name": "minimumValue",
+              "display_name": "Minimum Latency",
+              "value": 30,
+              "type": "NUMERIC",
+              "units": "seconds",
+              "description": "The minimum latency to measure growth."
+            },
+            {
+              "name": "metric.units",
+              "display_name": "Metric Units",
+              "type": "STRING",
+              "value": "ms",
+              "description": "The units that the metric data points are reported in.",
+              "visibility": "HIDDEN"
+            }
+          ]
+        }
+      },
+      {
+        "name": "namenode_increase_in_storage_capacity_usage_daily",
+        "label": "HDFS Storage Capacity Usage (Daily)",
+        "description": "This service-level alert is triggered if the increase in storage capacity usage deviation has grown beyond the specified threshold within a day period.",
+        "interval": 480,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
+          "parameters": [
+            {
+              "name": "mergeHaMetrics",
+              "display_name": "Whether active and stanby NameNodes metrics should be merged",
+              "value": "false",
+              "type": "STRING",
+              "description": "Whether active and stanby NameNodes metrics should be merged.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "interval",
+              "display_name": "Time interval in minutes",
+              "value": 1440,
+              "type": "NUMERIC",
+              "description": "Time interval in minutes.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "appId",
+              "display_name": "AMS application id",
+              "value": "NAMENODE",
+              "type": "STRING",
+              "description": "The application id used to retrieve the metric.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "metricName",
+              "display_name": "Metric Name",
+              "value": "dfs.FSNamesystem.CapacityUsed",
+              "type": "STRING",
+              "description": "The metric to monitor.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "metric.deviation.warning.threshold",
+              "display_name": "Growth Rate",
+              "type": "PERCENT",
+              "units": "%",
+              "value": 30,
+              "description": "The percentage of storage capacity usage growth.",
+              "threshold": "WARNING"
+            },
+            {
+              "name": "metric.deviation.critical.threshold",
+              "display_name": "Growth Rate",
+              "type": "PERCENT",
+              "units": "%",
+              "value": 50,
+              "description": "The percentage of storage capacity usage growth.",
+              "threshold": "CRITICAL"
+            },
+            {
+              "name": "metric.units",
+              "display_name": "Metric Units",
+              "type": "STRING",
+              "value": "B",
+              "description": "The units that the metric data points are reported in.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "minimumValue",
+              "display_name": "Minimum Capacity",
+              "value": 100,
+              "type": "NUMERIC",
+              "units": "MB",
+              "description": "The minimum capacity increase in a day."
+            }
+          ]
+        }
+      },
+      {
+        "name": "increase_nn_heap_usage_weekly",
+        "label": "NameNode Heap Usage (Weekly)",
+        "description": "This service-level alert is triggered if the NameNode heap usage deviation has grown beyond the specified threshold within a week period.",
+        "interval": 1440,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
+          "parameters": [
+            {
+              "name": "mergeHaMetrics",
+              "display_name": "Whether active and stanby NameNodes metrics should be merged",
+              "value": "false",
+              "type": "STRING",
+              "description": "Whether active and stanby NameNodes metrics should be merged.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "interval",
+              "display_name": "Time interval in minutes",
+              "value": 10080,
+              "type": "NUMERIC",
+              "description": "Time interval in minutes.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "appId",
+              "display_name": "AMS application id",
+              "value": "NAMENODE",
+              "type": "STRING",
+              "description": "The application id used to retrieve the metric.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "metricName",
+              "display_name": "Metric Name",
+              "value": "jvm.JvmMetrics.MemHeapUsedM",
+              "type": "STRING",
+              "description": "The metric to monitor.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "metric.deviation.warning.threshold",
+              "display_name": "Growth Rate",
+              "type": "PERCENT",
+              "units": "%",
+              "value": 20,
+              "description": "The percentage of NameNode heap usage growth.",
+              "threshold": "WARNING"
+            },
+            {
+              "name": "metric.deviation.critical.threshold",
+              "display_name": "Growth Rate",
+              "type": "PERCENT",
+              "units": "%",
+              "value": 50,
+              "description": "The percentage of NameNode heap usage growth.",
+              "threshold": "CRITICAL"
+            },
+            {
+              "name": "metric.units",
+              "display_name": "Metric Units",
+              "type": "STRING",
+              "value": "MB",
+              "description": "The units that the metric data points are reported in.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "minimumValue",
+              "display_name": "Minimum Heap",
+              "value": 1000,
+              "type": "NUMERIC",
+              "units": "MB",
+              "description": "The minimum heap increase in a week."
+            }
+          ]
+        }
+      },
+      {
+        "name": "namenode_increase_in_storage_capacity_usage_weekly",
+        "label": "HDFS Storage Capacity Usage (Weekly)",
+        "description": "This service-level alert is triggered if the increase in storage capacity usage deviation has grown beyond the specified threshold within a week period.",
+        "interval": 1440,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
+          "parameters": [
+            {
+              "name": "mergeHaMetrics",
+              "display_name": "Whether active and stanby NameNodes metrics should be merged",
+              "value": "false",
+              "type": "STRING",
+              "description": "Whether active and stanby NameNodes metrics should be merged.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "interval",
+              "display_name": "Time interval in minutes",
+              "value": 10080,
+              "type": "NUMERIC",
+              "description": "Time interval in minutes.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "appId",
+              "display_name": "AMS application id",
+              "value": "NAMENODE",
+              "type": "STRING",
+              "description": "The application id used to retrieve the metric.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "metricName",
+              "display_name": "Metric Name",
+              "value": "dfs.FSNamesystem.CapacityUsed",
+              "type": "STRING",
+              "description": "The metric to monitor.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "metric.deviation.warning.threshold",
+              "display_name": "Growth Rate",
+              "type": "PERCENT",
+              "units": "%",
+              "value": 10,
+              "description": "The percentage of storage capacity usage growth.",
+              "threshold": "WARNING"
+            },
+            {
+              "name": "metric.deviation.critical.threshold",
+              "display_name": "Growth Rate",
+              "type": "PERCENT",
+              "units": "%",
+              "value": 20,
+              "description": "The percentage of storage capacity usage growth.",
+              "threshold": "CRITICAL"
+            },
+            {
+              "name": "metric.units",
+              "display_name": "Metric Units",
+              "type": "STRING",
+              "value": "B",
+              "description": "The units that the metric data points are reported in.",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "minimumValue",
+              "display_name": "Minimum Capacity",
+              "value": 1000,
+              "type": "NUMERIC",
+              "units": "MB",
+              "description": "The minimum capacity increase in a week."
+            }
+          ]
+        }
+      }
+    ],
+    "SECONDARY_NAMENODE": [
+      {
+        "name": "secondary_namenode_process",
+        "label": "Secondary NameNode Process",
+        "description": "This host-level alert is triggered if the Secondary NameNode process cannot be confirmed to be up and listening on the network.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "WEB",
+          "uri": {
+            "http": "{{hdfs-site/dfs.namenode.secondary.http-address}}",
+            "https": "{{hdfs-site/dfs.namenode.secondary.https-address}}",
+            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
+            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
+            "https_property": "{{hdfs-site/dfs.http.policy}}",
+            "https_property_value": "HTTPS_ONLY"
+          },
+          "reporting": {
+            "ok": {
+              "text": "HTTP {0} response in {2:.3f}s"
+            },
+            "warning":{
+              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
+            },
+            "critical": {
+              "text": "Connection failed to {1} ({3})"
+            }
+          }
+        }
+      }
+    ],
+    "NFS_GATEWAY": [
+      {
+        "name": "nfsgateway_process",
+        "label": "NFS Gateway Process",
+        "description": "This host-level alert is triggered if the NFS Gateway process cannot be confirmed to be up and listening on the network.",
+        "interval": 1,
+        "scope": "HOST",
+        "enabled": true,
+        "source": {
+          "type": "PORT",
+          "uri": "{{hdfs-site/nfs.server.port}}",
+          "default_port": 2049,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ],
+    "JOURNALNODE": [
+      {
+        "name": "journalnode_process",
+        "label": "JournalNode Web UI",
+        "description": "This host-level alert is triggered if the JournalNode Web UI is unreachable.",
+        "interval": 1,
+        "scope": "HOST",
+        "enabled": true,
+        "source": {
+          "type": "WEB",
+          "uri": {
+            "http": "{{hdfs-site/dfs.journalnode.http-address}}",
+            "https": "{{hdfs-site/dfs.journalnode.https-address}}",
+            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
+            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
+            "https_property": "{{hdfs-site/dfs.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "connection_timeout": 5.0
+          },
+          "reporting": {
+            "ok": {
+              "text": "HTTP {0} response in {2:.3f}s"
+            },
+            "warning": {
+              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
+            },
+            "critical": {
+              "text": "Connection failed to {1} ({3})"
+            }
+          }
+        }
+      }
+    ],
+    "DATANODE": [
+      {
+        "name": "datanode_process",
+        "label": "DataNode Process",
+        "description": "This host-level alert is triggered if the individual DataNode processes cannot be established to be up and listening on the network.",
+        "interval": 1,
+        "scope": "HOST",
+        "enabled": true,
+        "source": {
+          "type": "PORT",        
+          "uri": "{{hdfs-site/dfs.datanode.address}}",
+          "default_port": 50010,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      },
+      {
+        "name": "datanode_webui",
+        "label": "DataNode Web UI",
+        "description": "This host-level alert is triggered if the DataNode Web UI is unreachable.",
+        "interval": 1,
+        "scope": "HOST",
+        "enabled": true,
+        "source": {
+          "type": "WEB",
+          "uri": {
+            "http": "{{hdfs-site/dfs.datanode.http.address}}",
+            "https": "{{hdfs-site/dfs.datanode.https.address}}",
+            "https_property": "{{hdfs-site/dfs.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
+            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
+            "connection_timeout": 5.0
+          },
+          "reporting": {
+            "ok": {
+              "text": "HTTP {0} response in {2:.3f}s"
+            },
+            "warning":{
+              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
+            },
+            "critical": {
+              "text": "Connection failed to {1} ({3})"
+            }
+          }
+        }
+      },
+      {
+        "name": "datanode_storage",
+        "label": "DataNode Storage",
+        "description": "This host-level alert is triggered if storage capacity if full on the DataNode. It checks the DataNode JMX Servlet for the Capacity and Remaining properties. The threshold values are in percent.",
+        "interval": 2,
+        "scope": "HOST",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{hdfs-site/dfs.datanode.http.address}}",
+            "https": "{{hdfs-site/dfs.datanode.https.address}}",
+            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
+            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
+            "https_property": "{{hdfs-site/dfs.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "connection_timeout": 5.0
+          },
+          "reporting": {
+            "ok": {
+              "text": "Remaining Capacity:[{0}], Total Capacity:[{2:.0f}% Used, {1}]"
+            },
+            "warning": {
+              "text": "Remaining Capacity:[{0}], Total Capacity:[{2:.0f}% Used, {1}]",
+              "value": 75
+            },
+            "critical": {
+              "text": "Remaining Capacity:[{0}], Total Capacity:[{2:.0f}% Used, {1}]",
+              "value": 80
+            },
+            "units" : "%",
+            "type": "PERCENT"
+          },
+          "jmx": {
+            "property_list": [
+              "Hadoop:service=DataNode,name=FSDatasetState-*/Remaining",
+              "Hadoop:service=DataNode,name=FSDatasetState-*/Capacity"
+            ],
+            "value": "({1} - {0})/{1} * 100.0"
+          }
+        }
+      },
+      {
+        "name": "datanode_unmounted_data_dir",
+        "label": "DataNode Unmounted Data Dir",
+        "description": "This host-level alert is triggered if one of the data directories on a host was previously on a mount point and became unmounted. If the mount history file does not exist, then report an error if a host has one or more mounted data directories as well as one or more unmounted data directories on the root partition. This may indicate that a data directory is writing to the root partition, which is undesirable.",
+        "interval": 2,
+        "scope": "HOST",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "HDFS/2.1.0.2.0/package/alerts/alert_datanode_unmounted_data_dir.py"
+        }
+      },
+      {
+        "name": "datanode_heap_usage",
+        "label": "DataNode Heap Usage",
+        "description": "This host-level alert is triggered if heap usage goes past thresholds on the DataNode. It checks the DataNode JMXServlet for the MemHeapUsedM and MemHeapMaxM properties. The threshold values are in percent.",
+        "interval": 2,
+        "scope": "HOST",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{hdfs-site/dfs.datanode.http.address}}",
+            "https": "{{hdfs-site/dfs.datanode.https.address}}",
+            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
+            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
+            "https_property": "{{hdfs-site/dfs.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "connection_timeout": 5.0
+          },
+          "reporting": {
+            "ok": {
+              "text": "Used Heap:[{2:.0f}%, {0} MB], Max Heap: {1} MB"
+            },
+            "warning": {
+              "text": "Used Heap:[{2:.0f}%, {0} MB], Max Heap: {1} MB",
+              "value": 80
+            },
+            "critical": {
+              "text": "Used Heap:[{2:.0f}%, {0} MB], Max Heap: {1} MB",
+              "value": 90
+            },
+            "units" : "%",
+            "type": "PERCENT"
+          },
+          "jmx": {
+            "property_list": [
+              "Hadoop:service=DataNode,name=JvmMetrics/MemHeapUsedM",
+              "Hadoop:service=DataNode,name=JvmMetrics/MemHeapMaxM"
+            ],
+            "value": "100.0 - (({1} - {0})/{1} * 100.0)"
+          }
+        }
+      }
+    ],
+    "ZKFC": [
+      {
+        "name": "hdfs_zookeeper_failover_controller_process",
+        "label": "ZooKeeper Failover Controller Process",
+        "description": "This host-level alert is triggered if the ZooKeeper Failover Controller process cannot be confirmed to be up and listening on the network.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "PORT",
+          "uri": "{{hdfs-site/dfs.ha.zkfc.port}}",
+          "default_port": 8019,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ]
+  }
+}
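
The SCRIPT-based alerts above pass their parameters (metric.deviation.warning.threshold, metric.deviation.critical.threshold, minimumValue, interval, metric.units) to alert_metrics_deviation.py. As a rough, illustrative sketch only (the helper name, the simple first-versus-last growth calculation, and the sample data are assumptions, not the script's actual logic), the evaluation could look like this in Python:

    # Assumed sketch: classify growth of a metric over the alert's window
    # against the warning/critical percentage thresholds, skipping the check
    # while all values stay under minimumValue.
    def evaluate_deviation_alert(samples, warning_pct, critical_pct, minimum_value):
        if not samples or max(samples) < minimum_value:
            return ("OK", "values below minimum, growth not measured")
        baseline = samples[0] or 1e-9              # guard against division by zero
        growth_pct = (samples[-1] - baseline) / baseline * 100.0
        if growth_pct >= critical_pct:
            return ("CRITICAL", "growth {0:.1f}%".format(growth_pct))
        if growth_pct >= warning_pct:
            return ("WARNING", "growth {0:.1f}%".format(growth_pct))
        return ("OK", "growth {0:.1f}%".format(growth_pct))

    # Example with the daily client RPC latency thresholds (100% warning, 200% critical):
    print(evaluate_deviation_alert([40.0, 55.0, 130.0], 100, 200, 30))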

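The METRIC-type alerts (datanode_storage, datanode_heap_usage) work differently: the listed JMX properties are substituted, in order, into the "value" expression, for example "({1} - {0})/{1} * 100.0" for percent of capacity used, and the result is compared to the warning/critical reporting values. A minimal sketch of that substitution follows; the helper name and sample numbers are hypothetical, and the real alert framework is not claimed to use Python's format/eval as shown.

    # Assumed sketch: {0}, {1}, ... in the expression refer to property_list
    # entries in order; the computed percentage is classified against thresholds.
    def classify_percent(jmx_values, expression, warning, critical):
        percent = eval(expression.format(*jmx_values))   # alert-defined, trusted expression
        if percent >= critical:
            return ("CRITICAL", percent)
        if percent >= warning:
            return ("WARNING", percent)
        return ("OK", percent)

    # DataNode storage: Remaining = 2 TB, Capacity = 10 TB -> 80% used -> CRITICAL (75/80)
    print(classify_percent([2.0e12, 1.0e13], "({1} - {0})/{1} * 100.0", 75, 80))

    # DataNode heap: MemHeapUsedM = 820, MemHeapMaxM = 1024 -> ~80.1% used -> WARNING (80/90)
    print(classify_percent([820.0, 1024.0], "100.0 - (({1} - {0})/{1} * 100.0)", 80, 90))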

[07/51] [abbrv] ambari git commit: AMBARI-19012 Ability to use external Solr for Log Search instead of AMBARI_INFRA_SOLR

Posted by sm...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/a85000b8/ambari-web/app/data/HDP2/site_properties.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/data/HDP2/site_properties.js b/ambari-web/app/data/HDP2/site_properties.js
index 5b66506..e2a4d4e 100644
--- a/ambari-web/app/data/HDP2/site_properties.js
+++ b/ambari-web/app/data/HDP2/site_properties.js
@@ -2066,95 +2066,81 @@ var hdp2properties = [
     "index": 5
   },
   {
-    "name": "logsearch_solr_audit_logs_zk_node",
-    "serviceName": "LOGSEARCH",
-    "filename": "logsearch-env.xml",
-    "category": "Advanced logsearch-env",
-    "index": 6
-  },
-  {
-    "name": "logsearch_solr_audit_logs_zk_quorum",
-    "serviceName": "LOGSEARCH",
-    "filename": "logsearch-env.xml",
-    "category": "Advanced logsearch-env",
-    "index": 7
-  },
-  {
     "name": "logsearch_debug_enabled",
     "serviceName": "LOGSEARCH",
     "filename": "logsearch-env.xml",
     "category": "Advanced logsearch-env",
-    "index": 8
+    "index": 6
   },
   {
     "name": "logsearch_debug_port",
     "serviceName": "LOGSEARCH",
     "filename": "logsearch-env.xml",
     "category": "Advanced logsearch-env",
-    "index": 9
+    "index": 7
   },
   {
     "name": "logsearch_truststore_location",
     "serviceName": "LOGSEARCH",
     "filename": "logsearch-env.xml",
     "category": "Advanced logsearch-env",
-    "index": 10
+    "index": 8
   },
   {
     "name": "logsearch_truststore_type",
     "serviceName": "LOGSEARCH",
     "filename": "logsearch-env.xml",
     "category": "Advanced logsearch-env",
-    "index": 11
+    "index": 9
   },
   {
     "name": "logsearch_truststore_password",
     "serviceName": "LOGSEARCH",
     "filename": "logsearch-env.xml",
     "category": "Advanced logsearch-env",
-    "index": 12
+    "index": 10
   },
   {
     "name": "logsearch_keystore_location",
     "serviceName": "LOGSEARCH",
     "filename": "logsearch-env.xml",
     "category": "Advanced logsearch-env",
-    "index": 13
+    "index": 11
   },
   {
     "name": "logsearch_keystore_type",
     "serviceName": "LOGSEARCH",
     "filename": "logsearch-env.xml",
     "category": "Advanced logsearch-env",
-    "index": 14
+    "index": 12
   },
   {
     "name": "logsearch_keystore_password",
     "serviceName": "LOGSEARCH",
     "filename": "logsearch-env.xml",
     "category": "Advanced logsearch-env",
-    "index": 15
+    "index": 13
   },
   {
     "name": "logsearch_kerberos_keytab",
     "serviceName": "LOGSEARCH",
     "filename": "logsearch-env.xml",
     "category": "Advanced logsearch-env",
-    "index": 16
+    "index": 14
   },
   {
     "name": "logsearch_kerberos_principal",
     "serviceName": "LOGSEARCH",
     "filename": "logsearch-env.xml",
     "category": "Advanced logsearch-env",
-    "index": 17
+    "index": 15
   },
   {
     "name": "content",
     "serviceName": "LOGSEARCH",
     "filename": "logsearch-env.xml",
     "category": "Advanced logsearch-env",
-    "index": 18
+    "index": 16
   },
   /*logsearch-log4j*/
   {


[10/51] [abbrv] ambari git commit: AMBARI-19131. Manage Journalnode Wizard: incorrect number of installed JournalNodes (akovalenko)

Posted by sm...@apache.org.
AMBARI-19131. Manage Journalnode Wizard: incorrect number of installed JournalNodes (akovalenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9cc66e44
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9cc66e44
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9cc66e44

Branch: refs/heads/branch-feature-AMBARI-18901
Commit: 9cc66e447ced78153c0a23cf0d54044029c6af6d
Parents: a85000b
Author: Aleksandr Kovalenko <ak...@hortonworks.com>
Authored: Wed Dec 7 21:12:48 2016 +0200
Committer: Aleksandr Kovalenko <ak...@hortonworks.com>
Committed: Thu Dec 8 13:09:32 2016 +0200

----------------------------------------------------------------------
 .../journalNode/step1_controller.js             | 26 ++++++++++++--------
 1 file changed, 16 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9cc66e44/ambari-web/app/controllers/main/admin/highAvailability/journalNode/step1_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/highAvailability/journalNode/step1_controller.js b/ambari-web/app/controllers/main/admin/highAvailability/journalNode/step1_controller.js
index b6ffe5b..e1252b7 100644
--- a/ambari-web/app/controllers/main/admin/highAvailability/journalNode/step1_controller.js
+++ b/ambari-web/app/controllers/main/admin/highAvailability/journalNode/step1_controller.js
@@ -28,8 +28,6 @@ App.ManageJournalNodeWizardStep1Controller = Em.Controller.extend(App.BlueprintM
 
   mastersToShow: ['JOURNALNODE'],
 
-  mastersToAdd: [],
-
   showInstalledMastersFirst: true,
 
   JOURNALNODES_COUNT_MINIMUM: 3, // TODO get this from stack
@@ -41,14 +39,9 @@ App.ManageJournalNodeWizardStep1Controller = Em.Controller.extend(App.BlueprintM
    * @param masterComponents
    */
   renderComponents: function(masterComponents) {
-    var jns = App.HostComponent.find().filterProperty('componentName', 'JOURNALNODE');
-    var count = jns.get('length');
-    this.set('mastersToAdd', []);
-    if (masterComponents.filterProperty('component_name', 'JOURNALNODE').length == 0) {
-      for (var i = 0; i < count; i++) {
-        this.get('mastersToAdd').push('JOURNALNODE');
-      }
-    }
+    // check if we are restoring the components assignment by checking for an existing JOURNALNODE component in the array
+    var restoringComponents = masterComponents.someProperty('component_name', 'JOURNALNODE');
+    masterComponents = restoringComponents ? masterComponents : masterComponents.concat(this.generateJournalNodeComponents());
     this._super(masterComponents);
     this.updateJournalNodeInfo();
     this.showHideJournalNodesAddRemoveControl();
@@ -56,6 +49,19 @@ App.ManageJournalNodeWizardStep1Controller = Em.Controller.extend(App.BlueprintM
   },
 
   /**
+   * Create JOURNALNODE components to add them to masters array
+   */
+  generateJournalNodeComponents: function () {
+    var journalNodes = [];
+    App.HostComponent.find().filterProperty('componentName', 'JOURNALNODE').forEach(function (jn) {
+      var jnComponent = this.createComponentInstallationObject(Em.Object.create({serviceName: jn.get('service.serviceName'), componentName: jn.get('componentName')}), jn.get('hostName'));
+      jnComponent.isInstalled = true;
+      journalNodes.push(jnComponent);
+    }, this);
+    return journalNodes;
+  },
+
+  /**
    * Enable/Disable show/hide operation for each JournalNode
    */
   showHideJournalNodesAddRemoveControl: function() {


[31/51] [abbrv] ambari git commit: AMBARI-19137. HDP 3.0 TP - move ZK, HDFS, YARN/MR into new common-services version (alejandro)

Posted by sm...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/MAPREDUCE2_metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/MAPREDUCE2_metrics.json b/ambari-server/src/main/resources/common-services/YARN/3.0.0/MAPREDUCE2_metrics.json
new file mode 100644
index 0000000..f44e3b2
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/MAPREDUCE2_metrics.json
@@ -0,0 +1,2596 @@
+{
+  "HISTORYSERVER": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/jvm/memHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsRunnable": {
+              "metric": "jvm.JvmMetrics.ThreadsRunnable",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsNew": {
+              "metric": "jvm.JvmMetrics.ThreadsNew",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationFailures": {
+              "metric": "rpc.metrics.RpcAuthorizationFailures",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_avg_time": {
+              "metric": "ugi.ugi.LoginSuccessAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_avg_time": {
+              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "rpc.rpc.SentBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logWarn": {
+              "metric": "jvm.JvmMetrics.LogWarn",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTimedWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/process/proc_run": {
+              "metric": "proc_run",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/gcCount": {
+              "metric": "jvm.JvmMetrics.GcCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "rpc.rpc.ReceivedBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/memory/swap_total": {
+              "metric": "swap_total",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/threadsBlocked": {
+              "metric": "jvm.JvmMetrics.ThreadsBlocked",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_num_ops": {
+              "metric": "rpc.rpc.RpcQueueTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/process/proc_total": {
+              "metric": "proc_total",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/part_max_used": {
+              "metric": "part_max_used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "rpc.rpc.NumOpenConnections",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemHeapUsedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/memory/mem_buffers": {
+              "metric": "mem_buffers",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_num_ops": {
+              "metric": "ugi.ugi.LoginSuccessNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/gcTimeMillis": {
+              "metric": "jvm.JvmMetrics.GcTimeMillis",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTerminated": {
+              "metric": "jvm.JvmMetrics.ThreadsTerminated",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/network/bytes_out": {
+              "metric": "bytes_out",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/cpu/cpu_aidle": {
+              "metric": "cpu_aidle",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/network/bytes_in": {
+              "metric": "bytes_in",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/boottime": {
+              "metric": "boottime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/network/pkts_out": {
+              "metric": "pkts_out",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/callQueueLen": {
+              "metric": "rpc.rpc.CallQueueLength",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logInfo": {
+              "metric": "jvm.JvmMetrics.LogInfo",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_num_ops": {
+              "metric": "ugi.ugi.LoginFailureNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_num_ops": {
+              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logError": {
+              "metric": "jvm.JvmMetrics.LogError",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_avg_time": {
+              "metric": "ugi.ugi.LoginFailureAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationSuccesses": {
+              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logFatal": {
+              "metric": "jvm.JvmMetrics.LogFatal",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_avg_time": {
+              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationSuccesses": {
+              "metric": "rpc.metrics.RpcAuthenticationSuccesses",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationFailures": {
+              "metric": "rpc.metrics.RpcAuthenticationFailures",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/network/pkts_in": {
+              "metric": "pkts_in",
+              "pointInTime": true,
+              "temporal": true
+            }
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "metrics": {
+          "default": {
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.ReceivedBytes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.SentBytes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTimeNumOps": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcQueueTimeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTimeAvgTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcQueueTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTimeNumOps": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcProcessingTimeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTimeAvgTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcProcessingTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcAuthenticationFailures": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthenticationFailures",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcAuthenticationSuccesses": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthenticationSuccesses",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcAuthorizationFailures": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthorizationFailures",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcAuthorizationSuccesses": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthorizationSuccesses",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.NumOpenConnections",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/CallQueueLength": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.CallQueueLength",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/MemNonHeapUsedM": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/MemNonHeapCommittedM": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/MemHeapUsedM": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemHeapUsedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/MemHeapCommittedM": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemHeapCommittedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/MemMaxM": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemMaxM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcCountCopy": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCountCopy",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcTimeMillisCopy": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillisCopy",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcCountMarkSweepCompact": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCountMarkSweepCompact",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcTimeMillisMarkSweepCompact": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillisMarkSweepCompact",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcCount": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcTimeMillis": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillis",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsNew": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsNew",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsRunnable": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsRunnable",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsBlocked": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsBlocked",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsWaiting": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsWaiting",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsTimedWaiting": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsTerminated": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsTerminated",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/LogFatal": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogFatal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/LogError": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogError",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/LogWarn": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogWarn",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/LogInfo": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogInfo",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/HeapMemoryMax": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/HeapMemoryUsed": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/HeapMemoryCommitted": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[committed]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/HeapMemoryInit": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[init]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/NonHeapMemoryMax": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/NonHeapMemoryUsed": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/NonHeapMemoryCommitted": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[committed]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/NonHeapMemoryInit": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[init]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/MBeanServerId": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.MBeanServerId",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/SpecificationName": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationName",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/SpecificationVersion": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationVersion",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/SpecificationVendor": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationVendor",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/ImplementationName": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationName",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/ImplementationVersion": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationVersion",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/ImplementationVendor": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationVendor",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/ElapsedTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.ElapsedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/PercentComplete": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.PercentComplete",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingFsImageCount": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingFsImageElapsedTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageElapsedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingFsImageTotal": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingFsImagePercentComplete": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImagePercentComplete",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingEditsCount": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingEditsElapsedTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsElapsedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingEditsTotal": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingEditsPercentComplete": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsPercentComplete",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SavingCheckpointCount": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SavingCheckpointElapsedTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointElapsedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SavingCheckpointTotal": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SavingCheckpointPercentComplete": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointPercentComplete",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SafeModeCount": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SafeModeElapsedTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeElapsedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SafeModeTotal": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SafeModePercentComplete": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModePercentComplete",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/HotSpotDiagnostic/DiagnosticOptions": {
+              "metric": "com.sun.management:type=HotSpotDiagnostic.DiagnosticOptions",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryManager/MemoryPoolNames": {
+              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.MemoryPoolNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryManager/Name": {
+              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryManager/Valid": {
+              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.Valid",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryManager/ObjectName": {
+              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.ObjectName",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Logging/LoggerNames": {
+              "metric": "java.util.logging:type=Logging.LoggerNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/UgiMetrics/LoginSuccessNumOps": {
+              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginSuccessNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/UgiMetrics/LoginSuccessAvgTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginSuccessAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/UgiMetrics/LoginFailureNumOps": {
+              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginFailureNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/UgiMetrics/LoginFailureAvgTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginFailureAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/CollectionUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThreshold": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThreshold",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdCount": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/MemoryManagerNames": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.MemoryManagerNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/PeakUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.PeakUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/Usage": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Usage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdExceeded": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdExceeded",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/UsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.UsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/Name": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/Type": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Type",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/Valid": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Valid",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadAllocatedMemoryEnabled": {
+              "metric": "java.lang:type=Threading.ThreadAllocatedMemoryEnabled",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadAllocatedMemorySupported": {
+              "metric": "java.lang:type=Threading.ThreadAllocatedMemorySupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/DaemonThreadCount": {
+              "metric": "java.lang:type=Threading.DaemonThreadCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/PeakThreadCount": {
+              "metric": "java.lang:type=Threading.PeakThreadCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/CurrentThreadCpuTimeSupported": {
+              "metric": "java.lang:type=Threading.CurrentThreadCpuTimeSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ObjectMonitorUsageSupported": {
+              "metric": "java.lang:type=Threading.ObjectMonitorUsageSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/SynchronizerUsageSupported": {
+              "metric": "java.lang:type=Threading.SynchronizerUsageSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadContentionMonitoringSupported": {
+              "metric": "java.lang:type=Threading.ThreadContentionMonitoringSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadCpuTimeEnabled": {
+              "metric": "java.lang:type=Threading.ThreadCpuTimeEnabled",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/CurrentThreadCpuTime": {
+              "metric": "java.lang:type=Threading.CurrentThreadCpuTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/CurrentThreadUserTime": {
+              "metric": "java.lang:type=Threading.CurrentThreadUserTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadCount": {
+              "metric": "java.lang:type=Threading.ThreadCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/TotalStartedThreadCount": {
+              "metric": "java.lang:type=Threading.TotalStartedThreadCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadCpuTimeSupported": {
+              "metric": "java.lang:type=Threading.ThreadCpuTimeSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadContentionMonitoringEnabled": {
+              "metric": "java.lang:type=Threading.ThreadContentionMonitoringEnabled",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/AllThreadIds": {
+              "metric": "java.lang:type=Threading.AllThreadIds",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ClassLoading/LoadedClassCount": {
+              "metric": "java.lang:type=ClassLoading.LoadedClassCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ClassLoading/UnloadedClassCount": {
+              "metric": "java.lang:type=ClassLoading.UnloadedClassCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ClassLoading/TotalLoadedClassCount": {
+              "metric": "java.lang:type=ClassLoading.TotalLoadedClassCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ClassLoading/Verbose": {
+              "metric": "java.lang:type=ClassLoading.Verbose",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/BootClassPath": {
+              "metric": "java.lang:type=Runtime.BootClassPath",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/LibraryPath": {
+              "metric": "java.lang:type=Runtime.LibraryPath",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/VmName": {
+              "metric": "java.lang:type=Runtime.VmName",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/VmVendor": {
+              "metric": "java.lang:type=Runtime.VmVendor",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/VmVersion": {
+              "metric": "java.lang:type=Runtime.VmVersion",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/BootClassPathSupported": {
+              "metric": "java.lang:type=Runtime.BootClassPathSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/StartTime": {
+              "metric": "java.lang:type=Runtime.StartTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/InputArguments": {
+              "metric": "java.lang:type=Runtime.InputArguments",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/ManagementSpecVersion": {
+              "metric": "java.lang:type=Runtime.ManagementSpecVersion",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/SpecName": {
+              "metric": "java.lang:type=Runtime.SpecName",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/SpecVendor": {
+              "metric": "java.lang:type=Runtime.SpecVendor",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/SpecVersion": {
+              "metric": "java.lang:type=Runtime.SpecVersion",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/SystemProperties": {
+              "metric": "java.lang:type=Runtime.SystemProperties",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/Uptime": {
+              "metric": "java.lang:type=Runtime.Uptime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/Name": {
+              "metric": "java.lang:type=Runtime.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/ClassPath": {
+              "metric": "java.lang:type=Runtime.ClassPath",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/MaxFileDescriptorCount": {
+              "metric": "java.lang:type=OperatingSystem.MaxFileDescriptorCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/OpenFileDescriptorCount": {
+              "metric": "java.lang:type=OperatingSystem.OpenFileDescriptorCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/CommittedVirtualMemorySize": {
+              "metric": "java.lang:type=OperatingSystem.CommittedVirtualMemorySize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/FreePhysicalMemorySize": {
+              "metric": "java.lang:type=OperatingSystem.FreePhysicalMemorySize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/FreeSwapSpaceSize": {
+              "metric": "java.lang:type=OperatingSystem.FreeSwapSpaceSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/ProcessCpuLoad": {
+              "metric": "java.lang:type=OperatingSystem.ProcessCpuLoad",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/ProcessCpuTime": {
+              "metric": "java.lang:type=OperatingSystem.ProcessCpuTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/SystemCpuLoad": {
+              "metric": "java.lang:type=OperatingSystem.SystemCpuLoad",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/TotalPhysicalMemorySize": {
+              "metric": "java.lang:type=OperatingSystem.TotalPhysicalMemorySize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/TotalSwapSpaceSize": {
+              "metric": "java.lang:type=OperatingSystem.TotalSwapSpaceSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/AvailableProcessors": {
+              "metric": "java.lang:type=OperatingSystem.AvailableProcessors",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/Version": {
+              "metric": "java.lang:type=OperatingSystem.Version",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/Arch": {
+              "metric": "java.lang:type=OperatingSystem.Arch",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/SystemLoadAverage": {
+              "metric": "java.lang:type=OperatingSystem.SystemLoadAverage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/Name": {
+              "metric": "java.lang:type=OperatingSystem.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/CollectionUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/CollectionUsageThreshold": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsageThreshold",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/CollectionUsageThresholdCount": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsageThresholdCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/MemoryManagerNames": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.MemoryManagerNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/PeakUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.PeakUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/Usage": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.Usage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/UsageThreshold": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.UsageThreshold",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/UsageThresholdCount": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.UsageThresholdCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/CollectionUsageThresholdExceeded": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsageThresholdExceeded",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/CollectionUsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/UsageThresholdExceeded": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.UsageThresholdExceeded",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/UsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.UsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/Name": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/Type": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.Type",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/Valid": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.Valid",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/BufferPool/mapred/Count": {
+              "metric": "java.nio:type=BufferPool,name=mapped.Count",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/BufferPool/mapred/MemoryUsed": {
+              "metric": "java.nio:type=BufferPool,name=mapped.MemoryUsed",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/BufferPool/mapred/TotalCapacity": {
+              "metric": "java.nio:type=BufferPool,name=mapped.TotalCapacity",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/BufferPool/mapred/Name": {
+              "metric": "java.nio:type=BufferPool,name=mapped.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/BufferPool/direct/Count": {
+              "metric": "java.nio:type=BufferPool,name=direct.Count",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/BufferPool/direct/MemoryUsed": {
+              "metric": "java.nio:type=BufferPool,name=direct.MemoryUsed",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/BufferPool/direct/TotalCapacity": {
+              "metric": "java.nio:type=BufferPool,name=direct.TotalCapacity",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/BufferPool/direct/Name": {
+              "metric": "java.nio:type=BufferPool,name=direct.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/MarkSweepCompact/LastGcInfo": {
+              "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.LastGcInfo",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/MarkSweepCompact/CollectionCount": {
+              "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.CollectionCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/MarkSweepCompact/CollectionTime": {
+              "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.CollectionTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/MarkSweepCompact/MemoryPoolNames": {
+              "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.MemoryPoolNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/MarkSweepCompact/Name": {
+              "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/MarkSweepCompact/Valid": {
+              "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.Valid",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/Copy/LastGcInfo": {
+              "metric": "java.lang:type=GarbageCollector,name=Copy.LastGcInfo",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/Copy/CollectionCount": {
+              "metric": "java.lang:type=GarbageCollector,name=Copy.CollectionCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/Copy/CollectionTime": {
+              "metric": "java.lang:type=GarbageCollector,name=Copy.CollectionTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/Copy/MemoryPoolNames": {
+              "metric": "java.lang:type=GarbageCollector,name=Copy.MemoryPoolNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/Copy/Name": {
+              "metric": "java.lang:type=GarbageCollector,name=Copy.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/Copy/Valid": {
+              "metric": "java.lang:type=GarbageCollector,name=Copy.Valid",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/CollectionUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.CollectionUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/MemoryManagerNames": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.MemoryManagerNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/PeakUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.PeakUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/Usage": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.Usage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/UsageThreshold": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.UsageThreshold",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/UsageThresholdCount": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.UsageThresholdCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/CollectionUsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.CollectionUsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/UsageThresholdExceeded": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.UsageThresholdExceeded",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/UsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.UsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/Name": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/Type": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.Type",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/Valid": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.Valid",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/CollectionUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/CollectionUsageThreshold": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsageThreshold",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/CollectionUsageThresholdCount": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsageThresholdCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/MemoryManagerNames": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.MemoryManagerNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/PeakUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.PeakUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/Usage": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.Usage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/CollectionUsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/CollectionUsageThresholdExceeded": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsageThresholdExceeded",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/UsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.UsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/Name": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/Type": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.Type",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/Valid": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.Valid",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/CollectionUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/CollectionUsageThreshold": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsageThreshold",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/CollectionUsageThresholdCount": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsageThresholdCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/MemoryManagerNames": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.MemoryManagerNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/PeakUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.PeakUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/Usage": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.Usage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/UsageThreshold": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.UsageThreshold",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/UsageThresholdCount": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.UsageThresholdCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/CollectionUsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/CollectionUsageThresholdExceeded": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsageThresholdExceeded",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/UsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.UsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/UsageThresholdExceeded": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.UsageThresholdExceeded",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/Name": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/Type": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.Type",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/Valid": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.Valid",
+              "pointInTime": true,
+              "temporal": false
+            }
+          }
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/jvm/memHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsRunnable": {
+              "metric": "jvm.JvmMetrics.ThreadsRunnable",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsNew": {
+              "metric": "jvm.JvmMetrics.ThreadsNew",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationFailures": {
+              "metric": "rpc.metrics.RpcAuthorizationFailures",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_avg_time": {
+              "metric": "ugi.ugi.LoginSuccessAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_avg_time": {
+              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "rpc.rpc.SentBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logWarn": {
+              "metric": "jvm.JvmMetrics.LogWarn",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTimedWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/process/proc_run": {
+              "metric": "proc_run",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/gcCount": {
+              "metric": "jvm.JvmMetrics.GcCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "rpc.rpc.ReceivedBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/memory/swap_total": {
+              "metric": "swap_total",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/threadsBlocked": {
+              "metric": "jvm.JvmMetrics.ThreadsBlocked",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_num_ops": {
+              "metric": "rpc.rpc.RpcQueueTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/process/proc_total": {
+              "metric": "proc_total",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/part_max_used": {
+              "metric": "part_max_used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "rpc.rpc.NumOpenConnections",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemHeapUsedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/memory/mem_buffers": {
+              "metric": "mem_buffers",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_num_ops": {
+              "metric": "ugi.ugi.LoginSuccessNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/gcTimeMillis": {
+              "metric": "jvm.JvmMetrics.GcTimeMillis",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTerminated": {
+              "metric": "jvm.JvmMetrics.ThreadsTerminated",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/network/bytes_out": {
+              "metric": "bytes_out",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/cpu/cpu_aidle": {
+              "metric": "cpu_aidle",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/network/bytes_in": {
+              "metric": "bytes_in",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/boottime": {
+              "metric": "boottime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/network/pkts_out": {
+              "metric": "pkts_out",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/callQueueLen": {
+              "metric": "rpc.rpc.CallQueueLength",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logInfo": {
+              "metric": "jvm.JvmMetrics.LogInfo",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_num_ops": {
+              "metric": "ugi.ugi.LoginFailureNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_num_ops": {
+              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logError": {
+              "metric": "jvm.JvmMetrics.LogError",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_avg_time": {
+              "metric": "ugi.ugi.LoginFailureAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationSuccesses": {
+              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logFatal": {
+              "metric": "jvm.JvmMetrics.LogFatal",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_avg_time": {
+              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationSuccesses": {
+              "metric": "rpc.metrics.RpcAuthenticationSuccesses",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationFailures": {
+              "metric": "rpc.metrics.RpcAuthenticationFailures",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/network/pkts_in": {
+              "metric": "pkts_in",
+              "pointInTime": true,
+              "temporal": true
+            }
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "metrics": {
+          "default": {
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.ReceivedBytes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.SentBytes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTimeNumOps": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcQueueTimeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTimeAvgTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcQueueTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTimeNumOps": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcProcessingTimeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTimeAvgTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcProcessingTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcAuthenticationFailures": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthenticationFailures",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcAuthenticationSuccesses": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthenticationSuccesses",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcAuthorizationFailures": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthorizationFailures",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcAuthorizationSuccesses": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthorizationSuccesses",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.NumOpenConnections",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/CallQueueLength": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.CallQueueLength",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/MemNonHeapUsedM": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/MemNonHeapCommittedM": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/MemHeapUsedM": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemHeapUsedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/MemHeapCommittedM": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemHeapCommittedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/MemMaxM": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemMaxM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcCountCopy": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCountCopy",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcTimeMillisCopy": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillisCopy",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcCountMarkSweepCompact": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCountMarkSweepCompact",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcTimeMillisMarkSweepCompact": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillisMarkSweepCompact",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcCount": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcTimeMillis": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillis",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsNew": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsNew",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsRunnable": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsRunnable",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsBlocked": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsBlocked",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsWaiting": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsWaiting",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsTimedWaiting": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsTerminated": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsTerminated",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/LogFatal": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogFatal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/LogError": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogError",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/LogWarn": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogWarn",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/LogInfo": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogInfo",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/HeapMemoryMax": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/HeapMemoryUsed": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/HeapMemoryCommitted": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[committed]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/HeapMemoryInit": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[init]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/NonHeapMemoryMax": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/NonHeapMemoryUsed": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/NonHeapMemoryCommitted": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[committed]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/NonHeapMemoryInit": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[init]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/MBeanServerId": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.MBeanServerId",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/SpecificationName": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationName",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/SpecificationVersion": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationVersion",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/SpecificationVendor": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationVendor",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/ImplementationName": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationName",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/ImplementationVersion": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationVersion",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/ImplementationVendor": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationVendor",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/ElapsedTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.ElapsedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/PercentComplete": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.PercentComplete",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingFsImageCount": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingFsImageElapsedTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageElapsedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingFsImageTotal": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingFsImagePercentComplete": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImagePercentComplete",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingEditsCount": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingEditsElapsedTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsElapsedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingEditsTotal": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingEditsPercentComplete": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsPercentComplete",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SavingCheckpointCount": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SavingCheckpointElapsedTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointElapsedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SavingCheckpointTotal": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SavingCheckpointPercentComplete": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointPercentComplete",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SafeModeCount": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SafeModeElapsedTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeElapsedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SafeModeTotal": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SafeModePercentComplete": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModePercentComplete",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/HotSpotDiagnostic/DiagnosticOptions": {
+              "metric": "com.sun.management:type=HotSpotDiagnostic.DiagnosticOptions",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryManager/MemoryPoolNames": {
+              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.MemoryPoolNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryManager/Name": {
+              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryManager/Valid": {
+              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.Valid",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryManager/ObjectName": {
+              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.ObjectName",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Logging/LoggerNames": {
+              "metric": "java.util.logging:type=Logging.LoggerNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/UgiMetrics/LoginSuccessNumOps": {
+              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginSuccessNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/UgiMetrics/LoginSuccessAvgTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginSuccessAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/UgiMetrics/LoginFailureNumOps": {
+              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginFailureNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/UgiMetrics/LoginFailureAvgTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginFailureAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/CollectionUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThreshold": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThreshold",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdCount": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/MemoryManagerNames": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.MemoryManagerNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/PeakUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.PeakUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/Usage": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Usage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdExceeded": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdExceeded",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/UsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.UsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/Name": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/Type": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Type",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/Valid": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Valid",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadAllocatedMemoryEnabled": {
+              "metric": "java.lang:type=Threading.ThreadAllocatedMemoryEnabled",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadAllocatedMemorySupported": {
+              "metric": "java.lang:type=Threading.ThreadAllocatedMemorySupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/DaemonThreadCount": {
+              "metric": "java.lang:type=Threading.DaemonThreadCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/PeakThreadCount": {
+              "metric": "java.lang:type=Threading.PeakThreadCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/CurrentThreadCpuTimeSupported": {
+              "metric": "java.lang:type=Threading.CurrentThreadCpuTimeSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ObjectMonitorUsageSupported": {
+              "metric": "java.lang:type=Threading.ObjectMonitorUsageSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/SynchronizerUsageSupported": {
+              "metric": "java.lang:type=Threading.SynchronizerUsageSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadContentionMonitoringSupported": {
+              "metric": "java.lang:type=Threading.ThreadContentionMonitoringSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadCpuTimeEnabled": {
+              "metric": "java.lang:type=Threading.ThreadCpuTimeEnabled",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/CurrentThreadCpuTime": {
+              "metric": "java.lang:type=Threading.CurrentThreadCpuTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/CurrentThreadUserTime": {
+              "metric": "java.lang:type=Threading.CurrentThreadUserTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadCount": {
+              "metric": "java.lang:type=Threading.ThreadCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/TotalStartedThreadCount": {
+              "metric": "java.lang:type=Threading.TotalStartedThreadCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadCpuTimeSupported": {
+              "metric": "java.lang:type=Threading.ThreadCpuTimeSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadContentionMonitoringEnabled": {
+              "metric": "java.lang:type=Threading.ThreadContentionMonitoringEnabled",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/AllThreadIds": {
+              "metric": "java.lang:type=Threading.AllThreadIds",
+            

<TRUNCATED>

[02/51] [abbrv] ambari git commit: Merge branch 'branch-feature-AMBARI-18456' into trunk

Posted by sm...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsersTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsersTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsersTest.java
index 314e955..d4c90b8 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsersTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsersTest.java
@@ -17,7 +17,16 @@
  */
 package org.apache.ambari.server.serveraction.upgrades;
 
-import com.google.inject.Injector;
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.lang.reflect.Field;
+import java.util.HashMap;
+import java.util.Map;
+
 import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper;
 import org.apache.ambari.server.actionmanager.HostRoleCommand;
 import org.apache.ambari.server.agent.CommandReport;
@@ -25,18 +34,11 @@ import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.easymock.EasyMock;
 import org.junit.Before;
 import org.junit.Test;
 
-import java.lang.reflect.Field;
-import java.util.HashMap;
-import java.util.Map;
-
-import static org.easymock.EasyMock.*;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import com.google.inject.Injector;
 
 /**
  * Tests OozieConfigCalculation logic
@@ -53,52 +55,28 @@ public class FixOozieAdminUsersTest {
     clusters = EasyMock.createMock(Clusters.class);
     cluster = EasyMock.createMock(Cluster.class);
 
+    Map<String, String> mockProperties = new HashMap<String, String>() {{
+      put("falcon_user", "falcon");
+    }};
+
+    Config falconEnvConfig = EasyMock.createNiceMock(Config.class);
+    expect(falconEnvConfig.getType()).andReturn("falcon-env").anyTimes();
+    expect(falconEnvConfig.getProperties()).andReturn(mockProperties).anyTimes();
+
+    mockProperties = new HashMap<String, String>() {{
+      put("oozie_admin_users", "oozie, oozie-admin");
+    }};
+
+    Config oozieEnvConfig = EasyMock.createNiceMock(Config.class);
+    expect(oozieEnvConfig.getType()).andReturn("oozie-env").anyTimes();
+    expect(oozieEnvConfig.getProperties()).andReturn(mockProperties).anyTimes();
 
-    Config falconEnvConfig = new ConfigImpl("falcon-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("falcon_user", "falcon");
-      }};
-
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-
-      @Override
-      public void setProperties(Map<String, String> properties) {
-        mockProperties.putAll(properties);
-      }
-
-      @Override
-      public void persist(boolean newConfig) {
-        // no-op
-      }
-    };
-    Config oozieEnvConfig = new ConfigImpl("oozie-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("oozie_admin_users", "oozie, oozie-admin");
-      }};
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-
-      @Override
-      public void setProperties(Map<String, String> properties) {
-        mockProperties.putAll(properties);
-      }
-
-      @Override
-      public void persist(boolean newConfig) {
-        // no-op
-      }
-    };
     expect(cluster.getDesiredConfigByType("falcon-env")).andReturn(falconEnvConfig).atLeastOnce();
     expect(cluster.getDesiredConfigByType("oozie-env")).andReturn(oozieEnvConfig).atLeastOnce();
 
     expect(clusters.getCluster((String) anyObject())).andReturn(cluster).anyTimes();
     expect(injector.getInstance(Clusters.class)).andReturn(clusters).atLeastOnce();
-    replay(injector, clusters);
+    replay(injector, clusters, falconEnvConfig, oozieEnvConfig);
 
     clustersField = FixOozieAdminUsers.class.getDeclaredField("clusters");
     clustersField.setAccessible(true);

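The hunk above, like the ones that follow in this message, applies one refactoring: the hand-rolled ConfigImpl anonymous subclasses are replaced with EasyMock nice mocks whose getType() and getProperties() calls are stubbed, and each new mock is added to the replay(...) call alongside the cluster mocks. A minimal, self-contained sketch of that pattern follows; the class name, helper name, config type and property values are illustrative only and are not part of the commit.

    import static org.easymock.EasyMock.expect;

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.ambari.server.state.Config;
    import org.easymock.EasyMock;

    public class ConfigMockSketch {

      // Builds a stand-in Config that only answers getType() and getProperties(),
      // mirroring what the rewritten tests set up before replay().
      static Config mockConfig(String type, Map<String, String> properties) {
        Config config = EasyMock.createNiceMock(Config.class);
        expect(config.getType()).andReturn(type).anyTimes();
        expect(config.getProperties()).andReturn(properties).anyTimes();
        EasyMock.replay(config); // the real tests replay the config together with cluster/clusters
        return config;
      }

      public static void main(String[] args) {
        Map<String, String> props = new HashMap<String, String>();
        props.put("falcon_user", "falcon"); // sample property, as in FixOozieAdminUsersTest
        Config falconEnv = mockConfig("falcon-env", props);
        System.out.println(falconEnv.getType() + " -> " + falconEnv.getProperties());
      }
    }
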
http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeActionTest.java
index 4c1d7a3..f8a5373 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeActionTest.java
@@ -17,8 +17,18 @@
  */
 package org.apache.ambari.server.serveraction.upgrades;
 
-import com.google.inject.Injector;
-import junit.framework.Assert;
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+import java.lang.reflect.Field;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
 import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper;
 import org.apache.ambari.server.actionmanager.HostRoleCommand;
 import org.apache.ambari.server.agent.CommandReport;
@@ -26,21 +36,13 @@ import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.easymock.EasyMock;
 import org.junit.Before;
 import org.junit.Test;
 
-import java.lang.reflect.Field;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
+import com.google.inject.Injector;
 
-import static org.easymock.EasyMock.*;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import junit.framework.Assert;
 
 /**
  * Tests HiveEnvClasspathAction logic
@@ -55,99 +57,86 @@ public class HBaseEnvMaxDirectMemorySizeActionTest {
     injector = EasyMock.createMock(Injector.class);
     clusters = EasyMock.createMock(Clusters.class);
     Cluster cluster = EasyMock.createMock(Cluster.class);
-
-    Config hbaseEnv = new ConfigImpl("hbase-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("content","# Set environment variables here.\n" +
-          "\n" +
-          "# The java implementation to use. Java 1.6 required.\n" +
-          "export JAVA_HOME={{java64_home}}\n" +
-          "\n" +
-          "# HBase Configuration directory\n" +
-          "export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n" +
-          "\n" +
-          "# Extra Java CLASSPATH elements. Optional.\n" +
-          "export HBASE_CLASSPATH=${HBASE_CLASSPATH}\n" +
-          "\n" +
-          "# The maximum amount of heap to use, in MB. Default is 1000.\n" +
-          "# export HBASE_HEAPSIZE=1000\n" +
-          "\n" +
-          "# Extra Java runtime options.\n" +
-          "# Below are what we set by default. May only work with SUN JVM.\n" +
-          "# For more on why as well as other possible settings,\n" +
-          "# see http://wiki.apache.org/hadoop/PerformanceTuning\n" +
-          "export SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n" +
-          "# Uncomment below to enable java garbage collection logging.\n" +
-          "# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log -Djava.io.tmpdir={{java_io_tmpdir}}\"\n" +
-          "\n" +
-          "# Uncomment and adjust to enable JMX exporting\n" +
-          "# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n" +
-          "# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n" +
-          "#\n" +
-          "# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\n" +
-          "# If you want to configure BucketCache, specify '-XX: MaxDirectMemorySize=' with proper direct memory size\n" +
-          "# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n" +
-          "# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n" +
-          "\n" +
-          "# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\n" +
-          "export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n" +
-          "\n" +
-          "# Extra ssh options. Empty by default.\n" +
-          "# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n" +
-          "\n" +
-          "# Where log files are stored. $HBASE_HOME/logs by default.\n" +
-          "export HBASE_LOG_DIR={{log_dir}}\n" +
-          "\n" +
-          "# A string representing this instance of hbase. $USER by default.\n" +
-          "# export HBASE_IDENT_STRING=$USER\n" +
-          "\n" +
-          "# The scheduling priority for daemon processes. See 'man nice'.\n" +
-          "# export HBASE_NICENESS=10\n" +
-          "\n" +
-          "# The directory where pid files are stored. /tmp by default.\n" +
-          "export HBASE_PID_DIR={{pid_dir}}\n" +
-          "\n" +
-          "# Seconds to sleep between slave commands. Unset by default. This\n" +
-          "# can be useful in large clusters, where, e.g., slave rsyncs can\n" +
-          "# otherwise arrive faster than the master can service them.\n" +
-          "# export HBASE_SLAVE_SLEEP=0.1\n" +
-          "\n" +
-          "# Tell HBase whether it should manage it's own instance of Zookeeper or not.\n" +
-          "export HBASE_MANAGES_ZK=false\n" +
-          "\n" +
-          "{% if security_enabled %}\n" +
-          "export HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.security.auth.login.config={{client_jaas_config_file}} -Djava.io.tmpdir={{java_io_tmpdir}}\"\n" +
-          "export HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} -Djava.security.auth.login.config={{master_jaas_config_file}}\"\n" +
-          "export HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} -Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n" +
-          "{% else %}\n" +
-          "export HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.io.tmpdir={{java_io_tmpdir}}\"\n" +
-          "export HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}}\"\n" +
-          "export HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n" +
-          "{% endif %}");
-      }};
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-
-      @Override
-      public void setProperties(Map<String, String> properties) {
-        mockProperties.putAll(properties);
-      }
-
-      @Override
-      public void persist(boolean newConfig) {
-        // no-op
-      }
-    };
-
+    Config hbaseEnv = EasyMock.createNiceMock(Config.class);
+
+    Map<String, String> mockProperties = new HashMap<String, String>() {{
+      put("content","# Set environment variables here.\n" +
+        "\n" +
+        "# The java implementation to use. Java 1.6 required.\n" +
+        "export JAVA_HOME={{java64_home}}\n" +
+        "\n" +
+        "# HBase Configuration directory\n" +
+        "export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n" +
+        "\n" +
+        "# Extra Java CLASSPATH elements. Optional.\n" +
+        "export HBASE_CLASSPATH=${HBASE_CLASSPATH}\n" +
+        "\n" +
+        "# The maximum amount of heap to use, in MB. Default is 1000.\n" +
+        "# export HBASE_HEAPSIZE=1000\n" +
+        "\n" +
+        "# Extra Java runtime options.\n" +
+        "# Below are what we set by default. May only work with SUN JVM.\n" +
+        "# For more on why as well as other possible settings,\n" +
+        "# see http://wiki.apache.org/hadoop/PerformanceTuning\n" +
+        "export SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n" +
+        "# Uncomment below to enable java garbage collection logging.\n" +
+        "# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log -Djava.io.tmpdir={{java_io_tmpdir}}\"\n" +
+        "\n" +
+        "# Uncomment and adjust to enable JMX exporting\n" +
+        "# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n" +
+        "# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n" +
+        "#\n" +
+        "# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\n" +
+        "# If you want to configure BucketCache, specify '-XX: MaxDirectMemorySize=' with proper direct memory size\n" +
+        "# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n" +
+        "# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n" +
+        "\n" +
+        "# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\n" +
+        "export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n" +
+        "\n" +
+        "# Extra ssh options. Empty by default.\n" +
+        "# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n" +
+        "\n" +
+        "# Where log files are stored. $HBASE_HOME/logs by default.\n" +
+        "export HBASE_LOG_DIR={{log_dir}}\n" +
+        "\n" +
+        "# A string representing this instance of hbase. $USER by default.\n" +
+        "# export HBASE_IDENT_STRING=$USER\n" +
+        "\n" +
+        "# The scheduling priority for daemon processes. See 'man nice'.\n" +
+        "# export HBASE_NICENESS=10\n" +
+        "\n" +
+        "# The directory where pid files are stored. /tmp by default.\n" +
+        "export HBASE_PID_DIR={{pid_dir}}\n" +
+        "\n" +
+        "# Seconds to sleep between slave commands. Unset by default. This\n" +
+        "# can be useful in large clusters, where, e.g., slave rsyncs can\n" +
+        "# otherwise arrive faster than the master can service them.\n" +
+        "# export HBASE_SLAVE_SLEEP=0.1\n" +
+        "\n" +
+        "# Tell HBase whether it should manage it's own instance of Zookeeper or not.\n" +
+        "export HBASE_MANAGES_ZK=false\n" +
+        "\n" +
+        "{% if security_enabled %}\n" +
+        "export HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.security.auth.login.config={{client_jaas_config_file}} -Djava.io.tmpdir={{java_io_tmpdir}}\"\n" +
+        "export HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} -Djava.security.auth.login.config={{master_jaas_config_file}}\"\n" +
+        "export HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} -Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n" +
+        "{% else %}\n" +
+        "export HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.io.tmpdir={{java_io_tmpdir}}\"\n" +
+        "export HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}}\"\n" +
+        "export HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n" +
+        "{% endif %}");
+    }};
+
+    expect(hbaseEnv.getType()).andReturn("hbase-env").anyTimes();
+    expect(hbaseEnv.getProperties()).andReturn(mockProperties).anyTimes();
 
     expect(cluster.getDesiredConfigByType("hbase-env")).andReturn(hbaseEnv).atLeastOnce();
 
     expect(clusters.getCluster((String) anyObject())).andReturn(cluster).anyTimes();
     expect(injector.getInstance(Clusters.class)).andReturn(clusters).atLeastOnce();
 
-    replay(injector, clusters, cluster);
+    replay(injector, clusters, cluster, hbaseEnv);
 
     m_clusterField = HBaseEnvMaxDirectMemorySizeAction.class.getDeclaredField("clusters");
     m_clusterField.setAccessible(true);

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathActionTest.java
index 9bde631..8926203 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathActionTest.java
@@ -17,8 +17,18 @@
  */
 package org.apache.ambari.server.serveraction.upgrades;
 
-import com.google.inject.Injector;
-import junit.framework.Assert;
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+import java.lang.reflect.Field;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
 import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper;
 import org.apache.ambari.server.actionmanager.HostRoleCommand;
 import org.apache.ambari.server.agent.CommandReport;
@@ -26,22 +36,13 @@ import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.easymock.EasyMock;
 import org.junit.Before;
 import org.junit.Test;
 
-import java.lang.reflect.Field;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
+import com.google.inject.Injector;
 
-import static org.easymock.EasyMock.anyObject;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
+import junit.framework.Assert;
 
 /**
  * Tests HiveEnvClasspathAction logic
@@ -57,79 +58,66 @@ public class HiveEnvClasspathActionTest {
     m_clusters = EasyMock.createMock(Clusters.class);
     Cluster cluster = EasyMock.createMock(Cluster.class);
 
-    Config hiveEnv = new ConfigImpl("hive-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("content", "      export HADOOP_USER_CLASSPATH_FIRST=true  #this prevents old metrics libs from mapreduce lib from bringing in old jar deps overriding HIVE_LIB\n" +
-          "      if [ \"$SERVICE\" = \"cli\" ]; then\n" +
-          "      if [ -z \"$DEBUG\" ]; then\n" +
-          "      export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseNUMA -XX:+UseParallelGC -XX:-UseGCOverheadLimit\"\n" +
-          "      else\n" +
-          "      export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit\"\n" +
-          "      fi\n" +
-          "      fi\n" +
-          "\n" +
-          "      # The heap size of the jvm stared by hive shell script can be controlled via:\n" +
-          "\n" +
-          "      if [ \"$SERVICE\" = \"metastore\" ]; then\n" +
-          "      export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore\n" +
-          "      else\n" +
-          "      export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client\n" +
-          "      fi\n" +
-          "\n" +
-          "      export HADOOP_CLIENT_OPTS=\"$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m\"\n" +
-          "\n" +
-          "      # Larger heap size may be required when running queries over large number of files or partitions.\n" +
-          "      # By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be\n" +
-          "      # appropriate for hive server (hwi etc).\n" +
-          "\n" +
-          "\n" +
-          "      # Set HADOOP_HOME to point to a specific hadoop install directory\n" +
-          "      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n" +
-          "\n" +
-          "      # Hive Configuration Directory can be controlled by:\n" +
-          "      export HIVE_CONF_DIR=test\n" +
-          "\n" +
-          "      # Folder containing extra libraries required for hive compilation/execution can be controlled by:\n" +
-          "      if [ \"${HIVE_AUX_JARS_PATH}\" != \"\" ]; then\n" +
-          "      if [ -f \"${HIVE_AUX_JARS_PATH}\" ]; then\n" +
-          "      export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}\n" +
-          "      elif [ -d \"/usr/hdp/current/hive-webhcat/share/hcatalog\" ]; then\n" +
-          "      export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar\n" +
-          "      fi\n" +
-          "      elif [ -d \"/usr/hdp/current/hive-webhcat/share/hcatalog\" ]; then\n" +
-          "      export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar\n" +
-          "      fi\n" +
-          "\n" +
-          "      export METASTORE_PORT={{hive_metastore_port}}\n" +
-          "\n" +
-          "      {% if sqla_db_used or lib_dir_available %}\n" +
-          "      export LD_LIBRARY_PATH=\"$LD_LIBRARY_PATH:{{jdbc_libs_dir}}\"\n" +
-          "      export JAVA_LIBRARY_PATH=\"$JAVA_LIBRARY_PATH:{{jdbc_libs_dir}}\"\n" +
-          "      {% endif %}");
-      }};
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-
-      @Override
-      public void setProperties(Map<String, String> properties) {
-        mockProperties.putAll(properties);
-      }
-
-      @Override
-      public void persist(boolean newConfig) {
-        // no-op
-      }
-    };
-
+    Map<String, String> mockProperties = new HashMap<String, String>() {{
+      put("content", "      export HADOOP_USER_CLASSPATH_FIRST=true  #this prevents old metrics libs from mapreduce lib from bringing in old jar deps overriding HIVE_LIB\n" +
+        "      if [ \"$SERVICE\" = \"cli\" ]; then\n" +
+        "      if [ -z \"$DEBUG\" ]; then\n" +
+        "      export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseNUMA -XX:+UseParallelGC -XX:-UseGCOverheadLimit\"\n" +
+        "      else\n" +
+        "      export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit\"\n" +
+        "      fi\n" +
+        "      fi\n" +
+        "\n" +
+        "      # The heap size of the jvm stared by hive shell script can be controlled via:\n" +
+        "\n" +
+        "      if [ \"$SERVICE\" = \"metastore\" ]; then\n" +
+        "      export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore\n" +
+        "      else\n" +
+        "      export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client\n" +
+        "      fi\n" +
+        "\n" +
+        "      export HADOOP_CLIENT_OPTS=\"$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m\"\n" +
+        "\n" +
+        "      # Larger heap size may be required when running queries over large number of files or partitions.\n" +
+        "      # By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be\n" +
+        "      # appropriate for hive server (hwi etc).\n" +
+        "\n" +
+        "\n" +
+        "      # Set HADOOP_HOME to point to a specific hadoop install directory\n" +
+        "      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n" +
+        "\n" +
+        "      # Hive Configuration Directory can be controlled by:\n" +
+        "      export HIVE_CONF_DIR=test\n" +
+        "\n" +
+        "      # Folder containing extra libraries required for hive compilation/execution can be controlled by:\n" +
+        "      if [ \"${HIVE_AUX_JARS_PATH}\" != \"\" ]; then\n" +
+        "      if [ -f \"${HIVE_AUX_JARS_PATH}\" ]; then\n" +
+        "      export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}\n" +
+        "      elif [ -d \"/usr/hdp/current/hive-webhcat/share/hcatalog\" ]; then\n" +
+        "      export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar\n" +
+        "      fi\n" +
+        "      elif [ -d \"/usr/hdp/current/hive-webhcat/share/hcatalog\" ]; then\n" +
+        "      export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar\n" +
+        "      fi\n" +
+        "\n" +
+        "      export METASTORE_PORT={{hive_metastore_port}}\n" +
+        "\n" +
+        "      {% if sqla_db_used or lib_dir_available %}\n" +
+        "      export LD_LIBRARY_PATH=\"$LD_LIBRARY_PATH:{{jdbc_libs_dir}}\"\n" +
+        "      export JAVA_LIBRARY_PATH=\"$JAVA_LIBRARY_PATH:{{jdbc_libs_dir}}\"\n" +
+        "      {% endif %}");
+    }};
+
+    Config hiveEnv = EasyMock.createNiceMock(Config.class);
+    expect(hiveEnv.getType()).andReturn("hive-env").anyTimes();
+    expect(hiveEnv.getProperties()).andReturn(mockProperties).anyTimes();
 
     expect(cluster.getDesiredConfigByType("hive-env")).andReturn(hiveEnv).atLeastOnce();
 
     expect(m_clusters.getCluster((String) anyObject())).andReturn(cluster).anyTimes();
     expect(m_injector.getInstance(Clusters.class)).andReturn(m_clusters).atLeastOnce();
 
-    replay(m_injector, m_clusters, cluster);
+    replay(m_injector, m_clusters, cluster, hiveEnv);
 
     m_clusterField = HiveEnvClasspathAction.class.getDeclaredField("clusters");
     m_clusterField.setAccessible(true);

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigActionTest.java
index 907194c..cd5eb9d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigActionTest.java
@@ -91,7 +91,7 @@ public class HiveZKQuorumConfigActionTest {
     m_hiveSiteConfig.setProperties(EasyMock.anyObject(Map.class));
     EasyMock.expectLastCall().once();
 
-    m_hiveSiteConfig.persist(false);
+    m_hiveSiteConfig.save();
     EasyMock.expectLastCall().once();
 
     EasyMock.expect(m_cluster.getDesiredConfigByType(HiveZKQuorumConfigAction.HIVE_SITE_CONFIG_TYPE)).andReturn(m_hiveSiteConfig).atLeastOnce();

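The single-line change above reflects the Config API change visible throughout this patch: the test no longer expects persist(false) on the hive-site config but save(). Recording that expectation on a strict mock looks roughly like the sketch below; the class and variable names are illustrative, and the surrounding test wiring is omitted.

    import java.util.Map;

    import org.apache.ambari.server.state.Config;
    import org.easymock.EasyMock;

    public class ConfigSaveExpectationSketch {

      // Sets up a Config mock that must receive setProperties(...) and save()
      // exactly once each, mirroring the HiveZKQuorumConfigActionTest setup.
      static Config expectSaveOnce() {
        Config hiveSiteConfig = EasyMock.createMock(Config.class);

        hiveSiteConfig.setProperties(EasyMock.<Map<String, String>>anyObject());
        EasyMock.expectLastCall().once();

        hiveSiteConfig.save(); // replaces the removed persist(false)
        EasyMock.expectLastCall().once();

        EasyMock.replay(hiveSiteConfig);
        return hiveSiteConfig;
      }
    }
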
http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/KerberosKeytabsActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/KerberosKeytabsActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/KerberosKeytabsActionTest.java
index d374d75..d18f727 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/KerberosKeytabsActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/KerberosKeytabsActionTest.java
@@ -36,7 +36,6 @@ import org.apache.ambari.server.controller.KerberosHelper;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.apache.ambari.server.state.SecurityType;
 import org.apache.commons.lang.StringUtils;
 import org.easymock.EasyMock;
@@ -65,26 +64,13 @@ public class KerberosKeytabsActionTest {
     m_clusters = EasyMock.createMock(Clusters.class);
     m_kerberosHelper = EasyMock.createMock(KerberosHelper.class);
 
-    m_kerberosConfig = new ConfigImpl("kerberos-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("kerberos-env", "");
-      }};
+    Map<String, String> mockProperties = new HashMap<String, String>() {{
+      put("kerberos-env", "");
+    }};
 
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-
-      @Override
-      public void setProperties(Map<String, String> properties) {
-        mockProperties.putAll(properties);
-      }
-
-      @Override
-      public void persist(boolean newConfig) {
-        // no-op
-      }
-    };
+    m_kerberosConfig = EasyMock.createNiceMock(Config.class);
+    expect(m_kerberosConfig.getType()).andReturn("kerberos-env").anyTimes();
+    expect(m_kerberosConfig.getProperties()).andReturn(mockProperties).anyTimes();
 
     Cluster cluster = EasyMock.createMock(Cluster.class);
 
@@ -92,7 +78,7 @@ public class KerberosKeytabsActionTest {
     expect(cluster.getSecurityType()).andReturn(SecurityType.KERBEROS).anyTimes();
     expect(m_clusters.getCluster((String) anyObject())).andReturn(cluster).anyTimes();
 
-    replay(m_clusters, cluster);
+    replay(m_clusters, cluster, m_kerberosConfig);
 
     m_injector = Guice.createInjector(new AbstractModule() {
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculationTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculationTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculationTest.java
index e673714..7a6a6c3 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculationTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculationTest.java
@@ -35,7 +35,6 @@ import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.easymock.EasyMock;
 import org.junit.Before;
 import org.junit.Test;
@@ -57,54 +56,27 @@ public class RangerConfigCalculationTest {
     m_clusters = EasyMock.createMock(Clusters.class);
     Cluster cluster = EasyMock.createMock(Cluster.class);
 
-    Config adminConfig = new ConfigImpl("admin-properties") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("DB_FLAVOR", "MYSQL");
-        put("db_host", "host1");
-        put("db_name", "ranger");
-        put("audit_db_name", "ranger_audit");
-      }};
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-    };
-
-    Config adminSiteConfig = new ConfigImpl("admin-properties") {
-      Map<String, String> mockProperties = new HashMap<String, String>();
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-
-      @Override
-      public void setProperties(Map<String, String> properties) {
-        mockProperties.putAll(properties);
-      }
-
-      @Override
-      public void persist(boolean newConfig) {
-        // no-op
-      }
-    };
-
-    Config rangerEnv = new ConfigImpl("ranger-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>();
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-
-      @Override
-      public void setProperties(Map<String, String> properties) {
-        mockProperties.putAll(properties);
-      }
-
-      @Override
-      public void persist(boolean newConfig) {
-        // no-op
-      }
-    };
+    Map<String, String> mockProperties = new HashMap<String, String>() {{
+      put("DB_FLAVOR", "MYSQL");
+      put("db_host", "host1");
+      put("db_name", "ranger");
+      put("audit_db_name", "ranger_audit");
+    }};
+
+    Config adminConfig = EasyMock.createNiceMock(Config.class);
+    expect(adminConfig.getType()).andReturn("admin-properties").anyTimes();
+    expect(adminConfig.getProperties()).andReturn(mockProperties).anyTimes();
+
+    mockProperties = new HashMap<String, String>();
+
+    Config adminSiteConfig = EasyMock.createNiceMock(Config.class);
+    expect(adminSiteConfig.getType()).andReturn("admin-properties").anyTimes();
+    expect(adminSiteConfig.getProperties()).andReturn(mockProperties).anyTimes();
+
+    Config rangerEnv = EasyMock.createNiceMock(Config.class);
+    expect(rangerEnv.getType()).andReturn("ranger-env").anyTimes();
+    expect(rangerEnv.getProperties()).andReturn(mockProperties).anyTimes();
+
 
     expect(cluster.getDesiredConfigByType("admin-properties")).andReturn(adminConfig).atLeastOnce();
     expect(cluster.getDesiredConfigByType("ranger-admin-site")).andReturn(adminSiteConfig).atLeastOnce();
@@ -113,7 +85,7 @@ public class RangerConfigCalculationTest {
     expect(m_clusters.getCluster((String) anyObject())).andReturn(cluster).anyTimes();
     expect(m_injector.getInstance(Clusters.class)).andReturn(m_clusters).atLeastOnce();
 
-    replay(m_injector, m_clusters, cluster);
+    replay(m_injector, m_clusters, cluster, adminConfig, adminSiteConfig, rangerEnv);
 
     m_clusterField = RangerConfigCalculation.class.getDeclaredField("m_clusters");
     m_clusterField.setAccessible(true);

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculationTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculationTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculationTest.java
index 25acb45..06092c3 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculationTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculationTest.java
@@ -25,6 +25,7 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
 import java.lang.reflect.Field;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -34,9 +35,8 @@ import org.apache.ambari.server.agent.CommandReport;
 import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.SecurityType;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
+import org.apache.ambari.server.state.SecurityType;
 import org.easymock.EasyMock;
 import org.junit.Before;
 import org.junit.Test;
@@ -59,124 +59,50 @@ public class RangerKerberosConfigCalculationTest {
     m_clusters = EasyMock.createMock(Clusters.class);
     Cluster cluster = EasyMock.createMock(Cluster.class);
 
-    Config hadoopConfig = new ConfigImpl("hadoop-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("hdfs_user", "hdfs");
-      }};
-
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-    };
-
-
-    Config hiveConfig = new ConfigImpl("hive-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("hive_user", "hive");
-      }};
-
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-    };
-
-    Config yarnConfig = new ConfigImpl("yarn-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("yarn_user", "yarn");
-      }};
-
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-    };
-
-    Config hbaseConfig = new ConfigImpl("hbase-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("hbase_user", "hbase");
-      }};
-
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-    };
-
-    Config knoxConfig = new ConfigImpl("knox-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("knox_user", "knox");
-      }};
-
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-    };
-
-    Config stormConfig = new ConfigImpl("storm-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("storm_user", "storm");
-        put("storm_principal_name", "storm-c1@EXAMLE.COM");
-      }};
-
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-    };
-
-    Config kafkaConfig = new ConfigImpl("kafka-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("kafka_user", "kafka");
-      }};
-
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-    };
-
-    Config kmsConfig = new ConfigImpl("kms-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("kms_user", "kms");
-      }};
-
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-    };
-
-    Config hdfsSiteConfig = new ConfigImpl("hdfs-site") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("dfs.web.authentication.kerberos.keytab", "/etc/security/keytabs/spnego.kytab");
-      }};
-
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-    };
-
-    Config adminSiteConfig = new ConfigImpl("ranger-admin-site") {
-      Map<String, String> mockProperties = new HashMap<String, String>();
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-
-      @Override
-      public void setProperties(Map<String, String> properties) {
-        mockProperties.putAll(properties);
-      }
-
-      @Override
-      public void persist(boolean newConfig) {
-        // no-op
-      }
-    };
+    Config hadoopConfig = EasyMock.createNiceMock(Config.class);
+    expect(hadoopConfig.getType()).andReturn("hadoop-env").anyTimes();
+    expect(hadoopConfig.getProperties()).andReturn(Collections.singletonMap("hdfs_user", "hdfs")).anyTimes();
+
+    Config hiveConfig = EasyMock.createNiceMock(Config.class);
+    expect(hiveConfig.getType()).andReturn("hive-env").anyTimes();
+    expect(hiveConfig.getProperties()).andReturn(Collections.singletonMap("hive_user", "hive")).anyTimes();
+
+    Config yarnConfig = EasyMock.createNiceMock(Config.class);
+    expect(yarnConfig.getType()).andReturn("yarn-env").anyTimes();
+    expect(yarnConfig.getProperties()).andReturn(Collections.singletonMap("yarn_user", "yarn")).anyTimes();
+
+    Config hbaseConfig = EasyMock.createNiceMock(Config.class);
+    expect(hbaseConfig.getType()).andReturn("hbase-env").anyTimes();
+    expect(hbaseConfig.getProperties()).andReturn(Collections.singletonMap("hbase_user", "hbase")).anyTimes();
+
+    Config knoxConfig = EasyMock.createNiceMock(Config.class);
+    expect(knoxConfig.getType()).andReturn("knox-env").anyTimes();
+    expect(knoxConfig.getProperties()).andReturn(Collections.singletonMap("knox_user", "knox")).anyTimes();
+
+    Map<String, String> mockProperties = new HashMap<String, String>() {{
+      put("storm_user", "storm");
+      put("storm_principal_name", "storm-c1@EXAMLE.COM");
+    }};
+
+    Config stormConfig = EasyMock.createNiceMock(Config.class);
+    expect(stormConfig.getType()).andReturn("storm-env").anyTimes();
+    expect(stormConfig.getProperties()).andReturn(mockProperties).anyTimes();
+
+    Config kafkaConfig = EasyMock.createNiceMock(Config.class);
+    expect(kafkaConfig.getType()).andReturn("kafka-env").anyTimes();
+    expect(kafkaConfig.getProperties()).andReturn(Collections.singletonMap("kafka_user", "kafka")).anyTimes();
+
+    Config kmsConfig = EasyMock.createNiceMock(Config.class);
+    expect(kmsConfig.getType()).andReturn("kms-env").anyTimes();
+    expect(kmsConfig.getProperties()).andReturn(Collections.singletonMap("kms_user", "kms")).anyTimes();
+
+    Config hdfsSiteConfig = EasyMock.createNiceMock(Config.class);
+    expect(hdfsSiteConfig.getType()).andReturn("hdfs-site").anyTimes();
+    expect(hdfsSiteConfig.getProperties()).andReturn(Collections.singletonMap("dfs.web.authentication.kerberos.keytab", "/etc/security/keytabs/spnego.kytab")).anyTimes();
+
+    Config adminSiteConfig = EasyMock.createNiceMock(Config.class);
+    expect(adminSiteConfig.getType()).andReturn("ranger-admin-site").anyTimes();
+    expect(adminSiteConfig.getProperties()).andReturn(new HashMap<String,String>()).anyTimes();
 
     expect(cluster.getDesiredConfigByType("hadoop-env")).andReturn(hadoopConfig).atLeastOnce();
     expect(cluster.getDesiredConfigByType("hive-env")).andReturn(hiveConfig).atLeastOnce();
@@ -193,7 +119,8 @@ public class RangerKerberosConfigCalculationTest {
     expect(m_injector.getInstance(Clusters.class)).andReturn(m_clusters).atLeastOnce();
     expect(cluster.getSecurityType()).andReturn(SecurityType.KERBEROS).anyTimes();
 
-    replay(m_injector, m_clusters, cluster);
+    replay(m_injector, m_clusters, cluster, hadoopConfig, hiveConfig, yarnConfig, hbaseConfig,
+        knoxConfig, stormConfig, kafkaConfig, kmsConfig, hdfsSiteConfig, adminSiteConfig);
 
     m_clusterField = RangerKerberosConfigCalculation.class.getDeclaredField("m_clusters");
     m_clusterField.setAccessible(true);
@@ -236,7 +163,7 @@ public class RangerKerberosConfigCalculationTest {
     assertTrue(map.containsKey("ranger.plugins.storm.serviceuser"));
     assertTrue(map.containsKey("ranger.plugins.kafka.serviceuser"));
     assertTrue(map.containsKey("ranger.plugins.kms.serviceuser"));
-    assertTrue(map.containsKey("ranger.spnego.kerberos.keytab"));    
+    assertTrue(map.containsKey("ranger.spnego.kerberos.keytab"));
 
 
     assertEquals("hdfs", map.get("ranger.plugins.hdfs.serviceuser"));
@@ -254,4 +181,4 @@ public class RangerKerberosConfigCalculationTest {
 
   }
 
-} 
+}

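Where a config carries only a single property, the rewritten test above uses Collections.singletonMap rather than a double-brace-initialized HashMap, which drops one anonymous class per map. A sketch of that variant (type, key and value are placeholders, not taken from the commit):

    import static org.easymock.EasyMock.expect;

    import java.util.Collections;

    import org.apache.ambari.server.state.Config;
    import org.easymock.EasyMock;

    public class SingletonConfigSketch {

      // One-property Config mock, as used here for hadoop-env, hive-env, yarn-env, etc.
      static Config singlePropertyConfig(String type, String key, String value) {
        Config config = EasyMock.createNiceMock(Config.class);
        expect(config.getType()).andReturn(type).anyTimes();
        expect(config.getProperties()).andReturn(Collections.singletonMap(key, value)).anyTimes();
        EasyMock.replay(config);
        return config;
      }
    }
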
http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfigTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfigTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfigTest.java
index e000c65..7a0d66f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfigTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfigTest.java
@@ -34,9 +34,8 @@ import org.apache.ambari.server.agent.CommandReport;
 import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.SecurityType;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
+import org.apache.ambari.server.state.SecurityType;
 import org.easymock.EasyMock;
 import org.junit.Before;
 import org.junit.Test;
@@ -55,34 +54,19 @@ public class RangerKmsProxyConfigTest {
     m_clusters = EasyMock.createMock(Clusters.class);
     Cluster cluster = EasyMock.createMock(Cluster.class);
 
-    Config rangerEnv = new ConfigImpl("ranger-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
+    Map<String, String> mockProperties = new HashMap<String, String>() {
+      {
         put("ranger_user", "ranger");
-      }};
-
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
       }
     };
 
-    Config kmsSite = new ConfigImpl("kms-site") {
-      Map<String, String> mockProperties = new HashMap<String, String>();
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-
-      @Override
-      public void setProperties(Map<String, String> properties) {
-        mockProperties.putAll(properties);
-      }
+    Config rangerEnv = EasyMock.createNiceMock(Config.class);
+    expect(rangerEnv.getType()).andReturn("ranger-env").anyTimes();
+    expect(rangerEnv.getProperties()).andReturn(mockProperties).anyTimes();
 
-      @Override
-      public void persist(boolean newConfig) {
-        // no-op
-      }
-    };
+    Config kmsSite = EasyMock.createNiceMock(Config.class);
+    expect(kmsSite.getType()).andReturn("kms-site").anyTimes();
+    expect(kmsSite.getProperties()).andReturn(mockProperties).anyTimes();
 
     expect(cluster.getDesiredConfigByType("ranger-env")).andReturn(rangerEnv).atLeastOnce();
     expect(cluster.getDesiredConfigByType("kms-site")).andReturn(kmsSite).atLeastOnce();
@@ -90,7 +74,7 @@ public class RangerKmsProxyConfigTest {
     expect(m_injector.getInstance(Clusters.class)).andReturn(m_clusters).atLeastOnce();
     expect(cluster.getSecurityType()).andReturn(SecurityType.KERBEROS).anyTimes();
 
-    replay(m_injector, m_clusters, cluster);
+    replay(m_injector, m_clusters, cluster, rangerEnv, kmsSite);
 
     m_clusterField = RangerKmsProxyConfig.class.getDeclaredField("m_clusters");
     m_clusterField.setAccessible(true);

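The Ranger test diffs above replace the anonymous ConfigImpl subclasses with plain EasyMock nice mocks that only stub getType() and getProperties(). For readers following the refactor, a minimal self-contained sketch of that mocking pattern; the class name and property values are illustrative, and only the Config and EasyMock calls visible in the patches are used:

    import static org.easymock.EasyMock.expect;
    import static org.easymock.EasyMock.replay;

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.ambari.server.state.Config;
    import org.easymock.EasyMock;

    public class ConfigNiceMockSketch {
      // Builds a stub Config the way the patched tests do: a nice mock whose
      // type and property map are fixed and whose other methods fall back to
      // EasyMock defaults instead of failing.
      static Config stub(String type, Map<String, String> properties) {
        Config config = EasyMock.createNiceMock(Config.class);
        expect(config.getType()).andReturn(type).anyTimes();
        expect(config.getProperties()).andReturn(properties).anyTimes();
        replay(config); // the real tests pass the stub to one shared replay(...) call
        return config;
      }

      public static void main(String[] args) {
        Map<String, String> props = new HashMap<String, String>();
        props.put("ranger_user", "ranger"); // value taken from the patch above
        Config rangerEnv = stub("ranger-env", props);
        System.out.println(rangerEnv.getType() + " -> " + rangerEnv.getProperties());
      }
    }

Note that every stub has to appear in the shared replay(...) call, which is why the patches extend the replay argument lists as well.
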
http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfigTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfigTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfigTest.java
index e65a824..518ab42 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfigTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfigTest.java
@@ -36,7 +36,6 @@ import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.easymock.EasyMock;
 import org.junit.Before;
 import org.junit.Test;
@@ -58,33 +57,20 @@ public class SparkShufflePropertyConfigTest {
     m_clusters = EasyMock.createMock(Clusters.class);
     cluster = EasyMock.createMock(Cluster.class);
 
+    Map<String, String> mockProperties = new HashMap<String, String>() {{
+      put("yarn.nodemanager.aux-services", "some_service");
+    }};
 
-    Config adminConfig = new ConfigImpl("yarn-site") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("yarn.nodemanager.aux-services", "some_service");
-      }};
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
+    Config yarnConfig = EasyMock.createNiceMock(Config.class);
+    expect(yarnConfig.getType()).andReturn("yarn-site").anyTimes();
+    expect(yarnConfig.getProperties()).andReturn(mockProperties).anyTimes();
 
-      @Override
-      public void setProperties(Map<String, String> properties) {
-        mockProperties.putAll(properties);
-      }
-
-      @Override
-      public void persist(boolean newConfig) {
-        // no-op
-      }
-    };
-
-    expect(cluster.getDesiredConfigByType("yarn-site")).andReturn(adminConfig).atLeastOnce();
+    expect(cluster.getDesiredConfigByType("yarn-site")).andReturn(yarnConfig).atLeastOnce();
 
     expect(m_clusters.getCluster((String) anyObject())).andReturn(cluster).anyTimes();
     expect(m_injector.getInstance(Clusters.class)).andReturn(m_clusters).atLeastOnce();
 
-    replay(m_injector, m_clusters);
+    replay(m_injector, m_clusters, yarnConfig);
 
     clusterField = SparkShufflePropertyConfig.class.getDeclaredField("clusters");
     clusterField.setAccessible(true);

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
index 8f9d4f4..262b10a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
@@ -67,7 +67,7 @@ import org.apache.ambari.server.serveraction.ServerAction;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
+import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.RepositoryInfo;
 import org.apache.ambari.server.state.RepositoryVersionState;
@@ -153,6 +153,8 @@ public class UpgradeActionTest {
   private AmbariMetaInfo ambariMetaInfo;
   @Inject
   private FinalizeUpgradeAction finalizeUpgradeAction;
+  @Inject
+  private ConfigFactory configFactory;
 
   @Before
   public void setup() throws Exception {
@@ -1043,24 +1045,22 @@ public class UpgradeActionTest {
     properties.put("a", "a1");
     properties.put("b", "b1");
 
-    Config c1 = new ConfigImpl(cluster, "zookeeper-env", properties, propertiesAttributes, m_injector);
+    configFactory.createNew(cluster, "zookeeper-env", "version-" + System.currentTimeMillis(),
+        properties, propertiesAttributes);
+
     properties.put("zookeeper_a", "value_1");
     properties.put("zookeeper_b", "value_2");
 
-    Config c2 = new ConfigImpl(cluster, "hdfs-site", properties, propertiesAttributes, m_injector);
+    configFactory.createNew(cluster, "hdfs-site", "version-" + System.currentTimeMillis(),
+        properties, propertiesAttributes);
+
     properties.put("hdfs_a", "value_3");
     properties.put("hdfs_b", "value_4");
 
-    Config c3 = new ConfigImpl(cluster, "core-site", properties, propertiesAttributes, m_injector);
-    Config c4 = new ConfigImpl(cluster, "foo-site", properties, propertiesAttributes, m_injector);
-
-    cluster.addConfig(c1);
-    cluster.addConfig(c2);
-    cluster.addConfig(c3);
-    cluster.addConfig(c4);
-    c1.persist();
-    c2.persist();
-    c3.persist();
-    c4.persist();
+    configFactory.createNew(cluster, "core-site", "version-" + System.currentTimeMillis(),
+        properties, propertiesAttributes);
+
+    configFactory.createNew(cluster, "foo-site", "version-" + System.currentTimeMillis(),
+        properties, propertiesAttributes);
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java
index 80665a5..f55bf62 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java
@@ -89,8 +89,7 @@ public class ConfigGroupTest {
     Map<String, String> attributes = new HashMap<String, String>();
     attributes.put("a", "true");
     propertiesAttributes.put("final", attributes);
-    Config config = configFactory.createNew(cluster, "hdfs-site", properties, propertiesAttributes);
-    config.setTag("testversion");
+    Config config = configFactory.createNew(cluster, "hdfs-site", "testversion", properties, propertiesAttributes);
 
     Host host = clusters.getHost("h1");
 
@@ -103,7 +102,6 @@ public class ConfigGroupTest {
     ConfigGroup configGroup = configGroupFactory.createNew(cluster, "cg-test",
       "HDFS", "New HDFS configs for h1", configs, hosts);
 
-    configGroup.persist();
     cluster.addConfigGroup(configGroup);
     return configGroup;
   }
@@ -154,28 +152,28 @@ public class ConfigGroupTest {
     Map<String, String> attributes = new HashMap<String, String>();
     attributes.put("key1", "true");
     propertiesAttributes.put("final", attributes);
-    Config config = new ConfigImpl("test-site");
-    config.setProperties(properties);
-    config.setPropertiesAttributes(propertiesAttributes);
-    config.setTag("version100");
 
-    configGroup.addConfiguration(config);
+    Config config = configFactory.createNew(cluster, "test-site", "version100", properties, propertiesAttributes);
+    Map<String, Config> newConfigurations = new HashMap<>(configGroup.getConfigurations());
+    newConfigurations.put(config.getType(), config);
+
+    configGroup.setConfigurations(newConfigurations);
     Assert.assertEquals(2, configGroup.getConfigurations().values().size());
 
+    // re-request it and verify that the config was added
+    configGroupEntity = configGroupDAO.findById(configGroup.getId());
+    Assert.assertEquals(2, configGroupEntity.getConfigGroupConfigMappingEntities().size());
+
     configGroup.setName("NewName");
     configGroup.setDescription("NewDesc");
     configGroup.setTag("NewTag");
 
     // Save
-    configGroup.persist();
-    configGroup.refresh();
     configGroupEntity = configGroupDAO.findByName("NewName");
 
     Assert.assertNotNull(configGroupEntity);
-    Assert.assertEquals(2, configGroupEntity
-      .getConfigGroupHostMappingEntities().size());
-    Assert.assertEquals(2, configGroupEntity
-      .getConfigGroupConfigMappingEntities().size());
+    Assert.assertEquals(2, configGroupEntity.getConfigGroupHostMappingEntities().size());
+    Assert.assertEquals(2, configGroupEntity.getConfigGroupConfigMappingEntities().size());
     Assert.assertEquals("NewTag", configGroupEntity.getTag());
     Assert.assertEquals("NewDesc", configGroupEntity.getDescription());
     Assert.assertNotNull(cluster.getConfig("test-site", "version100"));

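With persist() and addConfiguration() gone from ConfigGroup, the patched test grows the group by rebuilding its configuration map and handing it back through setConfigurations(); the DAO is re-queried straight afterwards, so the setter also appears to persist the new mapping. A minimal sketch of that pattern, assuming the cluster, configFactory, configGroup, properties and propertiesAttributes fixtures declared earlier in the test (the type and tag mirror the values in the patch):

    // Create the new config through the factory, then rebuild the group's
    // configuration map and hand it back in one call.
    Config config = configFactory.createNew(cluster, "test-site", "version100",
        properties, propertiesAttributes);
    Map<String, Config> newConfigurations = new HashMap<>(configGroup.getConfigurations());
    newConfigurations.put(config.getType(), config);
    configGroup.setConfigurations(newConfigurations);
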
http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
index d50c92d..526e462 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
@@ -38,7 +38,6 @@ import javax.persistence.EntityManager;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.RequestFactory;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.AmbariCustomCommandExecutionHelper;
 import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.controller.ClusterRequest;
@@ -88,6 +87,7 @@ public class ConfigHelperTest {
     private static ConfigHelper configHelper;
     private static AmbariManagementController managementController;
     private static AmbariMetaInfo metaInfo;
+    private static ConfigFactory configFactory;
 
     @BeforeClass
     public static void setup() throws Exception {
@@ -102,6 +102,7 @@ public class ConfigHelperTest {
       configHelper = injector.getInstance(ConfigHelper.class);
       managementController = injector.getInstance(AmbariManagementController.class);
       metaInfo = injector.getInstance(AmbariMetaInfo.class);
+      configFactory = injector.getInstance(ConfigFactory.class);
 
       clusterName = "c1";
       clusters.addCluster(clusterName, new StackId("HDP-2.0.6"));
@@ -251,7 +252,6 @@ public class ConfigHelperTest {
       LOG.info("Config group created with tag " + tag);
       configGroup.setTag(tag);
 
-      configGroup.persist();
       cluster.addConfigGroup(configGroup);
 
       return configGroup.getId();
@@ -339,14 +339,11 @@ public class ConfigHelperTest {
         add(clusterRequest6);
       }}, null);
 
-      final Config config = new ConfigImpl("ams-env");
-      config.setTag("version122");
-
       Map<String, String> properties = new HashMap<String, String>();
       properties.put("a", "b");
       properties.put("c", "d");
-      config.setProperties(properties);
 
+      final Config config = configFactory.createNew(cluster, "ams-env", "version122", properties, null);
       Long groupId = addConfigGroup("g1", "t1", new ArrayList<String>() {{
         add("h1");
       }}, new ArrayList<Config>() {{
@@ -419,19 +416,14 @@ public class ConfigHelperTest {
         add(clusterRequest3);
       }}, null);
 
-      final Config config1 = new ConfigImpl("core-site2");
-      config1.setTag("version122");
-
       Map<String, String> properties = new HashMap<String, String>();
       properties.put("a", "b");
       properties.put("c", "d");
-      config1.setProperties(properties);
+      final Config config1 = configFactory.createNew(cluster, "core-site2", "version122", properties, null);
 
-      final Config config2 = new ConfigImpl("global2");
-      config2.setTag("version122");
       Map<String, String> properties2 = new HashMap<String, String>();
       properties2.put("namenode_heapsize", "1111");
-      config2.setProperties(properties2);
+      final Config config2 = configFactory.createNew(cluster, "global2", "version122", properties2, null);
 
       Long groupId = addConfigGroup("g2", "t1", new ArrayList<String>() {{
         add("h1");
@@ -511,24 +503,23 @@ public class ConfigHelperTest {
       }}, null);
 
 
-      final Config config1 = new ConfigImpl("core-site3");
-      config1.setTag("version122");
-
       Map<String, String> attributes = new HashMap<String, String>();
       attributes.put("fs.trash.interval", "11");
       attributes.put("b", "y");
       Map<String, Map<String, String>> config1Attributes = new HashMap<String, Map<String, String>>();
       config1Attributes.put("attribute1", attributes);
-      config1.setPropertiesAttributes(config1Attributes);
 
-      final Config config2 = new ConfigImpl("global3");
-      config2.setTag("version122");
+      final Config config1 = configFactory.createNew(cluster, "core-site3", "version122",
+          new HashMap<String, String>(), config1Attributes);
+
       attributes = new HashMap<String, String>();
       attributes.put("namenode_heapsize", "z");
       attributes.put("c", "q");
       Map<String, Map<String, String>> config2Attributes = new HashMap<String, Map<String, String>>();
       config2Attributes.put("attribute2", attributes);
-      config2.setPropertiesAttributes(config2Attributes);
+
+      final Config config2 = configFactory.createNew(cluster, "global3", "version122",
+          new HashMap<String, String>(), config2Attributes);
 
       Long groupId = addConfigGroup("g3", "t1", new ArrayList<String>() {{
         add("h3");
@@ -690,7 +681,8 @@ public class ConfigHelperTest {
       confGroupProperties.put("b", "any");
       confGroupProperties.put("c", "any");
 
-      Config overrideConfig = new ConfigImpl(cluster, "type", confGroupProperties, confGroupAttributes, injector);
+      Config overrideConfig = configFactory.createNew(cluster, "type", null,
+          confGroupProperties, confGroupAttributes);
 
       Map<String, Map<String, String>> result
           = configHelper.overrideAttributes(overrideConfig, persistedAttributes);
@@ -718,7 +710,8 @@ public class ConfigHelperTest {
       confGroupProperties.put("b", "any");
       confGroupProperties.put("c", "any");
 
-      Config overrideConfig = new ConfigImpl(cluster, "type", confGroupProperties, confGroupAttributes, injector);
+      Config overrideConfig = configFactory.createNew(cluster, "type", null,
+          confGroupProperties, confGroupAttributes);
 
       Map<String, Map<String, String>> result
           = configHelper.overrideAttributes(overrideConfig, persistedAttributes);
@@ -744,7 +737,8 @@ public class ConfigHelperTest {
       confGroupProperties.put("b", "any");
       confGroupProperties.put("c", "any");
 
-      Config overrideConfig = new ConfigImpl(cluster, "type", confGroupProperties, null, injector);
+      Config overrideConfig = configFactory.createNew(cluster, "type", null,
+          confGroupProperties, null);
 
       Map<String, Map<String, String>> result
           = configHelper.overrideAttributes(overrideConfig, persistedAttributes);
@@ -772,7 +766,8 @@ public class ConfigHelperTest {
       confGroupFinalAttrs.put("b", "true");
       confGroupAttributes.put("final", confGroupFinalAttrs);
 
-      Config overrideConfig = new ConfigImpl(cluster, "type", null, confGroupAttributes, injector);
+      Config overrideConfig = configFactory.createNew(cluster, "type", "version122",
+          new HashMap<String,String>(), confGroupAttributes);
 
       Map<String, Map<String, String>> result
           = configHelper.overrideAttributes(overrideConfig, persistedAttributes);
@@ -921,8 +916,10 @@ public class ConfigHelperTest {
       List<String> hosts = new ArrayList<String>();
       hosts.add("h1");
       List<Config> configs = new ArrayList<Config>();
-      ConfigImpl configImpl = new ConfigImpl("flume-conf");
-      configImpl.setTag("FLUME1");
+
+      Config configImpl = configFactory.createNew(cluster, "flume-conf", "FLUME1",
+          new HashMap<String,String>(), null);
+
       configs.add(configImpl);
       addConfigGroup("configGroup1", "FLUME", hosts, configs);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertReceivedListenerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertReceivedListenerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertReceivedListenerTest.java
index 1867bda..ede94dc 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertReceivedListenerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertReceivedListenerTest.java
@@ -56,12 +56,12 @@ import org.apache.ambari.server.utils.EventBusSynchronizer;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.experimental.categories.Category;
 
 import com.google.inject.Guice;
 import com.google.inject.Injector;
 import com.google.inject.persist.PersistService;
 import com.google.inject.persist.UnitOfWork;
-import org.junit.experimental.categories.Category;
 
 /**
  * Tests the {@link AlertReceivedListener}.
@@ -835,17 +835,13 @@ public class AlertReceivedListenerTest {
   @SuppressWarnings("serial")
   public void testAlertFirmnessUsingGlobalValueHigherThanOverride() throws Exception {
     ConfigFactory cf = m_injector.getInstance(ConfigFactory.class);
-    Config config = cf.createNew(m_cluster, ConfigHelper.CLUSTER_ENV,
+    Config config = cf.createNew(m_cluster, ConfigHelper.CLUSTER_ENV, "version2",
         new HashMap<String, String>() {
           {
             put(ConfigHelper.CLUSTER_ENV_ALERT_REPEAT_TOLERANCE, "3");
           }
         }, new HashMap<String, Map<String, String>>());
 
-    config.setTag("version2");
-    config.persist();
-
-    m_cluster.addConfig(config);
     m_cluster.addDesiredConfig("user", Collections.singleton(config));
 
     String definitionName = ALERT_DEFINITION + "1";

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
index 4fdcc22..9dc405e 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
@@ -124,14 +124,11 @@ public class ClusterDeadlockTest {
     cluster.createClusterVersion(stackId,
         stackId.getStackVersion(), "admin", RepositoryVersionState.INSTALLING);
 
-    Config config1 = configFactory.createNew(cluster, "test-type1", new HashMap<String, String>(), new HashMap<String,
+    Config config1 = configFactory.createNew(cluster, "test-type1", "version1", new HashMap<String, String>(), new HashMap<String,
         Map<String, String>>());
-    Config config2 = configFactory.createNew(cluster, "test-type2", new HashMap<String, String>(), new HashMap<String,
+    Config config2 = configFactory.createNew(cluster, "test-type2", "version1", new HashMap<String, String>(), new HashMap<String,
         Map<String, String>>());
-    config1.persist();
-    config2.persist();
-    cluster.addConfig(config1);
-    cluster.addConfig(config2);
+
     cluster.addDesiredConfig("test user", new HashSet<Config>(Arrays.asList(config1, config2)));
 
     // 100 hosts
@@ -186,7 +183,7 @@ public class ClusterDeadlockTest {
     }
 
     DeadlockWarningThread wt = new DeadlockWarningThread(threads);
-    
+
     while (true) {
       if(!wt.isAlive()) {
           break;
@@ -221,7 +218,7 @@ public class ClusterDeadlockTest {
     }
 
     DeadlockWarningThread wt = new DeadlockWarningThread(threads);
-    
+
     while (true) {
       if(!wt.isAlive()) {
           break;
@@ -267,7 +264,7 @@ public class ClusterDeadlockTest {
       clusterWriterThread.start();
       schWriterThread.start();
     }
-    
+
     DeadlockWarningThread wt = new DeadlockWarningThread(threads, 20, 1000);
     while (true) {
       if(!wt.isAlive()) {
@@ -337,7 +334,7 @@ public class ClusterDeadlockTest {
     @Override
     public void run() {
       for (int i =0; i<300; i++) {
-        config.persist(false);
+        config.save();
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
index 90a3d02..fc3646a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
@@ -87,7 +87,6 @@ import org.apache.ambari.server.state.ComponentInfo;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.HostHealthStatus;
@@ -964,21 +963,14 @@ public class ClusterTest {
     Map<String, Map<String, String>> c2PropAttributes = new HashMap<String, Map<String,String>>();
     c2PropAttributes.put("final", new HashMap<String, String>());
     c2PropAttributes.get("final").put("x", "true");
-    Config config1 = configFactory.createNew(c1, "global",
+    Config config1 = configFactory.createNew(c1, "global", "version1",
         new HashMap<String, String>() {{ put("a", "b"); }}, c1PropAttributes);
-    config1.setTag("version1");
 
-    Config config2 = configFactory.createNew(c1, "global",
+    Config config2 = configFactory.createNew(c1, "global", "version2",
         new HashMap<String, String>() {{ put("x", "y"); }}, c2PropAttributes);
-    config2.setTag("version2");
 
-    Config config3 = configFactory.createNew(c1, "core-site",
+    Config config3 = configFactory.createNew(c1, "core-site", "version2",
         new HashMap<String, String>() {{ put("x", "y"); }}, new HashMap<String, Map<String,String>>());
-    config3.setTag("version2");
-
-    c1.addConfig(config1);
-    c1.addConfig(config2);
-    c1.addConfig(config3);
 
     c1.addDesiredConfig("_test", Collections.singleton(config1));
     Config res = c1.getDesiredConfigByType("global");
@@ -998,21 +990,14 @@ public class ClusterTest {
   public void testDesiredConfigs() throws Exception {
     createDefaultCluster();
 
-    Config config1 = configFactory.createNew(c1, "global",
+    Config config1 = configFactory.createNew(c1, "global", "version1",
         new HashMap<String, String>() {{ put("a", "b"); }}, new HashMap<String, Map<String,String>>());
-    config1.setTag("version1");
 
-    Config config2 = configFactory.createNew(c1, "global",
+    Config config2 = configFactory.createNew(c1, "global", "version2",
         new HashMap<String, String>() {{ put("x", "y"); }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("version2");
 
-    Config config3 = configFactory.createNew(c1, "core-site",
+    Config config3 = configFactory.createNew(c1, "core-site", "version2",
         new HashMap<String, String>() {{ put("x", "y"); }}, new HashMap<String, Map<String,String>>());
-    config3.setTag("version2");
-
-    c1.addConfig(config1);
-    c1.addConfig(config2);
-    c1.addConfig(config3);
 
     try {
       c1.addDesiredConfig(null, Collections.singleton(config1));
@@ -1132,18 +1117,11 @@ public class ClusterTest {
 
     c1.addService("HDFS");
 
-    Config config1 = configFactory.createNew(c1, "hdfs-site",
+    Config config1 = configFactory.createNew(c1, "hdfs-site", "version1",
       new HashMap<String, String>() {{ put("a", "b"); }}, new HashMap<String, Map<String,String>>());
-    config1.setTag("version1");
 
-    Config config2 = configFactory.createNew(c1, "core-site",
+    Config config2 = configFactory.createNew(c1, "core-site", "version2",
       new HashMap<String, String>() {{ put("x", "y"); }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("version2");
-
-    config1.persist();
-    c1.addConfig(config1);
-    config2.persist();
-    c1.addConfig(config2);
 
     Set<Config> configs = new HashSet<Config>();
     configs.add(config1);
@@ -1209,10 +1187,9 @@ public class ClusterTest {
     Map<String, Map<String, String>> propAttributes = new HashMap<String, Map<String,String>>();
     propAttributes.put("final", new HashMap<String, String>());
     propAttributes.get("final").put("test", "true");
-    Config config = configFactory.createNew(c1, "hdfs-site", new HashMap<String, String>(){{
+    Config config = configFactory.createNew(c1, "hdfs-site", "1", new HashMap<String, String>(){{
       put("test", "test");
     }}, propAttributes);
-    config.setTag("1");
 
     host1.addDesiredConfig(c1.getClusterId(), true, "test", config);
 
@@ -1247,16 +1224,11 @@ public class ClusterTest {
   public void testServiceConfigVersions() throws Exception {
     createDefaultCluster();
 
-    Config config1 = configFactory.createNew(c1, "hdfs-site",
+    Config config1 = configFactory.createNew(c1, "hdfs-site", "version1",
       new HashMap<String, String>() {{ put("a", "b"); }}, new HashMap<String, Map<String,String>>());
-    config1.setTag("version1");
 
-    Config config2 = configFactory.createNew(c1, "hdfs-site",
+    Config config2 = configFactory.createNew(c1, "hdfs-site", "version2",
       new HashMap<String, String>() {{ put("x", "y"); }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("version2");
-
-    c1.addConfig(config1);
-    c1.addConfig(config2);
 
     c1.addDesiredConfig("admin", Collections.singleton(config1));
     List<ServiceConfigVersionResponse> serviceConfigVersions =
@@ -1310,16 +1282,11 @@ public class ClusterTest {
   public void testSingleServiceVersionForMultipleConfigs() throws Exception {
     createDefaultCluster();
 
-    Config config1 = configFactory.createNew(c1, "hdfs-site",
+    Config config1 = configFactory.createNew(c1, "hdfs-site", "version1",
       new HashMap<String, String>() {{ put("a", "b"); }}, new HashMap<String, Map<String,String>>());
-    config1.setTag("version1");
 
-    Config config2 = configFactory.createNew(c1, "core-site",
+    Config config2 = configFactory.createNew(c1, "core-site", "version2",
       new HashMap<String, String>() {{ put("x", "y"); }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("version2");
-
-    c1.addConfig(config1);
-    c1.addConfig(config2);
 
     Set<Config> configs = new HashSet<Config>();
     configs.add(config1);
@@ -1345,11 +1312,8 @@ public class ClusterTest {
   public void testServiceConfigVersionsForGroups() throws Exception {
     createDefaultCluster();
 
-    Config config1 = configFactory.createNew(c1, "hdfs-site",
+    Config config1 = configFactory.createNew(c1, "hdfs-site", "version1",
       new HashMap<String, String>() {{ put("a", "b"); }}, new HashMap<String, Map<String,String>>());
-    config1.setTag("version1");
-
-    c1.addConfig(config1);
 
     ServiceConfigVersionResponse scvResponse =
       c1.addDesiredConfig("admin", Collections.singleton(config1));
@@ -1361,16 +1325,13 @@ public class ClusterTest {
     Assert.assertEquals("Only one scv should be active", 1, activeServiceConfigVersions.get("HDFS").size());
 
     //create config group
-    Config config2 = configFactory.createNew(c1, "hdfs-site",
+    Config config2 = configFactory.createNew(c1, "hdfs-site", "version2",
       new HashMap<String, String>() {{ put("a", "c"); }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("version2");
 
     ConfigGroup configGroup =
       configGroupFactory.createNew(c1, "test group", "HDFS", "descr", Collections.singletonMap("hdfs-site", config2),
         Collections.<Long, Host>emptyMap());
 
-    configGroup.persist();
-
     c1.addConfigGroup(configGroup);
 
     scvResponse = c1.createServiceConfigVersion("HDFS", "admin", "test note", configGroup);
@@ -1381,12 +1342,11 @@ public class ClusterTest {
     Assert.assertEquals("Two service config versions should be active, for default and test groups",
       2, activeServiceConfigVersions.get("HDFS").size());
 
-    Config config3 = configFactory.createNew(c1, "hdfs-site",
+    Config config3 = configFactory.createNew(c1, "hdfs-site", "version3",
       new HashMap<String, String>() {{ put("a", "d"); }}, new HashMap<String, Map<String,String>>());
 
     configGroup.setConfigurations(Collections.singletonMap("hdfs-site", config3));
 
-    configGroup.persist();
     scvResponse = c1.createServiceConfigVersion("HDFS", "admin", "test note", configGroup);
     assertEquals("SCV 3 should be created", Long.valueOf(3), scvResponse.getVersion());
 
@@ -1417,16 +1377,14 @@ public class ClusterTest {
 
     //check config with empty cluster
 
-    Config config4 = new ConfigImpl("hdfs-site");
-    config4.setProperties(new HashMap<String, String>() {{
-      put("a", "b");
-    }});
+    Config config4 = configFactory.createReadOnly("hdfs-site", "version4",
+        Collections.singletonMap("a", "b"), null);
 
     ConfigGroup configGroup2 =
-        configGroupFactory.createNew(c1, "test group 2", "HDFS", "descr", Collections.singletonMap("hdfs-site", config4),
+        configGroupFactory.createNew(c1, "test group 2", "HDFS", "descr",
+            new HashMap<>(Collections.singletonMap("hdfs-site", config4)),
             Collections.<Long, Host>emptyMap());
 
-    configGroup2.persist();
     c1.addConfigGroup(configGroup2);
 
     scvResponse = c1.createServiceConfigVersion("HDFS", "admin", "test note", configGroup2);
@@ -1443,12 +1401,8 @@ public class ClusterTest {
     // Given
     createDefaultCluster();
 
-    Config hdfsSiteConfigV1 = configFactory.createNew(c1, "hdfs-site", ImmutableMap.of("p1", "v1"), ImmutableMap.<String, Map<String,String>>of());
-    hdfsSiteConfigV1.setTag("version1");
-    hdfsSiteConfigV1.persist();
-
-    c1.addConfig(hdfsSiteConfigV1);
-
+    Config hdfsSiteConfigV1 = configFactory.createNew(c1, "hdfs-site", "version1",
+        ImmutableMap.of("p1", "v1"), ImmutableMap.<String, Map<String,String>>of());
 
     ServiceConfigVersionResponse hdfsSiteConfigResponseV1 = c1.addDesiredConfig("admin", Collections.singleton(hdfsSiteConfigV1));
     List<ConfigurationResponse> configResponsesDefaultGroup =  Collections.singletonList(
@@ -1459,11 +1413,10 @@ public class ClusterTest {
 
     hdfsSiteConfigResponseV1.setConfigurations(configResponsesDefaultGroup);
 
-    Config hdfsSiteConfigV2 = configFactory.createNew(c1, "hdfs-site", ImmutableMap.of("p1", "v2"), ImmutableMap.<String, Map<String,String>>of());
-    hdfsSiteConfigV2.setTag("version2");
+    Config hdfsSiteConfigV2 = configFactory.createNew(c1, "hdfs-site", "version2",
+        ImmutableMap.of("p1", "v2"), ImmutableMap.<String, Map<String,String>>of());
 
     ConfigGroup configGroup = configGroupFactory.createNew(c1, "configGroup1", "version1", "test description", ImmutableMap.of(hdfsSiteConfigV2.getType(), hdfsSiteConfigV2), ImmutableMap.<Long, Host>of());
-    configGroup.persist();
 
     c1.addConfigGroup(configGroup);
     ServiceConfigVersionResponse hdfsSiteConfigResponseV2 = c1.createServiceConfigVersion("HDFS", "admin", "test note", configGroup);
@@ -1507,12 +1460,8 @@ public class ClusterTest {
     // Given
     createDefaultCluster();
 
-    Config hdfsSiteConfigV1 = configFactory.createNew(c1, "hdfs-site", ImmutableMap.of("p1", "v1"), ImmutableMap.<String, Map<String,String>>of());
-    hdfsSiteConfigV1.setTag("version1");
-    hdfsSiteConfigV1.persist();
-
-    c1.addConfig(hdfsSiteConfigV1);
-
+    Config hdfsSiteConfigV1 = configFactory.createNew(c1, "hdfs-site", "version1",
+        ImmutableMap.of("p1", "v1"), ImmutableMap.<String, Map<String,String>>of());
 
     ServiceConfigVersionResponse hdfsSiteConfigResponseV1 = c1.addDesiredConfig("admin", Collections.singleton(hdfsSiteConfigV1));
     List<ConfigurationResponse> configResponsesDefaultGroup =  Collections.singletonList(
@@ -1523,11 +1472,10 @@ public class ClusterTest {
 
     hdfsSiteConfigResponseV1.setConfigurations(configResponsesDefaultGroup);
 
-    Config hdfsSiteConfigV2 = configFactory.createNew(c1, "hdfs-site", ImmutableMap.of("p1", "v2"), ImmutableMap.<String, Map<String,String>>of());
-    hdfsSiteConfigV2.setTag("version2");
+    Config hdfsSiteConfigV2 = configFactory.createNew(c1, "hdfs-site", "version2",
+        ImmutableMap.of("p1", "v2"), ImmutableMap.<String, Map<String,String>>of());
 
     ConfigGroup configGroup = configGroupFactory.createNew(c1, "configGroup1", "version1", "test description", ImmutableMap.of(hdfsSiteConfigV2.getType(), hdfsSiteConfigV2), ImmutableMap.<Long, Host>of());
-    configGroup.persist();
 
     c1.addConfigGroup(configGroup);
     ServiceConfigVersionResponse hdfsSiteConfigResponseV2 = c1.createServiceConfigVersion("HDFS", "admin", "test note", configGroup);
@@ -2373,17 +2321,13 @@ public class ClusterTest {
     ClusterEntity clusterEntity = clusterDAO.findByName("c1");
     assertEquals(0, clusterEntity.getClusterConfigEntities().size());
 
-    final Config originalConfig = configFactory.createNew(cluster, "foo-site",
+    final Config originalConfig = configFactory.createNew(cluster, "foo-site", "version3",
         new HashMap<String, String>() {
           {
             put("one", "two");
           }
         }, new HashMap<String, Map<String, String>>());
 
-    originalConfig.setTag("version3");
-    originalConfig.persist();
-    cluster.addConfig(originalConfig);
-
     ConfigGroup configGroup = configGroupFactory.createNew(cluster, "g1", "t1", "",
         new HashMap<String, Config>() {
           {
@@ -2391,7 +2335,6 @@ public class ClusterTest {
           }
         }, Collections.<Long, Host> emptyMap());
 
-    configGroup.persist();
     cluster.addConfigGroup(configGroup);
 
     clusterEntity = clusterDAO.findByName("c1");
@@ -2403,8 +2346,7 @@ public class ClusterTest {
     Map<String, String> properties = config.getProperties();
     properties.put("three", "four");
     config.setProperties(properties);
-
-    config.persist(false);
+    config.save();
 
     clusterEntity = clusterDAO.findByName("c1");
     assertEquals(1, clusterEntity.getClusterConfigEntities().size());
@@ -2545,13 +2487,7 @@ public class ClusterTest {
 
     // foo-type for v1 on current stack
     properties.put("foo-property-1", "foo-value-1");
-    Config c1 = new ConfigImpl(cluster, "foo-type", properties, propertiesAttributes, injector);
-    c1.setTag("version-1");
-    c1.setStackId(stackId);
-    c1.setVersion(1L);
-
-    cluster.addConfig(c1);
-    c1.persist();
+    Config c1 = configFactory.createNew(cluster, "foo-type", "version-1", properties, propertiesAttributes);
 
     // make v1 "current"
     cluster.addDesiredConfig("admin", Sets.newHashSet(c1), "note-1");
@@ -2562,12 +2498,7 @@ public class ClusterTest {
     // save v2
     // foo-type for v2 on new stack
     properties.put("foo-property-2", "foo-value-2");
-    Config c2 = new ConfigImpl(cluster, "foo-type", properties, propertiesAttributes, injector);
-    c2.setTag("version-2");
-    c2.setStackId(newStackId);
-    c2.setVersion(2L);
-    cluster.addConfig(c2);
-    c2.persist();
+    Config c2 = configFactory.createNew(cluster, "foo-type", "version-2", properties, propertiesAttributes);
 
     // make v2 "current"
     cluster.addDesiredConfig("admin", Sets.newHashSet(c2), "note-2");

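Config.persist(false) is replaced throughout by save(). A minimal sketch of the in-place update sequence used in the ClusterTest diff above, assuming an existing Config instance named config; the property values are illustrative:

    // Mutate the existing properties and write them back. In the test above
    // the cluster still holds a single config entity afterwards, so save()
    // updates the current config rather than creating a new one.
    Map<String, String> properties = config.getProperties();
    properties.put("three", "four");
    config.setProperties(properties);
    config.save();
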
http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
index 5886234..d75d9d0 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
@@ -405,19 +405,15 @@ public class ClustersTest {
     cluster.transitionClusterVersion(stackId, stackId.getStackVersion(),
         RepositoryVersionState.CURRENT);
 
-    final Config config1 = injector.getInstance(ConfigFactory.class).createNew(cluster, "t1",
+    final Config config1 = injector.getInstance(ConfigFactory.class).createNew(cluster, "t1", "1",
         new HashMap<String, String>() {{
           put("prop1", "val1");
         }}, new HashMap<String, Map<String,String>>());
-    config1.setTag("1");
-    config1.persist();
 
-    Config config2 = injector.getInstance(ConfigFactory.class).createNew(cluster, "t1",
+    Config config2 = injector.getInstance(ConfigFactory.class).createNew(cluster, "t1", "2",
         new HashMap<String, String>() {{
           put("prop2", "val2");
         }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("2");
-    config2.persist();
 
     // cluster desired config
     cluster.addDesiredConfig("_test", Collections.singleton(config1));

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
index 1f09002..96dbf26 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
@@ -114,17 +114,12 @@ public class ServiceComponentHostConcurrentWriteDeadlockTest {
     cluster.createClusterVersion(stackId,
         stackId.getStackVersion(), "admin", RepositoryVersionState.INSTALLING);
 
-    Config config1 = configFactory.createNew(cluster, "test-type1", new HashMap<String, String>(), new HashMap<String,
+    Config config1 = configFactory.createNew(cluster, "test-type1", null, new HashMap<String, String>(), new HashMap<String,
         Map<String, String>>());
 
-    Config config2 = configFactory.createNew(cluster, "test-type2", new HashMap<String, String>(), new HashMap<String,
+    Config config2 = configFactory.createNew(cluster, "test-type2", null, new HashMap<String, String>(), new HashMap<String,
         Map<String, String>>());
 
-    config1.persist();
-    config2.persist();
-
-    cluster.addConfig(config1);
-    cluster.addConfig(config2);
     cluster.addDesiredConfig("test user", new HashSet<Config>(Arrays.asList(config1, config2)));
 
     String hostName = "c6401";


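Taken together, the test diffs in this change collapse the old multi-step setup (new ConfigImpl, setTag(), persist(), cluster.addConfig(), then the desired-config call) into a single ConfigFactory.createNew(cluster, type, tag, properties, propertiesAttributes) call, after which the explicit registration calls are simply dropped. A minimal sketch of the new shape, using only the signature visible in these patches; the helper class, type, tag and property values are illustrative:

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.ambari.server.state.Cluster;
    import org.apache.ambari.server.state.Config;
    import org.apache.ambari.server.state.ConfigFactory;

    final class ConfigFactorySketch {
      // One factory call now replaces the ConfigImpl constructor plus the
      // setTag()/persist()/addConfig() follow-ups removed in the diffs above.
      static Config newHdfsSiteConfig(ConfigFactory configFactory, Cluster cluster) {
        Map<String, String> properties = new HashMap<String, String>();
        properties.put("a", "b"); // illustrative property used by several tests
        return configFactory.createNew(cluster, "hdfs-site", "version1",
            properties, new HashMap<String, Map<String, String>>());
      }
    }

For configs that are not attached to a cluster, the ClusterTest diff also shows a configFactory.createReadOnly(type, tag, properties, attributes) variant being used in place of a bare ConfigImpl.
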
[06/51] [abbrv] ambari git commit: AMBARI-19025. Add livy.spark.master to livy.conf and update spark-blacklist.conf (Jeff Zhang via smohanty)

Posted by sm...@apache.org.
AMBARI-19025. Add livy.spark.master to livy.conf and update spark-blacklist.conf (Jeff Zhang via smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/aedf2c00
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/aedf2c00
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/aedf2c00

Branch: refs/heads/branch-feature-AMBARI-18901
Commit: aedf2c00b9d5be7b2034ed50ee25b49db829e9c3
Parents: 9c16bef
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Wed Dec 7 23:00:54 2016 -0800
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Wed Dec 7 23:00:54 2016 -0800

----------------------------------------------------------------------
 .../services/SPARK/configuration/livy-conf.xml  | 73 ++++++++++++++++++++
 .../configuration/livy-spark-blacklist.xml      | 52 ++++++++++++++
 2 files changed, 125 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/aedf2c00/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK/configuration/livy-conf.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK/configuration/livy-conf.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK/configuration/livy-conf.xml
new file mode 100644
index 0000000..b7bfa73
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK/configuration/livy-conf.xml
@@ -0,0 +1,73 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <property>
+    <name>livy.environment</name>
+    <value>production</value>
+    <description>
+            Specifies Livy's environment. May either be "production" or "development". In "development"
+            mode, Livy will enable debugging options, such as reporting possible routes on a 404.
+            defaults to development
+        </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>livy.server.port</name>
+    <value>8998</value>
+    <description>
+            What port to start the server on. Defaults to 8998.
+        </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>livy.server.session.timeout</name>
+    <value>3600000</value>
+    <description>
+            Time in milliseconds on how long Livy will wait before timing out an idle session.
+            Default is one hour.
+        </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>livy.impersonation.enabled</name>
+    <value>true</value>
+    <description>
+            If livy should use proxy users when submitting a job.
+        </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>livy.server.csrf_protection.enabled</name>
+    <value>true</value>
+    <description>
+            Whether to enable csrf protection for livy's rest api.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+     <name>livy.spark.master</name>
+     <value>yarn-cluster</value>
+     <description>
+           spark.master property for spark engine
+     </description>
+     <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/aedf2c00/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK/configuration/livy-spark-blacklist.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK/configuration/livy-spark-blacklist.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK/configuration/livy-spark-blacklist.xml
new file mode 100644
index 0000000..d4f27bf
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK/configuration/livy-spark-blacklist.xml
@@ -0,0 +1,52 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <property>
+    <name>content</name>
+    <description>spark-blacklist.properties</description>
+    <value>
+#
+# Configuration override / blacklist. Defines a list of properties that users are not allowed
+# to override when starting Spark sessions.
+#
+# This file takes a list of property names (one per line). Empty lines and lines starting with "#"
+# are ignored.
+#
+
+# Disallow overriding the master and the deploy mode.
+spark.master
+spark.submit.deployMode
+
+# Disallow overriding the location of Spark cached jars.
+spark.yarn.jar
+spark.yarn.jars
+spark.yarn.archive
+
+# Don't allow users to override the RSC timeout.
+livy.rsc.server.idle_timeout
+    </value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>


[46/51] [abbrv] ambari git commit: AMBARI-19114. Add property existence check to druid stack advisor code (Nishant Bangarwa via smohanty)

Posted by sm...@apache.org.
AMBARI-19114. Add property existence check to druid stack advisor code (Nishant Bangarwa via smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2a840811
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2a840811
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2a840811

Branch: refs/heads/branch-feature-AMBARI-18901
Commit: 2a840811896ddb84343dea1db12842ef35e30d3d
Parents: 6029846
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Fri Dec 9 06:58:15 2016 -0800
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Fri Dec 9 06:58:15 2016 -0800

----------------------------------------------------------------------
 .../stacks/HDP/2.6/services/stack_advisor.py    |  4 ++
 .../stacks/2.6/common/test_stack_advisor.py     | 52 ++++++++++++++++++++
 2 files changed, 56 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/2a840811/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
index bebeaf8..562444b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
@@ -53,6 +53,10 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
 
   def recommendDruidConfigurations(self, configurations, clusterData, services, hosts):
 
+      # druid is not in list of services to be installed
+      if 'druid-common' not in services['configurations']:
+        return
+
       componentsListList = [service["components"] for service in services["services"]]
       componentsList = [item["StackServiceComponents"] for sublist in componentsListList for item in sublist]
       servicesList = [service["StackServices"]["service_name"] for service in services["services"]]

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a840811/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
index 011a874..074d87e 100644
--- a/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
@@ -424,6 +424,58 @@ class TestHDP26StackAdvisor(TestCase):
                                                               'druid.broker.jvm.heap.memory': {'maximum': '49152'}}}}
                       )
 
+
+  def test_recommendDruidConfigurations_property_existence_check(self):
+      # Test for https://issues.apache.org/jira/browse/AMBARI-19144
+      hosts = {
+        "items": [
+          {
+            "Hosts": {
+              "cpu_count": 4,
+              "total_mem": 50331648,
+              "disk_info": [
+                {"mountpoint": "/"},
+                {"mountpoint": "/dev/shm"},
+                {"mountpoint": "/vagrant"},
+                {"mountpoint": "/"},
+                {"mountpoint": "/dev/shm"},
+                {"mountpoint": "/vagrant"}
+              ],
+              "public_host_name": "c6401.ambari.apache.org",
+              "host_name": "c6401.ambari.apache.org"
+            }
+          }
+        ]
+      }
+
+      services = {
+        "Versions": {
+          "parent_stack_version": "2.5",
+          "stack_name": "HDP",
+          "stack_version": "2.6",
+          "stack_hierarchy": {
+            "stack_name": "HDP",
+            "stack_versions": ["2.5", "2.4", "2.3", "2.2", "2.1", "2.0.6"]
+          }
+        },
+        "services": [{
+        }
+        ],
+        "configurations": {
+        }
+      }
+
+      clusterData = {
+      }
+
+      configurations = {
+      }
+
+      self.stackAdvisor.recommendDruidConfigurations(configurations, clusterData, services, hosts)
+      self.assertEquals(configurations,
+                        {}
+                        )
+
   def test_recommendDruidConfigurations_heterogeneous_hosts(self):
     hosts = {
       "items": [


[14/51] [abbrv] ambari git commit: AMBARI-19058. Perf: Deploy 3000 Agent cluster and find perf bugs. Part 2.(vbrodetskyi)

Posted by sm...@apache.org.
AMBARI-19058. Perf: Deploy 3000 Agent cluster and find perf bugs. Part 2.(vbrodetskyi)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1238674f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1238674f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1238674f

Branch: refs/heads/branch-feature-AMBARI-18901
Commit: 1238674fa500c67ea11d6e929821587043eb6387
Parents: eb04efb
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Thu Dec 8 14:05:42 2016 +0200
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Thu Dec 8 14:05:42 2016 +0200

----------------------------------------------------------------------
 contrib/utils/perf/deploy-gce-perf-cluster.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/1238674f/contrib/utils/perf/deploy-gce-perf-cluster.py
----------------------------------------------------------------------
diff --git a/contrib/utils/perf/deploy-gce-perf-cluster.py b/contrib/utils/perf/deploy-gce-perf-cluster.py
index 6de3938..fe5f22d 100644
--- a/contrib/utils/perf/deploy-gce-perf-cluster.py
+++ b/contrib/utils/perf/deploy-gce-perf-cluster.py
@@ -280,13 +280,13 @@ def create_vms(args, number_of_nodes):
   :param number_of_nodes: Number of VMs to request.
   """
   print "Creating server VM {0}-server-{1} with xxlarge nodes on centos6...".format(cluster_prefix, args.cluster_suffix)
-  execute_command(args, args.controller, "/usr/sbin/gce up {0}-server-{1} 1 --centos6 --xxlarge".format(cluster_prefix, args.cluster_suffix),
+  execute_command(args, args.controller, "/usr/sbin/gce up {0}-server-{1} 1 --centos6 --xxlarge --ex --disk-xxlarge".format(cluster_prefix, args.cluster_suffix),
                   "Failed to create server, probably not enough resources!", "-tt")
   time.sleep(10)
 
   # trying to create cluster with needed params
   print "Creating agent VMs {0}-agent-{1} with {2} xlarge nodes on centos6...".format(cluster_prefix, args.cluster_suffix, str(number_of_nodes))
-  execute_command(args, args.controller, "/usr/sbin/gce up {0}-agent-{1} {2} --centos6 --xlarge".format(cluster_prefix, args.cluster_suffix, str(number_of_nodes)),
+  execute_command(args, args.controller, "/usr/sbin/gce up {0}-agent-{1} {2} --centos6 --xlarge --ex --disk-large".format(cluster_prefix, args.cluster_suffix, str(number_of_nodes)),
                   "Failed to create cluster VMs, probably not enough resources!", "-tt")
 
   # VMs are not accessible immediately
@@ -357,6 +357,7 @@ def create_server_script(server_host_name):
   "sed -i -e 's/local.database.user=postgres//g' /etc/ambari-server/conf/ambari.properties\n" + \
   "sed -i -e 's/server.jdbc.postgres.schema=ambari//g' /etc/ambari-server/conf/ambari.properties\n" + \
   "sed -i -e 's/false/true/g' /var/lib/ambari-server/resources/stacks/PERF/1.0/metainfo.xml\n" + \
+  "sed -i -e 's/-Xmx2048m/-Xmx16384m/g' /var/lib/ambari-server/ambari-env.sh\n" + \
   "\n" + \
   "echo 'server.jdbc.driver=com.mysql.jdbc.Driver' >> /etc/ambari-server/conf/ambari.properties\n" + \
   "echo 'server.jdbc.rca.url=jdbc:mysql://{0}:3306/ambari' >> /etc/ambari-server/conf/ambari.properties\n".format(server_host_name) + \


[22/51] [abbrv] ambari git commit: AMBARI-19137. HDP 3.0 TP - move ZK, HDFS, YARN/MR into new common-services version (alejandro)

Posted by sm...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/themes-mapred/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/themes-mapred/theme.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/themes-mapred/theme.json
deleted file mode 100644
index 5019447..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/themes-mapred/theme.json
+++ /dev/null
@@ -1,132 +0,0 @@
-{
-  "name": "default",
-  "description": "Default theme for MAPREDUCE service",
-  "configuration": {
-    "layouts": [
-      {
-        "name": "default",
-        "tabs": [
-          {
-            "name": "settings",
-            "display-name": "Settings",
-            "layout": {
-              "tab-columns": "1",
-              "tab-rows": "1",
-              "sections": [
-                {
-                  "name": "section-mr-scheduler",
-                  "display-name": "MapReduce",
-                  "row-index": "0",
-                  "column-index": "0",
-                  "row-span": "1",
-                  "column-span": "1",
-                  "section-columns": "3",
-                  "section-rows": "1",
-                  "subsections": [
-                    {
-                      "name": "subsection-mr-scheduler-row1-col1",
-                      "display-name": "MapReduce Framework",
-                      "row-index": "0",
-                      "column-index": "0",
-                      "row-span": "1",
-                      "column-span": "1"
-                    },
-                    {
-                      "name": "subsection-mr-scheduler-row1-col2",
-                      "row-index": "0",
-                      "column-index": "1",
-                      "row-span": "1",
-                      "column-span": "1"
-                    },
-                    {
-                      "name": "subsection-mr-scheduler-row1-col3",
-                      "row-index": "0",
-                      "column-index": "2",
-                      "row-span": "1",
-                      "column-span": "1"
-                    },
-                    {
-                      "name": "subsection-mr-scheduler-row2-col1",
-                      "display-name": "MapReduce AppMaster",
-                      "row-index": "1",
-                      "column-index": "0",
-                      "row-span": "1",
-                      "column-span": "3"
-                    }
-                  ]
-                }
-              ]
-            }
-          }
-        ]
-      }
-    ],
-    "placement": {
-      "configuration-layout": "default",
-      "configs": [
-        {
-          "config": "mapred-site/mapreduce.map.memory.mb",
-          "subsection-name": "subsection-mr-scheduler-row1-col1"
-        },
-        {
-          "config": "mapred-site/mapreduce.reduce.memory.mb",
-          "subsection-name": "subsection-mr-scheduler-row1-col2"
-        },
-        {
-          "config": "mapred-site/yarn.app.mapreduce.am.resource.mb",
-          "subsection-name": "subsection-mr-scheduler-row2-col1"
-        },
-        {
-          "config": "mapred-site/mapreduce.task.io.sort.mb",
-          "subsection-name": "subsection-mr-scheduler-row1-col3"
-        }
-      ]
-    },
-    "widgets": [
-      {
-        "config": "mapred-site/mapreduce.map.memory.mb",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "GB"
-            }
-          ]
-        }
-      },
-      {
-        "config": "mapred-site/mapreduce.reduce.memory.mb",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "GB"
-            }
-          ]
-        }
-      },
-      {
-        "config": "mapred-site/yarn.app.mapreduce.am.resource.mb",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "GB"
-            }
-          ]
-        }
-      },
-      {
-        "config": "mapred-site/mapreduce.task.io.sort.mb",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "MB"
-            }
-          ]
-        }
-      }
-    ]
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/themes/theme.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/themes/theme.json
deleted file mode 100644
index 758cf0c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/themes/theme.json
+++ /dev/null
@@ -1,250 +0,0 @@
-{
-  "name": "default",
-  "description": "Default theme for YARN service",
-  "configuration": {
-    "layouts": [
-      {
-        "name": "default",
-        "tabs": [
-          {
-            "name": "settings",
-            "display-name": "Settings",
-            "layout": {
-              "tab-columns": "3",
-              "tab-rows": "2",
-              "sections": [
-                {
-                  "name": "section-nm-sizing",
-                  "display-name": "Memory",
-                  "row-index": "0",
-                  "column-index": "0",
-                  "row-span": "1",
-                  "column-span": "2",
-                  "section-columns": "2",
-                  "section-rows": "1",
-                  "subsections": [
-                    {
-                      "name": "subsection-nm-sizing-col1",
-                      "display-name": "Node",
-                      "row-index": "0",
-                      "column-index": "0",
-                      "row-span": "1",
-                      "column-span": "1"
-                    },
-                    {
-                      "name": "subsection-nm-sizing-col2",
-                      "display-name": "Container",
-                      "row-index": "0",
-                      "column-index": "1",
-                      "row-span": "1",
-                      "column-span": "1"
-                    }
-                  ]
-                },
-                {
-                  "name": "section-yarn-platform-features",
-                  "display-name": "YARN Features",
-                  "row-index": "0",
-                  "column-index": "2",
-                  "row-span": "1",
-                  "column-span": "1",
-                  "section-columns": "1",
-                  "section-rows": "1",
-                  "subsections": [
-                    {
-                      "name": "subsection-yarn-platform-features-col1",
-                      "row-index": "0",
-                      "column-index": "0",
-                      "row-span": "1",
-                      "column-span": "1"
-                    }
-                  ]
-                },
-                {
-                  "name": "section-container-sizing",
-                  "display-name": "CPU",
-                  "row-index": "1",
-                  "column-index": "0",
-                  "row-span": "1",
-                  "column-span": "2",
-                  "section-columns": "2",
-                  "section-rows": "1",
-                  "subsections": [
-                    {
-                      "name": "subsection-container-sizing-col1",
-                      "display-name": "Node",
-                      "row-index": "0",
-                      "column-index": "0",
-                      "row-span": "1",
-                      "column-span": "1"
-                    },
-                    {
-                      "name": "subsection-container-sizing-col2",
-                      "display-name": "Container",
-                      "row-index": "0",
-                      "column-index": "1",
-                      "row-span": "1",
-                      "column-span": "1"
-                    }
-                  ]
-                }
-              ]
-            }
-          }
-        ]
-      }
-    ],
-    "placement": {
-      "configuration-layout": "default",
-      "configs": [
-        {
-          "config": "yarn-site/yarn.nodemanager.resource.memory-mb",
-          "subsection-name": "subsection-nm-sizing-col1"
-        },
-        {
-          "config": "yarn-site/yarn.scheduler.minimum-allocation-mb",
-          "subsection-name": "subsection-nm-sizing-col2"
-        },
-        {
-          "config": "yarn-site/yarn.scheduler.maximum-allocation-mb",
-          "subsection-name": "subsection-nm-sizing-col2"
-        },
-        {
-          "config": "yarn-site/yarn.node-labels.enabled",
-          "subsection-name": "subsection-yarn-platform-features-col1"
-        },
-        {
-          "config": "yarn-site/yarn.resourcemanager.scheduler.monitor.enable",
-          "subsection-name": "subsection-yarn-platform-features-col1"
-        },
-        {
-          "config": "capacity-scheduler/yarn.scheduler.capacity.resource-calculator",
-          "subsection-name": "subsection-container-sizing-col1"
-        },
-        {
-          "config": "yarn-env/yarn_cgroups_enabled",
-          "subsection-name": "subsection-container-sizing-col1"
-        },
-        {
-          "config": "yarn-site/yarn.nodemanager.resource.percentage-physical-cpu-limit",
-          "subsection-name": "subsection-container-sizing-col1"
-        },
-        {
-          "config": "yarn-site/yarn.nodemanager.resource.cpu-vcores",
-          "subsection-name": "subsection-container-sizing-col1"
-        },
-        {
-          "config": "yarn-site/yarn.scheduler.minimum-allocation-vcores",
-          "subsection-name": "subsection-container-sizing-col2"
-        },
-        {
-          "config": "yarn-site/yarn.scheduler.maximum-allocation-vcores",
-          "subsection-name": "subsection-container-sizing-col2"
-        }
-      ]
-    },
-    "widgets": [
-      {
-        "config": "yarn-site/yarn.nodemanager.resource.memory-mb",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "MB"
-            }
-          ]
-        }
-      },
-      {
-        "config": "yarn-site/yarn.nodemanager.resource.percentage-physical-cpu-limit",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "percent"
-            }
-          ]
-        }
-      },
-      {
-        "config": "yarn-site/yarn.nodemanager.resource.cpu-vcores",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "int"
-            }
-          ]
-        }
-      },
-      {
-        "config": "yarn-site/yarn.scheduler.minimum-allocation-mb",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "MB"
-            }
-          ]
-        }
-      },
-      {
-        "config": "yarn-site/yarn.scheduler.maximum-allocation-mb",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "MB"
-            }
-          ]
-        }
-      },
-      {
-        "config": "yarn-site/yarn.scheduler.minimum-allocation-vcores",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "int"
-            }
-          ]
-        }
-      },
-      {
-        "config": "yarn-site/yarn.scheduler.maximum-allocation-vcores",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "int"
-            }
-          ]
-        }
-      },
-      {
-        "config": "yarn-site/yarn.node-labels.enabled",
-        "widget": {
-          "type": "toggle"
-        }
-      },
-      {
-        "config": "yarn-env/yarn_cgroups_enabled",
-        "widget": {
-          "type": "toggle"
-        }
-      },
-      {
-        "config": "yarn-site/yarn.resourcemanager.scheduler.monitor.enable",
-        "widget": {
-          "type": "toggle"
-        }
-      },
-      {
-        "config": "capacity-scheduler/yarn.scheduler.capacity.resource-calculator",
-        "widget": {
-          "type": "toggle"
-        }
-      }
-    ]
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/ZOOKEEPER/metainfo.xml
index 5d4fb4d..50cb18d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/ZOOKEEPER/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/ZOOKEEPER/metainfo.xml
@@ -20,10 +20,8 @@
   <services>
     <service>
       <name>ZOOKEEPER</name>
-      <displayName>ZooKeeper</displayName>
-      <comment>Centralized service which provides highly reliable distributed coordination</comment>
-      <version>3.4.6.3.0</version>
-      <extends>common-services/ZOOKEEPER/3.4.6</extends>
+      <version>3.4.9.3.0</version>
+      <extends>common-services/ZOOKEEPER/3.4.9</extends>
 
       <osSpecifics>
         <osSpecific>


[50/51] [abbrv] ambari git commit: AMBARI-19154. RAT check failure in HDFS/3.0.0/package/scripts/balancer-emulator (alejandro)

Posted by sm...@apache.org.
AMBARI-19154. RAT check failure in HDFS/3.0.0/package/scripts/balancer-emulator (alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/93bc5d81
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/93bc5d81
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/93bc5d81

Branch: refs/heads/branch-feature-AMBARI-18901
Commit: 93bc5d8192de9860940ba938986e33d07d3d01aa
Parents: 739c460
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Fri Dec 9 11:39:09 2016 -0800
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Fri Dec 9 11:39:09 2016 -0800

----------------------------------------------------------------------
 ambari-server/pom.xml | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/93bc5d81/ambari-server/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-server/pom.xml b/ambari-server/pom.xml
index 08757a2..6307a14 100644
--- a/ambari-server/pom.xml
+++ b/ambari-server/pom.xml
@@ -286,6 +286,8 @@
             <exclude>src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/service-metrics/*.txt</exclude>
             <exclude>src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/balancer-emulator/balancer-err.log</exclude>
             <exclude>src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/balancer-emulator/balancer.log</exclude>
+            <exclude>src/main/resources/common-services/HDFS/3.0.0/package/scripts/balancer-emulator/balancer-err.log</exclude>
+            <exclude>src/main/resources/common-services/HDFS/3.0.0/package/scripts/balancer-emulator/balancer.log</exclude>
             <exclude>src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/balancer-emulator/balancer.log</exclude>
             <exclude>src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/balancer-emulator/balancer-err.log</exclude>
             <exclude>src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/balancer-emulator/balancer.log</exclude>


[49/51] [abbrv] ambari git commit: AMBARI-19139. UI: Quick Link from Falcon has an incorrect name (now 'Falcon Dashboard'; it was previously 'Falcon Web UI') (alexantonenko)

Posted by sm...@apache.org.
AMBARI-19139. UI: Quick Link from Falcon has an incorrect name (now 'Falcon Dashboard'; it was previously 'Falcon Web UI') (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/739c4606
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/739c4606
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/739c4606

Branch: refs/heads/branch-feature-AMBARI-18901
Commit: 739c4606e2732c3c50af58f4acc66b98d4f04a48
Parents: d22422b
Author: Alex Antonenko <hi...@gmail.com>
Authored: Thu Dec 8 20:10:13 2016 +0200
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Fri Dec 9 20:15:56 2016 +0200

----------------------------------------------------------------------
 .../common-services/FALCON/0.5.0.2.1/quicklinks/quicklinks.json  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/739c4606/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/quicklinks/quicklinks.json b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/quicklinks/quicklinks.json
index cc81fca..152ff57 100644
--- a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/quicklinks/quicklinks.json
+++ b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/quicklinks/quicklinks.json
@@ -16,8 +16,8 @@
 
     "links": [
       {
-        "name": "falcon_dashboard",
-        "label": "Falcon Dashboard",
+        "name": "falcon_web_ui",
+        "label": "Falcon Web UI",
         "requires_user_name": "true",
         "component_name": "FALCON_SERVER",
         "url": "%@://%@:%@/",


[11/51] [abbrv] ambari git commit: AMBARI-18737 Perf: Allow Kerberizing the PERF stack (dsen)

Posted by sm...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/utils.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/utils.py
new file mode 100644
index 0000000..199e6d7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/utils.py
@@ -0,0 +1,105 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+def get_property_value(dictionary, property_name, default_value=None, trim_string=False,
+                       empty_value=""):
+  """
+  Get a property value from a dictionary, applying rules as necessary.
+
+  If the dictionary does not contain a value for property_name, or the value for property_name is None,
+  default_value is used as the value to return.  Then, if trim_string is True and the value is None
+  or the value is an empty string, empty_value is returned; otherwise the (current) value is returned.
+
+  Note: the property value will most likely be a string or a unicode string, however in the event
+  it is not (for example a number), this method will behave properly and return the value as is.
+
+  :param dictionary: a dictionary of values
+  :param property_name: the name of a dictionary item to retrieve
+  :param default_value: the value to use if the item is not in the dictionary or the value of the item is None
+  :param trim_string: a Boolean value indicating whether to strip whitespace from the value (True) or not (False)
+  :param empty_value: the value to use if the (current) value is None or an empty string, if trim_string is True
+  :return: the requested property value with rules applied
+  """
+  # If property_name is not in the dictionary, fall back to default_value
+  if property_name in dictionary:
+    value = dictionary[property_name]
+    if value is None:
+      value = default_value
+  else:
+    value = default_value
+
+  if trim_string:
+    # If the value is none, consider it empty...
+    if value is None:
+      value = empty_value
+    elif (type(value) == str) or (type(value) == unicode):
+      value = value.strip()
+
+      if len(value) == 0:
+        value = empty_value
+
+  return value
+
+def get_unstructured_data(dictionary, property_name):
+  prefix = property_name + '/'
+  prefix_len = len(prefix)
+  return dict((k[prefix_len:], v) for k, v in dictionary.iteritems() if k.startswith(prefix))
+
+def split_host_and_port(host):
+  """
+  Splits a string into its host and port components
+
+  :param host: a string matching the following pattern: <host name | ip address>[:port]
+  :return: a Dictionary containing 'host' and 'port' entries for the input value
+  """
+
+  if host is None:
+    host_and_port = None
+  else:
+    host_and_port = {}
+    parts = host.split(":")
+
+    if parts is not None:
+      length = len(parts)
+
+      if length > 0:
+        host_and_port['host'] = parts[0]
+
+        if length > 1:
+          host_and_port['port'] = int(parts[1])
+
+  return host_and_port
+
+def set_port(host, port):
+  """
+  Sets the port for a host specification, potentially replacing an existing port declaration
+
+  :param host: a string matching the following pattern: <host name | ip address>[:port]
+  :param port: a string or integer declaring the (new) port
+  :return: a string declaring the new host/port specification
+  """
+  if port is None:
+    return host
+  else:
+    host_and_port = split_host_and_port(host)
+
+    if (host_and_port is not None) and ('host' in host_and_port):
+      return "%s:%s" % (host_and_port['host'], port)
+    else:
+      return host
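
The helpers added above are small enough to exercise directly; the values below are made up purely for illustration:

  config = {'kdc_hosts': ' kdc1.example.com ', 'admin_port': None}

  get_property_value(config, 'kdc_hosts', trim_string=True)    # -> 'kdc1.example.com'
  get_property_value(config, 'admin_port', default_value=749)  # -> 749
  get_unstructured_data({'kdc/host': 'a', 'kdc/port': '88'}, 'kdc')
                                                                # -> {'host': 'a', 'port': '88'}
  split_host_and_port('kdc1.example.com:88')                    # -> {'host': 'kdc1.example.com', 'port': 88}
  set_port('kdc1.example.com:88', 749)                          # -> 'kdc1.example.com:749'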

http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/templates/krb5_conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/templates/krb5_conf.j2 b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/templates/krb5_conf.j2
new file mode 100644
index 0000000..0191953
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/templates/krb5_conf.j2
@@ -0,0 +1,54 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+[libdefaults]
+  renew_lifetime = 7d
+  forwardable = true
+  default_realm = {{realm}}
+  ticket_lifetime = 24h
+  dns_lookup_realm = false
+  dns_lookup_kdc = false
+  default_ccache_name = /tmp/krb5cc_%{uid}
+  #default_tgs_enctypes = {{encryption_types}}
+  #default_tkt_enctypes = {{encryption_types}}
+{% if domains %}
+[domain_realm]
+{%- for domain in domains.split(',') %}
+  {{domain|trim()}} = {{realm}}
+{%- endfor %}
+{% endif %}
+[logging]
+  default = FILE:/var/log/krb5kdc.log
+  admin_server = FILE:/var/log/kadmind.log
+  kdc = FILE:/var/log/krb5kdc.log
+
+[realms]
+  {{realm}} = {
+{%- if kdc_hosts > 0 -%}
+{%- set kdc_host_list = kdc_hosts.split(',')  -%}
+{%- if kdc_host_list and kdc_host_list|length > 0 %}
+    admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}
+{%- if kdc_host_list -%}
+{% for kdc_host in kdc_host_list %}
+    kdc = {{kdc_host|trim()}}
+{%- endfor -%}
+{% endif %}
+{%- endif %}
+{%- endif %}
+  }
+
+{# Append additional realm declarations below #}
\ No newline at end of file
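
To make the nested conditionals above easier to read: with illustrative values (realm EXAMPLE.COM, kdc_hosts "kdc1.example.com,kdc2.example.com", no admin_server_host override), the [realms] block should render roughly as follows; exact blank lines depend on Jinja2 whitespace trimming:

  [realms]
    EXAMPLE.COM = {
      admin_server = kdc1.example.com
      kdc = kdc1.example.com
      kdc = kdc2.example.com
    }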

http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/configuration/sleepy-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/configuration/sleepy-site.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/configuration/sleepy-site.xml
index 67762a5..5b6f2f6 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/configuration/sleepy-site.xml
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/configuration/sleepy-site.xml
@@ -33,4 +33,16 @@
     </value-attributes>
     <on-ambari-upgrade add="false"/>
   </property>
+  <property>
+    <name>sleepy_user</name>
+    <display-name>sleepy User</display-name>
+    <value>sleepy</value>
+    <property-type>USER</property-type>
+    <description>sleepy Username.</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/kerberos.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/kerberos.json
new file mode 100644
index 0000000..45e33f5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/kerberos.json
@@ -0,0 +1,78 @@
+{
+  "services": [
+    {
+      "name": "SLEEPY",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "sleepy",
+          "principal": {
+            "value": "${sleepy-site/sleepy_user}-${cluster_name|toLower()}@${realm}",
+            "type" : "user",
+            "configuration": "sleepy-site/sleepy_principal_name",
+            "local_username": "${sleepy-site/sleepy_user}"
+          },
+          "keytab": {
+            "file": "${keytab_dir}/sleepy.headless.keytab",
+            "owner": {
+              "name": "${sleepy-site/sleepy_user}",
+              "access": "r"
+            },
+            "group": {
+              "name": "${cluster-env/user_group}",
+              "access": "r"
+            },
+            "configuration": "sleepy-site/sleepy_user_keytab"
+          }
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+      ],
+      "components": [
+        {
+          "name": "SLEEPY",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "sleepy_sleepy",
+              "principal": {
+                "value": "sleepy/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "sleepy-site/sleepy.sleepy.kerberos.principal",
+                "local_username": "${sleepy-site/sleepy_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/sleepy.service.keytab",
+                "owner": {
+                  "name": "${sleepy-site/sleepy_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "sleepy-site/sleepy.sleepy.keytab.file"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "sleepy-site/sleepy.security.authentication.spnego.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "sleepy-site/sleepy.security.authentication.spnego.kerberos.keytab"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
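
As a reading aid for the descriptor above: when it is processed, Ambari expands the ${config-type/property} and ${variable|operator()} placeholders from cluster configuration, so with illustrative values (sleepy_user=sleepy, cluster name PerfCluster, realm EXAMPLE.COM) the headless principal

  ${sleepy-site/sleepy_user}-${cluster_name|toLower()}@${realm}

would resolve to roughly

  sleepy-perfcluster@EXAMPLE.COM

and, per the "configuration" entries, the resolved principal names and keytab paths are written back into sleepy-site (sleepy_principal_name, sleepy_user_keytab, and the per-component sleepy.sleepy.* properties).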

http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/package/scripts/dwarf.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/package/scripts/dwarf.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/package/scripts/dwarf.py
index 370d03d..699e35b 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/package/scripts/dwarf.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/package/scripts/dwarf.py
@@ -33,6 +33,10 @@ class Sleepy(Dummy):
   def __init__(self):
     super(Sleepy, self).__init__()
     self.component_name = "SLEEPY"
+    self.principal_conf_name = "sleepy-site"
+    self.principal_name = "sleepy.sleepy.kerberos.principal"
+    self.keytab_conf_name = "sleepy-site"
+    self.keytab_name = "sleepy.sleepy.keytab.file"
 
 if __name__ == "__main__":
   Sleepy().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/configuration/snow-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/configuration/snow-site.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/configuration/snow-site.xml
index 67762a5..dfad0ac 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/configuration/snow-site.xml
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/configuration/snow-site.xml
@@ -33,4 +33,16 @@
     </value-attributes>
     <on-ambari-upgrade add="false"/>
   </property>
+  <property>
+    <name>snow_user</name>
+    <display-name>snow User</display-name>
+    <value>snow</value>
+    <property-type>USER</property-type>
+    <description>snow Username.</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/kerberos.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/kerberos.json
new file mode 100644
index 0000000..47a979a8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/kerberos.json
@@ -0,0 +1,78 @@
+{
+  "services": [
+    {
+      "name": "SNOW",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "snow",
+          "principal": {
+            "value": "${snow-site/snow_user}-${cluster_name|toLower()}@${realm}",
+            "type" : "user",
+            "configuration": "snow-site/snow_principal_name",
+            "local_username": "${snow-site/snow_user}"
+          },
+          "keytab": {
+            "file": "${keytab_dir}/snow.headless.keytab",
+            "owner": {
+              "name": "${snow-site/snow_user}",
+              "access": "r"
+            },
+            "group": {
+              "name": "${cluster-env/user_group}",
+              "access": "r"
+            },
+            "configuration": "snow-site/snow_user_keytab"
+          }
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+      ],
+      "components": [
+        {
+          "name": "SNOW_WHITE",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "snow_white_snow",
+              "principal": {
+                "value": "snow/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "snow-site/snow.white.kerberos.principal",
+                "local_username": "${snow-site/snow_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/snow.service.keytab",
+                "owner": {
+                  "name": "${snow-site/snow_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "snow-site/snow.white.keytab.file"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "snow-site/snow.security.authentication.spnego.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "snow-site/snow.security.authentication.spnego.kerberos.keytab"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/package/scripts/snow_white.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/package/scripts/snow_white.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/package/scripts/snow_white.py
index 41bfa8a..df2b81d 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/package/scripts/snow_white.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/package/scripts/snow_white.py
@@ -33,6 +33,10 @@ class SnowWhite(Dummy):
   def __init__(self):
     super(SnowWhite, self).__init__()
     self.component_name = "SNOW_WHITE"
+    self.principal_conf_name = "snow-site"
+    self.principal_name = "snow.white.kerberos.principal"
+    self.keytab_conf_name = "snow-site"
+    self.keytab_name = "snow.white.keytab.file"
 
 if __name__ == "__main__":
   SnowWhite().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/application_timeline_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/application_timeline_server.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/application_timeline_server.py
index 4b32de7..d179b1f 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/application_timeline_server.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/application_timeline_server.py
@@ -33,6 +33,10 @@ class ApplicationTimelineServer(Dummy):
   def __init__(self):
     super(ApplicationTimelineServer, self).__init__()
     self.component_name = "APP_TIMELINE_SERVER"
+    self.principal_conf_name = "yarn-site"
+    self.principal_name = "yarn.timeline-service.principal"
+    self.keytab_conf_name = "yarn-site"
+    self.keytab_name = "yarn.timeline-service.keytab"
 
 if __name__ == "__main__":
   ApplicationTimelineServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/historyserver.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/historyserver.py
index cba85b5..0570987 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/historyserver.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/historyserver.py
@@ -33,6 +33,10 @@ class HistoryServer(Dummy):
   def __init__(self):
     super(HistoryServer, self).__init__()
     self.component_name = "HISTORYSERVER"
+    self.principal_conf_name = "mapred-site"
+    self.principal_name = "mapreduce.jobhistory.principal"
+    self.keytab_conf_name = "mapred-site"
+    self.keytab_name = "mapreduce.jobhistory.keytab"
 
 if __name__ == "__main__":
   HistoryServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/nodemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/nodemanager.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/nodemanager.py
index 883c3ad..21db94f 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/nodemanager.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/nodemanager.py
@@ -33,6 +33,10 @@ class Nodemanager(Dummy):
   def __init__(self):
     super(Nodemanager, self).__init__()
     self.component_name = "NODEMANAGER"
+    self.principal_conf_name = "yarn-site"
+    self.principal_name = "yarn.nodemanager.principal"
+    self.keytab_conf_name = "yarn-site"
+    self.keytab_name = "yarn.nodemanager.keytab"
 
 if __name__ == "__main__":
   Nodemanager().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/resourcemanager.py
index 7f80077..5f6c535 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/resourcemanager.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/package/scripts/resourcemanager.py
@@ -33,6 +33,10 @@ class Resourcemanager(Dummy):
   def __init__(self):
     super(Resourcemanager, self).__init__()
     self.component_name = "RESOURCEMANAGER"
+    self.principal_conf_name = "yarn-site"
+    self.principal_name = "yarn.resourcemanager.principal"
+    self.keytab_conf_name = "yarn-site"
+    self.keytab_name = "yarn.resourcemanager.keytab"
 
   def decommission(self, env):
     print "Decommission"

http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/kerberos.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/kerberos.json
new file mode 100644
index 0000000..0a64ea5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/kerberos.json
@@ -0,0 +1,39 @@
+{
+  "services": [
+    {
+      "name": "ZOOKEEPER",
+      "identities": [
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "components": [
+        {
+          "name": "ZOOKEEPER_SERVER",
+          "identities": [
+            {
+              "name": "zookeeper_zk",
+              "principal": {
+                "value": "zookeeper/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "zookeeper-env/zookeeper_principal_name"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/zk.service.keytab",
+                "owner": {
+                  "name": "${zookeeper-env/zk_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "zookeeper-env/zookeeper_keytab_path"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/338c2c5b/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/package/scripts/zookeeper_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/package/scripts/zookeeper_server.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/package/scripts/zookeeper_server.py
index 92519ba..6ab88bb 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/package/scripts/zookeeper_server.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/ZOOKEEPER/package/scripts/zookeeper_server.py
@@ -33,6 +33,10 @@ class ZookeeperServer(Dummy):
   def __init__(self):
     super(ZookeeperServer, self).__init__()
     self.component_name = "ZOOKEEPER_SERVER"
+    self.principal_conf_name = "zookeeper-env"
+    self.principal_name = "zookeeper_principal_name"
+    self.keytab_conf_name = "zookeeper-env"
+    self.keytab_name = "zookeeper_keytab_path"
 
 if __name__ == "__main__":
   ZookeeperServer().execute()
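
Each PERF dummy component in this commit gains the same four attributes, pointing at the configuration type and property where its kerberos.json identity stores the resolved principal and keytab (zookeeper-env/zookeeper_principal_name and zookeeper-env/zookeeper_keytab_path in this case). The Dummy base class itself is not part of this digest; the sketch below is only a hypothetical illustration of how such attributes could be consumed, not the actual implementation:

  # Hypothetical helper -- the real PERF Dummy base class is not shown here.
  def resolve_kerberos_identity(component, configurations):
      """Look up the principal/keytab values that a PERF dummy component declared."""
      principal = configurations[component.principal_conf_name][component.principal_name]
      keytab = configurations[component.keytab_conf_name][component.keytab_name]
      return principal, keytab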


[20/51] [abbrv] ambari git commit: AMBARI-19134 : Storm start is failing due to ClassNotFoundException org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter. (avijayan)

Posted by sm...@apache.org.
AMBARI-19134 : Storm start is failing due to ClassNotFoundException org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter. (avijayan)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/82f9401b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/82f9401b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/82f9401b

Branch: refs/heads/branch-feature-AMBARI-18901
Commit: 82f9401be0feb30ef59e6c02e8715fe4a05f6f9e
Parents: 51c6ef9
Author: Aravindan Vijayan <av...@hortonworks.com>
Authored: Thu Dec 8 12:02:41 2016 -0800
Committer: Aravindan Vijayan <av...@hortonworks.com>
Committed: Thu Dec 8 12:02:41 2016 -0800

----------------------------------------------------------------------
 .../metrics2/sink/timeline/AbstractTimelineMetricsSink.java     | 5 +++++
 .../sink/timeline/cache/HandleConnectExceptionTest.java         | 4 ++++
 2 files changed, 9 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/82f9401b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
index f92f968..9bc3be5 100644
--- a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
+++ b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
@@ -222,6 +222,11 @@ public abstract class AbstractTimelineMetricsSink {
       collectorHost = findPreferredCollectHost();
     }
 
+    if (collectorHost == null) {
+      LOG.warn("No live collector to send metrics to. Metrics to be sent will be discarded.");
+      return false;
+    }
+
     String connectUrl = getCollectorUri(collectorHost);
     String jsonData = null;
     LOG.debug("EmitMetrics connectUrl = "  + connectUrl);

http://git-wip-us.apache.org/repos/asf/ambari/blob/82f9401b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/cache/HandleConnectExceptionTest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/cache/HandleConnectExceptionTest.java b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/cache/HandleConnectExceptionTest.java
index 36ec074..32fe32e 100644
--- a/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/cache/HandleConnectExceptionTest.java
+++ b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/cache/HandleConnectExceptionTest.java
@@ -130,6 +130,10 @@ public class HandleConnectExceptionTest {
       return super.emitMetrics(metrics);
     }
 
+    @Override
+    protected synchronized String findPreferredCollectHost() {
+      return "localhost";
+    }
 
   }
 }


[17/51] [abbrv] ambari git commit: AMBARI-19075. Cookie management for Ambari LogSearch Integration (oleewere)

Posted by sm...@apache.org.
AMBARI-19075. Cookie management for Ambari LogSearch Integration (oleewere)

Change-Id: I67395f02705d296e7b1b0dced2fffde69d92482d


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a9a05f76
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a9a05f76
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a9a05f76

Branch: refs/heads/branch-feature-AMBARI-18901
Commit: a9a05f76f07df992e4216a2c9e27736239bfdb9a
Parents: 8bdb745
Author: oleewere <ol...@gmail.com>
Authored: Thu Dec 8 00:02:50 2016 +0100
Committer: oleewere <ol...@gmail.com>
Committed: Thu Dec 8 16:28:11 2016 +0100

----------------------------------------------------------------------
 .../org/apache/ambari/logsearch/LogSearch.java  |  4 +-
 .../web/listener/LogSearchSessionListener.java  | 48 ++++++++++++++++++++
 .../docker/test-config/logsearch/log4j.xml      |  2 +-
 .../controller/logging/LoggingCookieStore.java  | 44 ++++++++++++++++++
 .../logging/LoggingRequestHelperImpl.java       | 42 +++++++++++++++--
 .../0.5.0/properties/logsearch-log4j.xml.j2     |  4 +-
 .../logging/LoggingRequestHelperImplTest.java   |  5 ++
 7 files changed, 141 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a9a05f76/ambari-logsearch/ambari-logsearch-portal/src/main/java/org/apache/ambari/logsearch/LogSearch.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-portal/src/main/java/org/apache/ambari/logsearch/LogSearch.java b/ambari-logsearch/ambari-logsearch-portal/src/main/java/org/apache/ambari/logsearch/LogSearch.java
index 2c3f4f5..14f83cf 100644
--- a/ambari-logsearch/ambari-logsearch-portal/src/main/java/org/apache/ambari/logsearch/LogSearch.java
+++ b/ambari-logsearch/ambari-logsearch-portal/src/main/java/org/apache/ambari/logsearch/LogSearch.java
@@ -30,6 +30,7 @@ import org.apache.ambari.logsearch.common.ManageStartEndTime;
 import org.apache.ambari.logsearch.common.PropertiesHelper;
 import org.apache.ambari.logsearch.conf.ApplicationConfig;
 import org.apache.ambari.logsearch.util.SSLUtil;
+import org.apache.ambari.logsearch.web.listener.LogSearchSessionListener;
 import org.apache.commons.lang.StringUtils;
 import org.apache.log4j.Logger;
 import org.eclipse.jetty.server.Connector;
@@ -66,7 +67,7 @@ public class LogSearch {
 
   private static final String WEB_RESOURCE_FOLDER = "webapps/app";
   private static final String ROOT_CONTEXT = "/";
-  private static final Integer SESSION_TIMEOUT = 30;
+  private static final Integer SESSION_TIMEOUT = 60 * 30;
 
 
   public static void main(String[] argv) {
@@ -136,6 +137,7 @@ public class LogSearch {
     context.setBaseResource(Resource.newResource(webResourceBase));
     context.setContextPath(ROOT_CONTEXT);
     context.setParentLoaderPriority(true);
+    context.addEventListener(new LogSearchSessionListener());
 
     // Configure Spring
     context.addEventListener(new ContextLoaderListener());

http://git-wip-us.apache.org/repos/asf/ambari/blob/a9a05f76/ambari-logsearch/ambari-logsearch-portal/src/main/java/org/apache/ambari/logsearch/web/listener/LogSearchSessionListener.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-portal/src/main/java/org/apache/ambari/logsearch/web/listener/LogSearchSessionListener.java b/ambari-logsearch/ambari-logsearch-portal/src/main/java/org/apache/ambari/logsearch/web/listener/LogSearchSessionListener.java
new file mode 100644
index 0000000..9fa5c80
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-portal/src/main/java/org/apache/ambari/logsearch/web/listener/LogSearchSessionListener.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.web.listener;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.servlet.http.HttpSessionEvent;
+import javax.servlet.http.HttpSessionListener;
+
+public class LogSearchSessionListener implements HttpSessionListener {
+
+  private Logger LOG = LoggerFactory.getLogger(LogSearchSessionListener.class);
+
+  private int numberOfSessions = 0;
+
+  @Override
+  public void sessionCreated(HttpSessionEvent event) {
+    synchronized (this) {
+      numberOfSessions++;
+    }
+    LOG.debug(String.format("New session is created (Id: %s). Number of sessions: %d", event.getSession().getId(), numberOfSessions));
+  }
+
+  @Override
+  public void sessionDestroyed(HttpSessionEvent event) {
+    synchronized (this) {
+      numberOfSessions--;
+    }
+    LOG.debug(String.format("Session destroyed (Id: %s). Number of sessions: %d", event.getSession().getId(), numberOfSessions));
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/a9a05f76/ambari-logsearch/docker/test-config/logsearch/log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/docker/test-config/logsearch/log4j.xml b/ambari-logsearch/docker/test-config/logsearch/log4j.xml
index b80824b..d0e26ed 100644
--- a/ambari-logsearch/docker/test-config/logsearch/log4j.xml
+++ b/ambari-logsearch/docker/test-config/logsearch/log4j.xml
@@ -25,7 +25,7 @@
     <param name="maxFileSize" value="10MB" />
     <param name="maxBackupIndex" value="10" />
     <layout class="org.apache.log4j.PatternLayout">
-      <param name="ConversionPattern" value="%d [%t] %-5p %C{6} (%F:%L) - %m%n" />
+      <param name="ConversionPattern" value="%d %-5p [%t] %C{6} (%F:%L) - %m%n" />
     </layout>
   </appender>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a9a05f76/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LoggingCookieStore.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LoggingCookieStore.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LoggingCookieStore.java
new file mode 100644
index 0000000..a779068
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LoggingCookieStore.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.server.controller.logging;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Package-protected singleton for storing Cookie key/value pairs for the Logging Service.
+ * This was chosen instead of CookieManager to avoid system-wide Cookie handling.
+ */
+class LoggingCookieStore {
+  public static final LoggingCookieStore INSTANCE = new LoggingCookieStore();
+
+  private final Map<String, String> cookiesMap = new HashMap<>();
+
+  private LoggingCookieStore() {
+  }
+
+  public Map<String, String> getCookiesMap() {
+    return cookiesMap;
+  }
+
+  public void addCookie(String cookieName, String cookieValue) {
+    cookiesMap.put(cookieName, cookieValue);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/a9a05f76/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LoggingRequestHelperImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LoggingRequestHelperImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LoggingRequestHelperImpl.java
index eab0c04..358c1b7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LoggingRequestHelperImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LoggingRequestHelperImpl.java
@@ -26,6 +26,7 @@ import org.apache.ambari.server.security.encryption.CredentialStoreService;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Config;
 import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.lang.StringUtils;
 import org.apache.http.client.utils.URIBuilder;
 import org.apache.log4j.Logger;
 import org.codehaus.jackson.map.AnnotationIntrospector;
@@ -39,11 +40,14 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.io.StringReader;
+import java.net.HttpCookie;
 import java.net.HttpURLConnection;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -76,6 +80,10 @@ public class LoggingRequestHelperImpl implements LoggingRequestHelper {
 
   private static final String PAGE_SIZE_QUERY_PARAMETER_NAME = "pageSize";
 
+  private static final String COOKIE_HEADER = "Cookie";
+
+  private static final String SET_COOKIES_HEADER = "Set-Cookie";
+
   private static final int DEFAULT_LOGSEARCH_CONNECT_TIMEOUT_IN_MILLISECONDS = 5000;
 
   private static final int DEFAULT_LOGSEARCH_READ_TIMEOUT_IN_MILLISECONDS = 5000;
@@ -109,20 +117,20 @@ public class LoggingRequestHelperImpl implements LoggingRequestHelper {
       // use the Apache builder to create the correct URI
       URI logSearchURI = createLogSearchQueryURI("http", queryParameters);
       LOG.debug("Attempting to connect to LogSearch server at " + logSearchURI);
-
-      HttpURLConnection httpURLConnection  = (HttpURLConnection)logSearchURI.toURL().openConnection();
+      HttpURLConnection httpURLConnection  = (HttpURLConnection) logSearchURI.toURL().openConnection();
       httpURLConnection.setRequestMethod("GET");
       httpURLConnection.setConnectTimeout(DEFAULT_LOGSEARCH_CONNECT_TIMEOUT_IN_MILLISECONDS);
       httpURLConnection.setReadTimeout(DEFAULT_LOGSEARCH_READ_TIMEOUT_IN_MILLISECONDS);
 
+      addCookiesFromCookieStore(httpURLConnection);
 
       setupCredentials(httpURLConnection);
 
       StringBuffer buffer = networkConnection.readQueryResponseFromServer(httpURLConnection);
+      addCookiesToCookieStoreFromResponse(httpURLConnection);
 
       // setup a reader for the JSON response
-      StringReader stringReader =
-        new StringReader(buffer.toString());
+      StringReader stringReader = new StringReader(buffer.toString());
 
       ObjectReader logQueryResponseReader =
         createObjectReader(LogQueryResponse.class);
@@ -137,6 +145,27 @@ public class LoggingRequestHelperImpl implements LoggingRequestHelper {
     return null;
   }
 
+  private void addCookiesFromCookieStore(HttpURLConnection httpURLConnection) {
+    if (LoggingCookieStore.INSTANCE.getCookiesMap().size() > 0) {
+      List<String> cookiesStrList = new ArrayList<>();
+      for (Map.Entry<String, String> entry : LoggingCookieStore.INSTANCE.getCookiesMap().entrySet()) {
+        cookiesStrList.add(String.format("%s=%s", entry.getKey(), entry.getValue()));
+      }
+      httpURLConnection.setRequestProperty(COOKIE_HEADER, StringUtils.join(cookiesStrList, "; "));
+    }
+  }
+
+  private void addCookiesToCookieStoreFromResponse(HttpURLConnection httpURLConnection) {
+    Map<String, List<String>> headerFields = httpURLConnection.getHeaderFields();
+    List<String> cookiesHeader = headerFields.get(SET_COOKIES_HEADER);
+    if (cookiesHeader != null) {
+      for (String cookie : cookiesHeader) {
+        HttpCookie cookie1 = HttpCookie.parse(cookie).get(0);
+        LoggingCookieStore.INSTANCE.addCookie(cookie1.getName(), cookie1.getValue());
+      }
+    }
+  }
+
 
   private void setupCredentials(HttpURLConnection httpURLConnection) {
     final String logSearchAdminUser =
@@ -224,10 +253,14 @@ public class LoggingRequestHelperImpl implements LoggingRequestHelper {
       HttpURLConnection httpURLConnection = (HttpURLConnection) logLevelQueryURI.toURL().openConnection();
       httpURLConnection.setRequestMethod("GET");
 
+      addCookiesFromCookieStore(httpURLConnection);
+
       setupCredentials(httpURLConnection);
 
       StringBuffer buffer = networkConnection.readQueryResponseFromServer(httpURLConnection);
 
+      addCookiesToCookieStoreFromResponse(httpURLConnection);
+
       // setup a reader for the JSON response
       StringReader stringReader =
         new StringReader(buffer.toString());
@@ -374,6 +407,7 @@ public class LoggingRequestHelperImpl implements LoggingRequestHelper {
         BufferedReader reader = new BufferedReader(new InputStreamReader(resultStream));
         LOG.debug("Response code from LogSearch Service is = " + httpURLConnection.getResponseCode());
 
+
         String line = reader.readLine();
         StringBuffer buffer = new StringBuffer();
         while (line != null) {

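The two helper methods added above implement a simple round trip: serialize the stored cookies into a single Cookie request header, then harvest any Set-Cookie response headers back into the store. A self-contained sketch of that pattern using only java.net classes; the URL and the plain HashMap standing in for LoggingCookieStore are illustrative:

    import java.net.HttpCookie;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class CookieRoundTripSketch {
      public static void main(String[] args) throws Exception {
        Map<String, String> cookieStore = new HashMap<>();  // stands in for LoggingCookieStore
        cookieStore.put("JSESSIONID", "abc123");             // illustrative value

        HttpURLConnection connection = (HttpURLConnection)
            new URL("http://logsearch.example.org:61888/api/v1/service/logs").openConnection();
        connection.setRequestMethod("GET");

        // 1. Send everything currently in the store as one Cookie header.
        if (!cookieStore.isEmpty()) {
          StringBuilder cookieHeader = new StringBuilder();
          for (Map.Entry<String, String> entry : cookieStore.entrySet()) {
            if (cookieHeader.length() > 0) {
              cookieHeader.append("; ");
            }
            cookieHeader.append(entry.getKey()).append('=').append(entry.getValue());
          }
          connection.setRequestProperty("Cookie", cookieHeader.toString());
        }

        connection.getInputStream().close();

        // 2. Harvest any Set-Cookie headers from the response back into the store.
        List<String> setCookies = connection.getHeaderFields().get("Set-Cookie");
        if (setCookies != null) {
          for (String rawCookie : setCookies) {
            HttpCookie parsed = HttpCookie.parse(rawCookie).get(0);
            cookieStore.put(parsed.getName(), parsed.getValue());
          }
        }
      }
    }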
http://git-wip-us.apache.org/repos/asf/ambari/blob/a9a05f76/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/logsearch-log4j.xml.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/logsearch-log4j.xml.j2 b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/logsearch-log4j.xml.j2
index ce39030..06fdad2 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/logsearch-log4j.xml.j2
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/properties/logsearch-log4j.xml.j2
@@ -25,7 +25,7 @@ limitations under the License.
   </appender>
 
   <appender name="rolling_file" class="org.apache.log4j.RollingFileAppender">
-    <param name="file" value="{{logsearch_log_dir}}/logsearch.err" />
+    <param name="file" value="{{logsearch_log_dir}}/logsearch.log" />
     <param name="Threshold" value="info" />
     <param name="append" value="true" />
     <param name="maxFileSize" value="10MB" />
@@ -74,7 +74,7 @@ limitations under the License.
   </category>
 
   <root>
-    <priority value="warn"/>
+    <priority value="info"/>
     <!-- <appender-ref ref="console" /> -->
     <appender-ref ref="rolling_file" />
     <appender-ref ref="rolling_file_json"/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/a9a05f76/ambari-server/src/test/java/org/apache/ambari/server/controller/logging/LoggingRequestHelperImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/logging/LoggingRequestHelperImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/logging/LoggingRequestHelperImplTest.java
index b839b64..12b5b69 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/logging/LoggingRequestHelperImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/logging/LoggingRequestHelperImplTest.java
@@ -122,6 +122,7 @@ public class LoggingRequestHelperImplTest {
 
   @Test
   public void testLogQueryRequestBasic() throws Exception {
+    LoggingCookieStore.INSTANCE.getCookiesMap().clear();
     EasyMockSupport mockSupport =
       new EasyMockSupport();
 
@@ -301,6 +302,7 @@ public class LoggingRequestHelperImplTest {
 
   @Test
   public void testLogLevelRequestBasic() throws Exception {
+    LoggingCookieStore.INSTANCE.getCookiesMap().clear();
     EasyMockSupport mockSupport =
       new EasyMockSupport();
 
@@ -391,6 +393,7 @@ public class LoggingRequestHelperImplTest {
 
   @Test
   public void testLogFileNameRequestBasic() throws Exception {
+    LoggingCookieStore.INSTANCE.getCookiesMap().clear();
     final String expectedComponentName = "hdfs_namenode";
 
     EasyMockSupport mockSupport =
@@ -478,6 +481,7 @@ public class LoggingRequestHelperImplTest {
    */
   @Test
   public void testLogQueryRequestBasicCredentialsNotInConfig() throws Exception {
+    LoggingCookieStore.INSTANCE.getCookiesMap().clear();
     final String expectedClusterName = "my-test-cluster";
 
     EasyMockSupport mockSupport =
@@ -656,6 +660,7 @@ public class LoggingRequestHelperImplTest {
 
   @Test
   public void testCreateLogFileTailURI() throws Exception {
+    LoggingCookieStore.INSTANCE.getCookiesMap().clear();
     final String expectedHostName = "c6401.ambari.apache.org";
     final String expectedPort = "61888";
     final String expectedComponentName = "hdfs_namenode";


[03/51] [abbrv] ambari git commit: Merge branch 'branch-feature-AMBARI-18456' into trunk

Posted by sm...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
index 9917720..fe1f338 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
@@ -17,18 +17,22 @@
  */
 package org.apache.ambari.server.state.configgroup;
 
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.DuplicateResourceException;
 import org.apache.ambari.server.controller.ConfigGroupResponse;
 import org.apache.ambari.server.controller.internal.ConfigurationResourceProvider;
+import org.apache.ambari.server.logging.LockFactory;
 import org.apache.ambari.server.orm.dao.ClusterDAO;
 import org.apache.ambari.server.orm.dao.ConfigGroupConfigMappingDAO;
 import org.apache.ambari.server.orm.dao.ConfigGroupDAO;
@@ -44,213 +48,195 @@ import org.apache.ambari.server.orm.entities.HostEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.Host;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.gson.Gson;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
 import com.google.inject.assistedinject.Assisted;
 import com.google.inject.assistedinject.AssistedInject;
 import com.google.inject.persist.Transactional;
 
 public class ConfigGroupImpl implements ConfigGroup {
   private static final Logger LOG = LoggerFactory.getLogger(ConfigGroupImpl.class);
-  private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
 
   private Cluster cluster;
-  private ConfigGroupEntity configGroupEntity;
-  private Map<Long, Host> hosts;
-  private Map<String, Config> configurations;
-  private volatile boolean isPersisted = false;
-
-  @Inject
-  private Gson gson;
-  @Inject
-  private ConfigGroupDAO configGroupDAO;
-  @Inject
-  private ConfigGroupConfigMappingDAO configGroupConfigMappingDAO;
-  @Inject
-  private ConfigGroupHostMappingDAO configGroupHostMappingDAO;
-  @Inject
-  private HostDAO hostDAO;
-  @Inject
-  private ClusterDAO clusterDAO;
-  @Inject
-  Clusters clusters;
+  private ConcurrentMap<Long, Host> m_hosts;
+  private ConcurrentMap<String, Config> m_configurations;
+  private String configGroupName;
+  private long configGroupId;
+
+  /**
+   * This lock is required to prevent inconsistencies in internal state between
+   * {@link #m_hosts} and the entities stored by the {@link ConfigGroupEntity}.
+   */
+  private final ReadWriteLock hostLock;
+
+  /**
+   * A label for {@link #hostLock} to use with the {@link LockFactory}.
+   */
+  private static final String hostLockLabel = "configurationGroupHostLock";
+
+  private final ConfigGroupDAO configGroupDAO;
+
+  private final ConfigGroupConfigMappingDAO configGroupConfigMappingDAO;
+
+  private final ConfigGroupHostMappingDAO configGroupHostMappingDAO;
+
+  private final HostDAO hostDAO;
+
+  private final ClusterDAO clusterDAO;
+
+  private final ConfigFactory configFactory;
 
   @AssistedInject
-  public ConfigGroupImpl(@Assisted("cluster") Cluster cluster,
-                         @Assisted("name") String name,
-                         @Assisted("tag") String tag,
-                         @Assisted("description") String description,
-                         @Assisted("configs") Map<String, Config> configs,
-                         @Assisted("hosts") Map<Long, Host> hosts,
-                         Injector injector) {
-    injector.injectMembers(this);
+  public ConfigGroupImpl(@Assisted("cluster") Cluster cluster, @Assisted("name") String name,
+      @Assisted("tag") String tag, @Assisted("description") String description,
+      @Assisted("configs") Map<String, Config> configurations,
+      @Assisted("hosts") Map<Long, Host> hosts, Clusters clusters, ConfigFactory configFactory,
+      ClusterDAO clusterDAO, HostDAO hostDAO, ConfigGroupDAO configGroupDAO,
+      ConfigGroupConfigMappingDAO configGroupConfigMappingDAO,
+      ConfigGroupHostMappingDAO configGroupHostMappingDAO, LockFactory lockFactory) {
+
+    this.configFactory = configFactory;
+    this.clusterDAO = clusterDAO;
+    this.hostDAO = hostDAO;
+    this.configGroupDAO = configGroupDAO;
+    this.configGroupConfigMappingDAO = configGroupConfigMappingDAO;
+    this.configGroupHostMappingDAO = configGroupHostMappingDAO;
+
+    hostLock = lockFactory.newReadWriteLock(hostLockLabel);
+
     this.cluster = cluster;
+    configGroupName = name;
 
-    configGroupEntity = new ConfigGroupEntity();
+    ConfigGroupEntity configGroupEntity = new ConfigGroupEntity();
     configGroupEntity.setClusterId(cluster.getClusterId());
     configGroupEntity.setGroupName(name);
     configGroupEntity.setTag(tag);
     configGroupEntity.setDescription(description);
 
-    if (hosts != null) {
-      this.hosts = hosts;
-    } else {
-      this.hosts = new HashMap<Long, Host>();
-    }
+    m_hosts = hosts == null ? new ConcurrentHashMap<Long, Host>()
+        : new ConcurrentHashMap<>(hosts);
 
-    if (configs != null) {
-      configurations = configs;
-    } else {
-      configurations = new HashMap<String, Config>();
-    }
+    m_configurations = configurations == null ? new ConcurrentHashMap<String, Config>()
+        : new ConcurrentHashMap<>(configurations);
+
+    // save the entity and grab the ID
+    persist(configGroupEntity);
+    configGroupId = configGroupEntity.getGroupId();
   }
 
   @AssistedInject
-  public ConfigGroupImpl(@Assisted Cluster cluster,
-                         @Assisted ConfigGroupEntity configGroupEntity,
-                         Injector injector) {
-    injector.injectMembers(this);
+  public ConfigGroupImpl(@Assisted Cluster cluster, @Assisted ConfigGroupEntity configGroupEntity,
+      Clusters clusters, ConfigFactory configFactory,
+      ClusterDAO clusterDAO, HostDAO hostDAO, ConfigGroupDAO configGroupDAO,
+      ConfigGroupConfigMappingDAO configGroupConfigMappingDAO,
+      ConfigGroupHostMappingDAO configGroupHostMappingDAO, LockFactory lockFactory) {
+
+    this.configFactory = configFactory;
+    this.clusterDAO = clusterDAO;
+    this.hostDAO = hostDAO;
+    this.configGroupDAO = configGroupDAO;
+    this.configGroupConfigMappingDAO = configGroupConfigMappingDAO;
+    this.configGroupHostMappingDAO = configGroupHostMappingDAO;
+
+    hostLock = lockFactory.newReadWriteLock(hostLockLabel);
+
     this.cluster = cluster;
+    configGroupId = configGroupEntity.getGroupId();
+    configGroupName = configGroupEntity.getGroupName();
 
-    this.configGroupEntity = configGroupEntity;
-    configurations = new HashMap<String, Config>();
-    hosts = new HashMap<Long, Host>();
+    m_configurations = new ConcurrentHashMap<String, Config>();
+    m_hosts = new ConcurrentHashMap<Long, Host>();
 
     // Populate configs
-    for (ConfigGroupConfigMappingEntity configMappingEntity : configGroupEntity
-      .getConfigGroupConfigMappingEntities()) {
-
+    for (ConfigGroupConfigMappingEntity configMappingEntity : configGroupEntity.getConfigGroupConfigMappingEntities()) {
       Config config = cluster.getConfig(configMappingEntity.getConfigType(),
         configMappingEntity.getVersionTag());
 
       if (config != null) {
-        configurations.put(config.getType(), config);
+        m_configurations.put(config.getType(), config);
       } else {
-        LOG.warn("Unable to find config mapping for config group"
-          + ", clusterName = " + cluster.getClusterName()
-          + ", type = " + configMappingEntity.getConfigType()
-          + ", tag = " + configMappingEntity.getVersionTag());
+        LOG.warn("Unable to find config mapping {}/{} for config group in cluster {}",
+            configMappingEntity.getConfigType(), configMappingEntity.getVersionTag(),
+            cluster.getClusterName());
       }
     }
 
     // Populate Hosts
-    for (ConfigGroupHostMappingEntity hostMappingEntity : configGroupEntity
-      .getConfigGroupHostMappingEntities()) {
-
+    for (ConfigGroupHostMappingEntity hostMappingEntity : configGroupEntity.getConfigGroupHostMappingEntities()) {
       try {
         Host host = clusters.getHost(hostMappingEntity.getHostname());
         HostEntity hostEntity = hostMappingEntity.getHostEntity();
         if (host != null && hostEntity != null) {
-          hosts.put(hostEntity.getHostId(), host);
+          m_hosts.put(hostEntity.getHostId(), host);
         }
       } catch (AmbariException e) {
-        String msg = "Host seems to be deleted but Config group mapping still " +
-          "exists !";
-        LOG.warn(msg);
-        LOG.debug(msg, e);
+        LOG.warn("Host seems to be deleted but Config group mapping still exists!");
+        LOG.debug("Host seems to be deleted but Config group mapping still exists!", e);
       }
     }
-
-    isPersisted = true;
   }
 
   @Override
   public Long getId() {
-    return configGroupEntity.getGroupId();
+    return configGroupId;
   }
 
   @Override
   public String getName() {
-    readWriteLock.readLock().lock();
-    try {
-      return configGroupEntity.getGroupName();
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
+    return configGroupName;
   }
 
   @Override
   public void setName(String name) {
-    readWriteLock.writeLock().lock();
-    try {
-      configGroupEntity.setGroupName(name);
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
+    ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+    configGroupEntity.setGroupName(name);
+    configGroupDAO.merge(configGroupEntity);
 
+    configGroupName = name;
   }
 
   @Override
   public String getClusterName() {
-    return configGroupEntity.getClusterEntity().getClusterName();
+    return cluster.getClusterName();
   }
 
   @Override
   public String getTag() {
-    readWriteLock.readLock().lock();
-    try {
-      return configGroupEntity.getTag();
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
+    ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+    return configGroupEntity.getTag();
   }
 
   @Override
   public void setTag(String tag) {
-    readWriteLock.writeLock().lock();
-    try {
-      configGroupEntity.setTag(tag);
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-
+    ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+    configGroupEntity.setTag(tag);
+    configGroupDAO.merge(configGroupEntity);
   }
 
   @Override
   public String getDescription() {
-    readWriteLock.readLock().lock();
-    try {
-      return configGroupEntity.getDescription();
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
+    ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+    return configGroupEntity.getDescription();
   }
 
   @Override
   public void setDescription(String description) {
-    readWriteLock.writeLock().lock();
-    try {
-      configGroupEntity.setDescription(description);
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-
+    ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+    configGroupEntity.setDescription(description);
+    configGroupDAO.merge(configGroupEntity);
   }
 
   @Override
   public Map<Long, Host> getHosts() {
-    readWriteLock.readLock().lock();
-    try {
-      return Collections.unmodifiableMap(hosts);
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
+    return Collections.unmodifiableMap(m_hosts);
   }
 
   @Override
   public Map<String, Config> getConfigurations() {
-    readWriteLock.readLock().lock();
-    try {
-      return Collections.unmodifiableMap(configurations);
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
+    return Collections.unmodifiableMap(m_configurations);
   }
 
   /**
@@ -259,13 +245,14 @@ public class ConfigGroupImpl implements ConfigGroup {
    */
   @Override
   public void setHosts(Map<Long, Host> hosts) {
-    readWriteLock.writeLock().lock();
+    hostLock.writeLock().lock();
     try {
-      this.hosts = hosts;
+      // persist entities in a transaction first, then update internal state
+      replaceHostMappings(hosts);
+      m_hosts = new ConcurrentHashMap<>(hosts);
     } finally {
-      readWriteLock.writeLock().unlock();
+      hostLock.writeLock().unlock();
     }
-
   }
 
   /**
@@ -273,115 +260,140 @@ public class ConfigGroupImpl implements ConfigGroup {
    * @param configs
    */
   @Override
-  public void setConfigurations(Map<String, Config> configs) {
-    readWriteLock.writeLock().lock();
-    try {
-      configurations = configs;
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-
+  public void setConfigurations(Map<String, Config> configurations) {
+    ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+    ClusterEntity clusterEntity = configGroupEntity.getClusterEntity();
+
+    // only update the internal state after the configurations have been
+    // persisted
+    persistConfigMapping(clusterEntity, configGroupEntity, configurations);
+    m_configurations = new ConcurrentHashMap<>(configurations);
   }
 
   @Override
-  @Transactional
   public void removeHost(Long hostId) throws AmbariException {
-    readWriteLock.writeLock().lock();
+    hostLock.writeLock().lock();
     try {
-      if (hosts.containsKey(hostId)) {
-        String hostName = hosts.get(hostId).getHostName();
-        LOG.info("Removing host from config group, hostid = " + hostId + ", hostname = " + hostName);
-        hosts.remove(hostId);
-        try {
-          ConfigGroupHostMappingEntityPK hostMappingEntityPK = new
-            ConfigGroupHostMappingEntityPK();
-          hostMappingEntityPK.setHostId(hostId);
-          hostMappingEntityPK.setConfigGroupId(configGroupEntity.getGroupId());
-          configGroupHostMappingDAO.removeByPK(hostMappingEntityPK);
-        } catch (Exception e) {
-          LOG.error("Failed to delete config group host mapping"
-            + ", clusterName = " + getClusterName()
-            + ", id = " + getId()
-            + ", hostid = " + hostId
-            + ", hostname = " + hostName, e);
-          throw new AmbariException(e.getMessage());
-        }
+      Host host = m_hosts.get(hostId);
+      if (null == host) {
+        return;
       }
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-  }
 
-  @Override
-  public void persist() {
-    readWriteLock.writeLock().lock();
-    try {
-      if (!isPersisted) {
-        persistEntities();
-        refresh();
-        cluster.refresh();
-        isPersisted = true;
-      } else {
-        saveIfPersisted();
+      String hostName = host.getHostName();
+      LOG.info("Removing host (id={}, name={}) from config group", host.getHostId(), hostName);
+
+      try {
+        // remove the entities first, then update internal state
+        removeConfigGroupHostEntity(host);
+        m_hosts.remove(hostId);
+      } catch (Exception e) {
+        LOG.error("Failed to delete config group host mapping for cluster {} and host {}",
+            cluster.getClusterName(), hostName, e);
+
+        throw new AmbariException(e.getMessage());
       }
     } finally {
-      readWriteLock.writeLock().unlock();
+      hostLock.writeLock().unlock();
     }
   }
 
   /**
+   * Removes the {@link ConfigGroupHostMappingEntity} for the specified host
+   * from this configuration group.
+   *
+   * @param host
+   *          the host to remove.
+   */
+  @Transactional
+  void removeConfigGroupHostEntity(Host host) {
+    ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+    ConfigGroupHostMappingEntityPK hostMappingEntityPK = new ConfigGroupHostMappingEntityPK();
+    hostMappingEntityPK.setHostId(host.getHostId());
+    hostMappingEntityPK.setConfigGroupId(configGroupId);
+
+    ConfigGroupHostMappingEntity configGroupHostMapping = configGroupHostMappingDAO.findByPK(
+        hostMappingEntityPK);
+
+    configGroupHostMappingDAO.remove(configGroupHostMapping);
+
+    configGroupEntity.getConfigGroupHostMappingEntities().remove(configGroupHostMapping);
+    configGroupEntity = configGroupDAO.merge(getConfigGroupEntity());
+  }
+
+  /**
+   * @param configGroupEntity the newly created entity to persist.
+   */
+  private void persist(ConfigGroupEntity configGroupEntity) {
+    persistEntities(configGroupEntity);
+    cluster.refresh();
+  }
+
+  /**
    * Persist Config group with host mapping and configurations
    *
    * @throws Exception
    */
   @Transactional
-  void persistEntities() {
+  void persistEntities(ConfigGroupEntity configGroupEntity) {
     ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());
     configGroupEntity.setClusterEntity(clusterEntity);
     configGroupEntity.setTimestamp(System.currentTimeMillis());
     configGroupDAO.create(configGroupEntity);
 
-    persistConfigMapping(clusterEntity);
-    persistHostMapping();
-  }
+    configGroupId = configGroupEntity.getGroupId();
 
-  // TODO: Test rollback scenario
+    persistConfigMapping(clusterEntity, configGroupEntity, m_configurations);
+    replaceHostMappings(m_hosts);
+  }
 
   /**
-   * Persist host mapping
+   * Replaces all existing host mappings with the new collection of hosts.
    *
+   * @param hosts
+   *          the new hosts
    * @throws Exception
    */
-  @Override
   @Transactional
-  public void persistHostMapping() {
-    if (isPersisted) {
-      // Delete existing mappings and create new ones
-      configGroupHostMappingDAO.removeAllByGroup(configGroupEntity.getGroupId());
-      configGroupEntity.setConfigGroupHostMappingEntities(new HashSet<ConfigGroupHostMappingEntity>());
-    }
+  void replaceHostMappings(Map<Long, Host> hosts) {
+    ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+
+    // Delete existing mappings and create new ones
+    configGroupHostMappingDAO.removeAllByGroup(configGroupEntity.getGroupId());
+    configGroupEntity.setConfigGroupHostMappingEntities(
+        new HashSet<ConfigGroupHostMappingEntity>());
 
     if (hosts != null && !hosts.isEmpty()) {
-      for (Host host : hosts.values()) {
-        HostEntity hostEntity = hostDAO.findById(host.getHostId());
-        if (hostEntity != null) {
-          ConfigGroupHostMappingEntity hostMappingEntity = new
-            ConfigGroupHostMappingEntity();
-          hostMappingEntity.setHostId(hostEntity.getHostId());
-          hostMappingEntity.setHostEntity(hostEntity);
-          hostMappingEntity.setConfigGroupEntity(configGroupEntity);
-          hostMappingEntity.setConfigGroupId(configGroupEntity.getGroupId());
-          configGroupEntity.getConfigGroupHostMappingEntities().add
-                  (hostMappingEntity);
-          configGroupHostMappingDAO.create(hostMappingEntity);
-        } else {
-          LOG.warn("Host seems to be deleted, cannot create host to config " +
-            "group mapping, host = " + host.getHostName());
-        }
+      configGroupEntity = persistHostMapping(hosts.values(), configGroupEntity);
+    }
+  }
+
+  /**
+   * Adds the collection of hosts to the configuration group.
+   *
+   * @param hosts the hosts to add to the group.
+   * @param configGroupEntity the persisted entity for this group.
+   */
+  @Transactional
+  ConfigGroupEntity persistHostMapping(Collection<Host> hosts,
+      ConfigGroupEntity configGroupEntity) {
+    for (Host host : hosts) {
+      HostEntity hostEntity = hostDAO.findById(host.getHostId());
+      if (hostEntity != null) {
+        ConfigGroupHostMappingEntity hostMappingEntity = new ConfigGroupHostMappingEntity();
+        hostMappingEntity.setHostId(hostEntity.getHostId());
+        hostMappingEntity.setHostEntity(hostEntity);
+        hostMappingEntity.setConfigGroupEntity(configGroupEntity);
+        hostMappingEntity.setConfigGroupId(configGroupEntity.getGroupId());
+        configGroupEntity.getConfigGroupHostMappingEntities().add(hostMappingEntity);
+        configGroupHostMappingDAO.create(hostMappingEntity);
+      } else {
+        LOG.warn(
+            "The host {} has been removed from the cluster and cannot be added to the configuration group {}",
+            host.getHostName(), configGroupName);
       }
     }
-    // TODO: Make sure this does not throw Nullpointer based on JPA docs
-    configGroupEntity = configGroupDAO.merge(configGroupEntity);
+
+    return configGroupDAO.merge(configGroupEntity);
   }
 
   /**
@@ -391,42 +403,31 @@ public class ConfigGroupImpl implements ConfigGroup {
    * @throws Exception
    */
   @Transactional
-  void persistConfigMapping(ClusterEntity clusterEntity) {
-    if (isPersisted) {
-      configGroupConfigMappingDAO.removeAllByGroup(configGroupEntity.getGroupId());
-      configGroupEntity.setConfigGroupConfigMappingEntities(new HashSet<ConfigGroupConfigMappingEntity>());
-    }
+  void persistConfigMapping(ClusterEntity clusterEntity,
+      ConfigGroupEntity configGroupEntity, Map<String, Config> configurations) {
+    configGroupConfigMappingDAO.removeAllByGroup(configGroupEntity.getGroupId());
+    configGroupEntity.setConfigGroupConfigMappingEntities(
+        new HashSet<ConfigGroupConfigMappingEntity>());
 
     if (configurations != null && !configurations.isEmpty()) {
-      for (Config config : configurations.values()) {
+      for (Entry<String, Config> entry : configurations.entrySet()) {
+        Config config = entry.getValue();
         ClusterConfigEntity clusterConfigEntity = clusterDAO.findConfig
           (cluster.getClusterId(), config.getType(), config.getTag());
 
         if (clusterConfigEntity == null) {
-          config.setVersion(cluster.getNextConfigVersion(config.getType()));
-          config.setStackId(cluster.getDesiredStackVersion());
-          // Create configuration
-          clusterConfigEntity = new ClusterConfigEntity();
-          clusterConfigEntity.setClusterId(clusterEntity.getClusterId());
-          clusterConfigEntity.setClusterEntity(clusterEntity);
-          clusterConfigEntity.setStack(clusterEntity.getDesiredStack());
-          clusterConfigEntity.setType(config.getType());
-          clusterConfigEntity.setVersion(config.getVersion());
-          clusterConfigEntity.setTag(config.getTag());
-          clusterConfigEntity.setData(gson.toJson(config.getProperties()));
-          if (null != config.getPropertiesAttributes()) {
-            clusterConfigEntity.setAttributes(gson.toJson(config.getPropertiesAttributes()));
-          }
-          clusterConfigEntity.setTimestamp(System.currentTimeMillis());
-          clusterDAO.createConfig(clusterConfigEntity);
-          clusterEntity.getClusterConfigEntities().add(clusterConfigEntity);
-          cluster.addConfig(config);
-          clusterDAO.merge(clusterEntity);
-          cluster.refresh();
+          config = configFactory.createNew(cluster, config.getType(), config.getTag(),
+              config.getProperties(), config.getPropertiesAttributes());
+
+          entry.setValue(config);
+
+          clusterConfigEntity = clusterDAO.findConfig(cluster.getClusterId(), config.getType(),
+              config.getTag());
         }
 
         ConfigGroupConfigMappingEntity configMappingEntity =
           new ConfigGroupConfigMappingEntity();
+
         configMappingEntity.setTimestamp(System.currentTimeMillis());
         configMappingEntity.setClusterId(clusterEntity.getClusterId());
         configMappingEntity.setClusterConfigEntity(clusterConfigEntity);
@@ -443,142 +444,84 @@ public class ConfigGroupImpl implements ConfigGroup {
     }
   }
 
-  void saveIfPersisted() {
-    if (isPersisted) {
-      save(clusterDAO.findById(cluster.getClusterId()));
-    }
-  }
-
-  @Transactional
-  void save(ClusterEntity clusterEntity) {
-    persistHostMapping();
-    persistConfigMapping(clusterEntity);
-  }
-
   @Override
+  @Transactional
   public void delete() {
-    readWriteLock.writeLock().lock();
-    try {
-      configGroupConfigMappingDAO.removeAllByGroup(configGroupEntity.getGroupId());
-      configGroupHostMappingDAO.removeAllByGroup(configGroupEntity.getGroupId());
-      configGroupDAO.removeByPK(configGroupEntity.getGroupId());
-      cluster.refresh();
-      isPersisted = false;
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
+    configGroupConfigMappingDAO.removeAllByGroup(configGroupId);
+    configGroupHostMappingDAO.removeAllByGroup(configGroupId);
+    configGroupDAO.removeByPK(configGroupId);
+    cluster.refresh();
   }
 
   @Override
   public void addHost(Host host) throws AmbariException {
-    readWriteLock.writeLock().lock();
+    hostLock.writeLock().lock();
     try {
-      if (hosts != null && !hosts.isEmpty()) {
-        for (Host h : hosts.values()) {
-          if (h.getHostName().equals(host.getHostName())) {
-            throw new DuplicateResourceException("Host " + h.getHostName() +
-              "is already associated with Config Group " +
-              configGroupEntity.getGroupName());
-          }
-        }
-        HostEntity hostEntity = hostDAO.findByName(host.getHostName());
-        if (hostEntity != null) {
-          hosts.put(hostEntity.getHostId(), host);
-        }
-      }
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-  }
+      if (m_hosts.containsKey(host.getHostId())) {
+        String message = String.format(
+            "Host %s is already associated with the configuration group %s", host.getHostName(),
+            configGroupName);
 
-  @Override
-  public void addConfiguration(Config config) throws AmbariException {
-    readWriteLock.writeLock().lock();
-    try {
-      if (configurations != null && !configurations.isEmpty()) {
-        for (Config c : configurations.values()) {
-          if (c.getType().equals(config.getType()) && c.getTag().equals
-            (config.getTag())) {
-            throw new DuplicateResourceException("Config " + config.getType() +
-              " with tag " + config.getTag() + " is already associated " +
-              "with Config Group " + configGroupEntity.getGroupName());
-          }
-        }
-        configurations.put(config.getType(), config);
+        throw new DuplicateResourceException(message);
       }
+
+      // ensure that we only update the in-memory structure if the merge was
+      // successful
+      ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+      persistHostMapping(Collections.singletonList(host), configGroupEntity);
+      m_hosts.putIfAbsent(host.getHostId(), host);
     } finally {
-      readWriteLock.writeLock().unlock();
+      hostLock.writeLock().unlock();
     }
   }
 
   @Override
   public ConfigGroupResponse convertToResponse() throws AmbariException {
-    readWriteLock.readLock().lock();
-    try {
-      Set<Map<String, Object>> hostnames = new HashSet<Map<String, Object>>();
-      for (Host host : hosts.values()) {
-        Map<String, Object> hostMap = new HashMap<String, Object>();
-        hostMap.put("host_name", host.getHostName());
-        hostnames.add(hostMap);
-      }
-
-      Set<Map<String, Object>> configObjMap = new HashSet<Map<String, Object>>();
+    Set<Map<String, Object>> hostnames = new HashSet<Map<String, Object>>();
+    for (Host host : m_hosts.values()) {
+      Map<String, Object> hostMap = new HashMap<String, Object>();
+      hostMap.put("host_name", host.getHostName());
+      hostnames.add(hostMap);
+    }
 
-      for (Config config : configurations.values()) {
-        Map<String, Object> configMap = new HashMap<String, Object>();
-        configMap.put(ConfigurationResourceProvider.CONFIGURATION_CONFIG_TYPE_PROPERTY_ID,
-            config.getType());
-        configMap.put(ConfigurationResourceProvider.CONFIGURATION_CONFIG_TAG_PROPERTY_ID,
-            config.getTag());
-        configObjMap.add(configMap);
-      }
+    Set<Map<String, Object>> configObjMap = new HashSet<Map<String, Object>>();
 
-      ConfigGroupResponse configGroupResponse = new ConfigGroupResponse(
-          configGroupEntity.getGroupId(), cluster.getClusterName(),
-          configGroupEntity.getGroupName(), configGroupEntity.getTag(),
-          configGroupEntity.getDescription(), hostnames, configObjMap);
-      return configGroupResponse;
-    } finally {
-      readWriteLock.readLock().unlock();
+    for (Config config : m_configurations.values()) {
+      Map<String, Object> configMap = new HashMap<String, Object>();
+      configMap.put(ConfigurationResourceProvider.CONFIGURATION_CONFIG_TYPE_PROPERTY_ID,
+          config.getType());
+      configMap.put(ConfigurationResourceProvider.CONFIGURATION_CONFIG_TAG_PROPERTY_ID,
+          config.getTag());
+      configObjMap.add(configMap);
     }
-  }
 
-  @Override
-  @Transactional
-  public void refresh() {
-    readWriteLock.writeLock().lock();
-    try {
-      if (isPersisted) {
-        ConfigGroupEntity groupEntity = configGroupDAO.findById
-          (configGroupEntity.getGroupId());
-        configGroupDAO.refresh(groupEntity);
-        // TODO What other entities should refresh?
-      }
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
+    ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+    ConfigGroupResponse configGroupResponse = new ConfigGroupResponse(
+        configGroupEntity.getGroupId(), cluster.getClusterName(),
+        configGroupEntity.getGroupName(), configGroupEntity.getTag(),
+        configGroupEntity.getDescription(), hostnames, configObjMap);
+    return configGroupResponse;
   }
 
-
   @Override
   public String getServiceName() {
-    readWriteLock.readLock().lock();
-    try {
-      return configGroupEntity.getServiceName();
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
+    ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+    return configGroupEntity.getServiceName();
   }
 
   @Override
   public void setServiceName(String serviceName) {
-    readWriteLock.writeLock().lock();
-    try {
-      configGroupEntity.setServiceName(serviceName);
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
+    ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+    configGroupEntity.setServiceName(serviceName);
+    configGroupDAO.merge(configGroupEntity);
+  }
 
+  /**
+   * Gets the {@link ConfigGroupEntity} by its ID from the JPA cache.
+   *
+   * @return the entity.
+   */
+  private ConfigGroupEntity getConfigGroupEntity() {
+    return configGroupDAO.findById(configGroupId);
   }
 }

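The refactored ConfigGroupImpl applies one pattern throughout: persist through the DAO first, and only then mutate the in-memory ConcurrentMap, with both steps guarded by the named write lock. A compact, self-contained sketch of that ordering; the HostDao interface and all names here are hypothetical, only the pattern is taken from the class above:

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;
    import java.util.concurrent.locks.ReadWriteLock;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class HostMembershipSketch {
      /** Hypothetical persistence boundary; throws if the database rejects the change. */
      interface HostDao {
        void createMapping(long groupId, long hostId);
        void removeMapping(long groupId, long hostId);
      }

      private final long groupId;
      private final HostDao dao;
      private final ReadWriteLock hostLock = new ReentrantReadWriteLock();
      private final ConcurrentMap<Long, String> hostsById = new ConcurrentHashMap<>();

      HostMembershipSketch(long groupId, HostDao dao) {
        this.groupId = groupId;
        this.dao = dao;
      }

      void addHost(long hostId, String hostName) {
        hostLock.writeLock().lock();
        try {
          // Persist first; touch in-memory state only if the DAO call succeeded, so a
          // failed transaction cannot leave the map out of sync with the database.
          dao.createMapping(groupId, hostId);
          hostsById.putIfAbsent(hostId, hostName);
        } finally {
          hostLock.writeLock().unlock();
        }
      }

      void removeHost(long hostId) {
        hostLock.writeLock().lock();
        try {
          dao.removeMapping(groupId, hostId);
          hostsById.remove(hostId);
        } finally {
          hostLock.writeLock().unlock();
        }
      }
    }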
http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
index e1f5cd2..5e887d4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
@@ -67,11 +67,10 @@ import org.apache.ambari.server.security.authorization.AuthorizationException;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
+import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.SecurityType;
-import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.configgroup.ConfigGroup;
 import org.apache.ambari.server.utils.RetryHelper;
 import org.slf4j.Logger;
@@ -91,8 +90,13 @@ public class AmbariContext {
   @Inject
   private PersistedState persistedState;
 
+  /**
+   * Used for creating read-only instances of existing {@link Config} in order
+   * to send them to the {@link ConfigGroupResourceProvider} to create
+   * {@link ConfigGroup}s.
+   */
   @Inject
-  private org.apache.ambari.server.configuration.Configuration configs;
+  ConfigFactory configFactory;
 
   private static AmbariManagementController controller;
   private static ClusterController clusterController;
@@ -458,11 +462,13 @@ public class AmbariContext {
         SortedSet<DesiredConfig> desiredConfigsOrderedByVersion = new TreeSet<>(new Comparator<DesiredConfig>() {
           @Override
           public int compare(DesiredConfig o1, DesiredConfig o2) {
-            if (o1.getVersion() < o2.getVersion())
+            if (o1.getVersion() < o2.getVersion()) {
               return -1;
+            }
 
-            if (o1.getVersion() > o2.getVersion())
+            if (o1.getVersion() > o2.getVersion()) {
               return 1;
+            }
 
             return 0;
           }
@@ -473,9 +479,9 @@ public class AmbariContext {
         int tagMatchState = 0; // 0 -> INITIAL -> tagMatchState = 1 -> TOPLOGY_RESOLVED -> tagMatchState = 2
 
         for (DesiredConfig config: desiredConfigsOrderedByVersion) {
-          if (config.getTag().equals(TopologyManager.INITIAL_CONFIG_TAG) && tagMatchState == 0)
+          if (config.getTag().equals(TopologyManager.INITIAL_CONFIG_TAG) && tagMatchState == 0) {
             tagMatchState = 1;
-          else if (config.getTag().equals(TopologyManager.TOPOLOGY_RESOLVED_TAG) && tagMatchState == 1) {
+          } else if (config.getTag().equals(TopologyManager.TOPOLOGY_RESOLVED_TAG) && tagMatchState == 1) {
             tagMatchState = 2;
             break;
           }
@@ -551,7 +557,6 @@ public class AmbariContext {
           addedHost = true;
           if (! group.getHosts().containsKey(host.getHostId())) {
             group.addHost(host);
-            group.persistHostMapping();
           }
 
         } catch (AmbariException e) {
@@ -585,9 +590,7 @@ public class AmbariContext {
     for (Map.Entry<String, Map<String, String>> entry : userProvidedGroupProperties.entrySet()) {
       String type = entry.getKey();
       String service = stack.getServiceForConfigType(type);
-      Config config = new ConfigImpl(type);
-      config.setTag(groupName);
-      config.setProperties(entry.getValue());
+      Config config = configFactory.createReadOnly(type, groupName, entry.getValue(), null);
       //todo: attributes
       Map<String, Config> serviceConfigs = groupConfigs.get(service);
       if (serviceConfigs == null) {

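For comparison, the two ConfigFactory entry points touched by this change, in a sketch that assumes an injected configFactory, an existing cluster, and an in-scope groupName; the config type and property values are illustrative:

    // Read-only Config handed to the config group machinery (never persisted), replacing
    // the old new ConfigImpl(type) + setTag() + setProperties() sequence:
    Map<String, String> groupProps = Collections.singletonMap("dfs.replication", "2");
    Config readOnly = configFactory.createReadOnly("hdfs-site", groupName, groupProps, null);

    // Persisted Config, now created with its tag in a single call instead of
    // createNew(...) followed by setTag(...) and persist():
    Config persisted = configFactory.createNew(cluster, "hdfs-site", "version1",
        groupProps, new HashMap<String, Map<String, String>>());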
http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/main/java/org/apache/ambari/server/update/HostUpdateHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/update/HostUpdateHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/update/HostUpdateHelper.java
index 6a8057c..4c1ef5a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/update/HostUpdateHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/update/HostUpdateHelper.java
@@ -53,8 +53,8 @@ import org.apache.ambari.server.orm.entities.TopologyRequestEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.utils.EventBusSynchronizer;
 import org.apache.commons.lang.StringUtils;
@@ -234,12 +234,12 @@ public class HostUpdateHelper {
           boolean configUpdated;
 
           // going through all cluster configs and update property values
+          ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
           for (ClusterConfigEntity clusterConfigEntity : clusterConfigEntities) {
-            ConfigImpl config = new ConfigImpl(cluster, clusterConfigEntity, injector);
+            Config config = configFactory.createExisting(cluster, clusterConfigEntity);
             configUpdated = false;
 
             for (Map.Entry<String,String> property : config.getProperties().entrySet()) {
-
               updatedPropertyValue = replaceHosts(property.getValue(), currentHostNames, hostMapping);
 
               if (updatedPropertyValue != null) {
@@ -249,8 +249,9 @@ public class HostUpdateHelper {
                 configUpdated = true;
               }
             }
+
             if (configUpdated) {
-              config.persist(false);
+              config.save();
             }
           }
         }
@@ -317,6 +318,7 @@ public class HostUpdateHelper {
   * */
   public class StringComparator implements Comparator<String> {
 
+    @Override
     public int compare(String s1, String s2) {
       return s2.length() - s1.length();
     }

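The update path above now rebuilds a Config from its persisted entity and saves it through the new API. A sketch of that flow, assuming an injected configFactory, the cluster, a clusterConfigEntity already loaded from JPA, and illustrative oldHostName/newHostName values:

    Config config = configFactory.createExisting(cluster, clusterConfigEntity);

    // Rewrite a single property value, then persist the change.
    String current = config.getProperties().get("fs.defaultFS");  // illustrative property name
    if (current != null && current.contains(oldHostName)) {
      config.updateProperties(
          Collections.singletonMap("fs.defaultFS", current.replace(oldHostName, newHostName)));
      config.save();  // replaces the old config.persist(false)
    }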
http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java
index ffca51d..62ce93b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java
@@ -36,7 +36,6 @@ import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.StackId;
@@ -128,24 +127,16 @@ public class ExecutionCommandWrapperTest {
     CONFIG_ATTRIBUTES = new HashMap<String, Map<String,String>>();
 
     //Cluster level global config
-    Config globalConfig = configFactory.createNew(cluster1, GLOBAL_CONFIG, GLOBAL_CLUSTER, CONFIG_ATTRIBUTES);
-    globalConfig.setTag(CLUSTER_VERSION_TAG);
-    cluster1.addConfig(globalConfig);
+    configFactory.createNew(cluster1, GLOBAL_CONFIG, CLUSTER_VERSION_TAG, GLOBAL_CLUSTER, CONFIG_ATTRIBUTES);
 
     //Cluster level service config
-    Config serviceSiteConfigCluster = configFactory.createNew(cluster1, SERVICE_SITE_CONFIG, SERVICE_SITE_CLUSTER, CONFIG_ATTRIBUTES);
-    serviceSiteConfigCluster.setTag(CLUSTER_VERSION_TAG);
-    cluster1.addConfig(serviceSiteConfigCluster);
+    configFactory.createNew(cluster1, SERVICE_SITE_CONFIG, CLUSTER_VERSION_TAG, SERVICE_SITE_CLUSTER, CONFIG_ATTRIBUTES);
 
     //Service level service config
-    Config serviceSiteConfigService = configFactory.createNew(cluster1, SERVICE_SITE_CONFIG, SERVICE_SITE_SERVICE, CONFIG_ATTRIBUTES);
-    serviceSiteConfigService.setTag(SERVICE_VERSION_TAG);
-    cluster1.addConfig(serviceSiteConfigService);
+    configFactory.createNew(cluster1, SERVICE_SITE_CONFIG, SERVICE_VERSION_TAG, SERVICE_SITE_SERVICE, CONFIG_ATTRIBUTES);
 
     //Host level service config
-    Config serviceSiteConfigHost = configFactory.createNew(cluster1, SERVICE_SITE_CONFIG, SERVICE_SITE_HOST, CONFIG_ATTRIBUTES);
-    serviceSiteConfigHost.setTag(HOST_VERSION_TAG);
-    cluster1.addConfig(serviceSiteConfigHost);
+    configFactory.createNew(cluster1, SERVICE_SITE_CONFIG, HOST_VERSION_TAG, SERVICE_SITE_HOST, CONFIG_ATTRIBUTES);
 
     ActionDBAccessor db = injector.getInstance(ActionDBAccessorImpl.class);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionSchedulerThreading.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionSchedulerThreading.java b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionSchedulerThreading.java
index 90a4421..246c8b3 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionSchedulerThreading.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionSchedulerThreading.java
@@ -34,8 +34,8 @@ import org.apache.ambari.server.orm.OrmTestHelper;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.StackId;
 import org.junit.After;
@@ -103,15 +103,11 @@ public class TestActionSchedulerThreading {
     Map<String, String> properties = new HashMap<String, String>();
     Map<String, Map<String, String>> propertiesAttributes = new HashMap<String, Map<String, String>>();
 
+    ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
+
     // foo-type for v1 on current stack
     properties.put("foo-property-1", "foo-value-1");
-    Config c1 = new ConfigImpl(cluster, "foo-type", properties, propertiesAttributes, injector);
-    c1.setTag("version-1");
-    c1.setStackId(stackId);
-    c1.setVersion(1L);
-
-    cluster.addConfig(c1);
-    c1.persist();
+    Config c1 = configFactory.createNew(cluster, "foo-type", "version-1", properties, propertiesAttributes);
 
     // make v1 "current"
     cluster.addDesiredConfig("admin", Sets.newHashSet(c1), "note-1");
@@ -122,12 +118,7 @@ public class TestActionSchedulerThreading {
     // save v2
     // foo-type for v2 on new stack
     properties.put("foo-property-2", "foo-value-2");
-    Config c2 = new ConfigImpl(cluster, "foo-type", properties, propertiesAttributes, injector);
-    c2.setTag("version-2");
-    c2.setStackId(newStackId);
-    c2.setVersion(2L);
-    cluster.addConfig(c2);
-    c2.persist();
+    Config c2 = configFactory.createNew(cluster, "foo-type", "version-2", properties, propertiesAttributes);
 
     // make v2 "current"
     cluster.addDesiredConfig("admin", Sets.newHashSet(c2), "note-2");

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
index 43503fa..fc2bca5 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
@@ -193,11 +193,7 @@ public class HeartbeatTestHelper {
     cluster.setCurrentStackVersion(stackId);
 
     ConfigFactory cf = injector.getInstance(ConfigFactory.class);
-    Config config = cf.createNew(cluster, "cluster-env", configProperties, new HashMap<String, Map<String, String>>());
-    config.setTag("version1");
-    config.persist();
-
-    cluster.addConfig(config);
+    Config config = cf.createNew(cluster, "cluster-env", "version1", configProperties, new HashMap<String, Map<String, String>>());
     cluster.addDesiredConfig("user", Collections.singleton(config));
 
     helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
index 76ab45c..68e9993 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
@@ -159,10 +159,8 @@ public class TestHeartbeatMonitor {
     }};
 
     ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
-    Config config = configFactory.createNew(cluster, "hadoop-env",
+    Config config = configFactory.createNew(cluster, "hadoop-env", "version1",
         new HashMap<String,String>() {{ put("a", "b"); }}, new HashMap<String, Map<String,String>>());
-    config.setTag("version1");
-    cluster.addConfig(config);
     cluster.addDesiredConfig("_test", Collections.singleton(config));
 
 
@@ -243,18 +241,15 @@ public class TestHeartbeatMonitor {
     }};
 
     ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
-    Config hadoopEnvConfig = configFactory.createNew(cluster, "hadoop-env",
+    Config hadoopEnvConfig = configFactory.createNew(cluster, "hadoop-env", "version1",
       new HashMap<String, String>() {{
         put("a", "b");
       }}, new HashMap<String, Map<String,String>>());
-    Config hbaseEnvConfig = configFactory.createNew(cluster, "hbase-env",
+    Config hbaseEnvConfig = configFactory.createNew(cluster, "hbase-env", "version1",
             new HashMap<String, String>() {{
               put("a", "b");
             }}, new HashMap<String, Map<String,String>>());
-    hadoopEnvConfig.setTag("version1");
-    cluster.addConfig(hadoopEnvConfig);
-    hbaseEnvConfig.setTag("version1");
-    cluster.addConfig(hbaseEnvConfig);
+
     cluster.addDesiredConfig("_test", Collections.singleton(hadoopEnvConfig));
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
index 6533e1c..6640837 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
@@ -218,7 +218,7 @@ public class RecoveryConfigHelperTest {
     config.updateProperties(new HashMap<String, String>() {{
       put(RecoveryConfigHelper.RECOVERY_ENABLED_KEY, "false");
     }});
-    config.persist(false);
+    config.save();
 
     // Recovery config should be stale because of the above change.
     boolean isConfigStale = recoveryConfigHelper.isConfigStale(cluster.getClusterName(), DummyHostname1,

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
index e54a117..2507a46 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
@@ -87,8 +87,8 @@ import org.apache.ambari.server.security.ldap.LdapBatchDto;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.ComponentInfo;
+import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.MaintenanceState;
@@ -610,6 +610,7 @@ public class AmbariManagementControllerImplTest {
     Cluster cluster = createNiceMock(Cluster.class);
     ActionManager actionManager = createNiceMock(ActionManager.class);
     ClusterRequest clusterRequest = createNiceMock(ClusterRequest.class);
+    Config config = createNiceMock(Config.class);
 
     // requests
     Set<ClusterRequest> setRequests = Collections.singleton(clusterRequest);
@@ -632,18 +633,11 @@ public class AmbariManagementControllerImplTest {
     expect(clusters.getClusterById(1L)).andReturn(cluster).anyTimes();
     expect(cluster.getClusterName()).andReturn("clusterOld").anyTimes();
     expect(cluster.getConfigPropertiesTypes(anyObject(String.class))).andReturn(Maps.<PropertyInfo.PropertyType, Set<String>>newHashMap()).anyTimes();
-    expect(cluster.getDesiredConfigByType(anyObject(String.class))).andReturn(new ConfigImpl("config-type") {
-      @Override
-      public Map<String, Map<String, String>> getPropertiesAttributes() {
-        return Maps.newHashMap();
-      }
-
-      @Override
-      public Map<String, String> getProperties() {
-        return configReqProps;
-      }
 
-    }).anyTimes();
+    expect(config.getType()).andReturn("config-type").anyTimes();
+    expect(config.getProperties()).andReturn(configReqProps).anyTimes();
+    expect(config.getPropertiesAttributes()).andReturn(new HashMap<String,Map<String,String>>()).anyTimes();
+    expect(cluster.getDesiredConfigByType(anyObject(String.class))).andReturn(config).anyTimes();
 
     cluster.addSessionAttributes(anyObject(Map.class));
     expectLastCall().once();
@@ -652,7 +646,7 @@ public class AmbariManagementControllerImplTest {
     expectLastCall();
 
     // replay mocks
-    replay(actionManager, cluster, clusters, injector, clusterRequest, sessionManager);
+    replay(actionManager, cluster, clusters, config, injector, clusterRequest, sessionManager);
 
     // test
     AmbariManagementController controller = new AmbariManagementControllerImpl(actionManager, clusters, injector);
@@ -660,7 +654,7 @@ public class AmbariManagementControllerImplTest {
 
     // assert and verify
     assertSame(controller, controllerCapture.getValue());
-    verify(actionManager, cluster, clusters, injector, clusterRequest, sessionManager);
+    verify(actionManager, cluster, clusters, config, injector, clusterRequest, sessionManager);
   }
 
   /**

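Rather than subclassing the removed ConfigImpl, the updated test stubs the Config interface with an EasyMock nice mock. A sketch of that pattern using only the expectations visible in the hunk above; the helper class and method names are illustrative:

    import static org.easymock.EasyMock.createNiceMock;
    import static org.easymock.EasyMock.expect;
    import static org.easymock.EasyMock.replay;

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.ambari.server.state.Config;

    class DesiredConfigMockSketch {
      static Config desiredConfigMock(Map<String, String> configReqProps) {
        Config config = createNiceMock(Config.class);
        expect(config.getType()).andReturn("config-type").anyTimes();
        expect(config.getProperties()).andReturn(configReqProps).anyTimes();
        expect(config.getPropertiesAttributes())
            .andReturn(new HashMap<String, Map<String, String>>()).anyTimes();
        replay(config);
        return config;
      }
    }

In the test itself the mock is replayed and verified together with the other mocks, as the updated replay(...) and verify(...) lines show.
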
http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index 509ec88..298a85f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -122,7 +122,6 @@ import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.HostComponentAdminState;
 import org.apache.ambari.server.state.HostState;
@@ -408,7 +407,6 @@ public class AmbariManagementControllerTest {
     ConfigGroup configGroup = configGroupFactory.createNew(cluster, name,
       tag, "", configMap, hostMap);
 
-    configGroup.persist();
     cluster.addConfigGroup(configGroup);
 
     return configGroup.getId();
@@ -1940,10 +1938,8 @@ public class AmbariManagementControllerTest {
     Map<String, String> properties = new HashMap<String, String>();
     Map<String, Map<String, String>> propertiesAttributes = new HashMap<String, Map<String,String>>();
 
-    Config c1 = new ConfigImpl(cluster, "hdfs-site", properties, propertiesAttributes, injector);
-    c1.setTag("v1");
-    cluster.addConfig(c1);
-    c1.persist();
+    ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
+    Config c1 = configFactory.createNew(cluster, "hdfs-site", "v1",  properties, propertiesAttributes);
     configs.put(c1.getType(), c1);
 
     ServiceRequest r = new ServiceRequest(cluster1, serviceName, State.INSTALLED.toString());
@@ -1983,26 +1979,17 @@ public class AmbariManagementControllerTest {
     properties.put("a", "a1");
     properties.put("b", "b1");
 
-    Config c1 = new ConfigImpl(cluster, "hdfs-site", properties, propertiesAttributes, injector);
+    ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
+    Config c1 = configFactory.createNew(cluster, "hdfs-site", "v1", properties, propertiesAttributes);
     properties.put("c", cluster1);
     properties.put("d", "d1");
-    Config c2 = new ConfigImpl(cluster, "core-site", properties, propertiesAttributes, injector);
-    Config c3 = new ConfigImpl(cluster, "foo-site", properties, propertiesAttributes, injector);
+
+    Config c2 = configFactory.createNew(cluster, "core-site", "v1", properties, propertiesAttributes);
+    Config c3 = configFactory.createNew(cluster, "foo-site", "v1", properties, propertiesAttributes);
 
     Map<String, String> mapRequestProps = new HashMap<String, String>();
     mapRequestProps.put("context", "Called from a test");
 
-    c1.setTag("v1");
-    c2.setTag("v1");
-    c3.setTag("v1");
-
-    cluster.addConfig(c1);
-    cluster.addConfig(c2);
-    cluster.addConfig(c3);
-    c1.persist();
-    c2.persist();
-    c3.persist();
-
     configs.put(c1.getType(), c1);
     configs.put(c2.getType(), c2);
 
@@ -4210,27 +4197,20 @@ public class AmbariManagementControllerTest {
     cluster.setCurrentStackVersion(new StackId("HDP-2.0.6"));
 
     ConfigFactory cf = injector.getInstance(ConfigFactory.class);
-    Config config1 = cf.createNew(cluster, "global",
+    Config config1 = cf.createNew(cluster, "global", "version1",
         new HashMap<String, String>() {{
           put("key1", "value1");
         }}, new HashMap<String, Map<String, String>>());
-    config1.setTag("version1");
 
-    Config config2 = cf.createNew(cluster, "core-site",
+    Config config2 = cf.createNew(cluster, "core-site", "version1",
         new HashMap<String, String>() {{
           put("key1", "value1");
         }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("version1");
 
-    Config config3 = cf.createNew(cluster, "yarn-site",
+    Config config3 = cf.createNew(cluster, "yarn-site", "version1",
         new HashMap<String, String>() {{
           put("test.password", "supersecret");
         }}, new HashMap<String, Map<String,String>>());
-    config3.setTag("version1");
-
-    cluster.addConfig(config1);
-    cluster.addConfig(config2);
-    cluster.addConfig(config3);
 
     Service hdfs = cluster.addService("HDFS");
     Service mapred = cluster.addService("YARN");
@@ -4383,20 +4363,15 @@ public class AmbariManagementControllerTest {
     cluster.setCurrentStackVersion(new StackId("HDP-2.0.7"));
 
     ConfigFactory cf = injector.getInstance(ConfigFactory.class);
-    Config config1 = cf.createNew(cluster, "global",
+    Config config1 = cf.createNew(cluster, "global", "version1",
       new HashMap<String, String>() {{
         put("key1", "value1");
       }}, new HashMap<String, Map<String,String>>());
-    config1.setTag("version1");
 
-    Config config2 = cf.createNew(cluster, "core-site",
+    Config config2 = cf.createNew(cluster, "core-site", "version1",
       new HashMap<String, String>() {{
         put("key1", "value1");
       }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("version1");
-
-    cluster.addConfig(config1);
-    cluster.addConfig(config2);
 
     Service hdfs = cluster.addService("HDFS");
 
@@ -4488,19 +4463,15 @@ public class AmbariManagementControllerTest {
     cluster.setCurrentStackVersion(new StackId("HDP-2.0.7"));
 
     ConfigFactory cf = injector.getInstance(ConfigFactory.class);
-    Config config1 = cf.createNew(cluster, "global",
+    Config config1 = cf.createNew(cluster, "global", "version1",
         new HashMap<String, String>() {{
           put("key1", "value1");
         }}, new HashMap<String, Map<String,String>>());
-    config1.setTag("version1");
 
-    Config config2 = cf.createNew(cluster, "core-site",
+    Config config2 = cf.createNew(cluster, "core-site", "version1",
         new HashMap<String, String>() {{
           put("key1", "value1");
         }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("version1");
-    config1.persist();
-    config2.persist();
 
     cluster.addConfig(config1);
     cluster.addConfig(config2);
@@ -4776,18 +4747,14 @@ public class AmbariManagementControllerTest {
     cluster.setCurrentStackVersion(new StackId("HDP-0.1"));
 
     ConfigFactory cf = injector.getInstance(ConfigFactory.class);
-    Config config1 = cf.createNew(cluster, "global",
+    Config config1 = cf.createNew(cluster, "global", "version1",
         new HashMap<String, String>(){{ put("key1", "value1"); }}, new HashMap<String, Map<String,String>>());
-    config1.setTag("version1");
     config1.setPropertiesAttributes(new HashMap<String, Map<String, String>>(){{ put("attr1", new HashMap<String, String>()); }});
 
-    Config config2 = cf.createNew(cluster, "core-site",
+    Config config2 = cf.createNew(cluster, "core-site", "version1",
         new HashMap<String, String>(){{ put("key1", "value1"); }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("version1");
     config2.setPropertiesAttributes(new HashMap<String, Map<String, String>>(){{ put("attr2", new HashMap<String, String>()); }});
 
-    cluster.addConfig(config1);
-    cluster.addConfig(config2);
     cluster.addDesiredConfig("_test", Collections.singleton(config1));
     cluster.addDesiredConfig("_test", Collections.singleton(config2));
 
@@ -5522,11 +5489,8 @@ public class AmbariManagementControllerTest {
       configs3, null);
 
     ConfigFactory cf = injector.getInstance(ConfigFactory.class);
-    Config config1 = cf.createNew(cluster, "kerberos-env",
+    Config config1 = cf.createNew(cluster, "kerberos-env", "version1",
         new HashMap<String, String>(), new HashMap<String, Map<String,String>>());
-    config1.setTag("version1");
-
-    cluster.addConfig(config1);
 
     ClusterRequest crReq = new ClusterRequest(cluster.getClusterId(), cluster1, null, null);
     crReq.setDesiredConfig(Collections.singletonList(cr1));
@@ -6448,20 +6412,15 @@ public class AmbariManagementControllerTest {
     cluster.setCurrentStackVersion(new StackId("HDP-2.0.6"));
 
     ConfigFactory cf = injector.getInstance(ConfigFactory.class);
-    Config config1 = cf.createNew(cluster, "global",
+    Config config1 = cf.createNew(cluster, "global", "version1",
       new HashMap<String, String>() {{
         put("key1", "value1");
       }}, new HashMap<String, Map<String,String>>());
-    config1.setTag("version1");
 
-    Config config2 = cf.createNew(cluster, "core-site",
+    Config config2 = cf.createNew(cluster, "core-site", "version1",
       new HashMap<String, String>() {{
         put("key1", "value1");
       }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("version1");
-
-    cluster.addConfig(config1);
-    cluster.addConfig(config2);
 
     Service hdfs = cluster.addService("HDFS");
     Service mapred = cluster.addService("YARN");
@@ -6554,20 +6513,15 @@ public class AmbariManagementControllerTest {
     cluster.setCurrentStackVersion(new StackId("HDP-2.0.6"));
 
     ConfigFactory cf = injector.getInstance(ConfigFactory.class);
-    Config config1 = cf.createNew(cluster, "global",
+    Config config1 = cf.createNew(cluster, "global", "version1",
       new HashMap<String, String>() {{
         put("key1", "value1");
       }}, new HashMap<String, Map<String,String>>());
-    config1.setTag("version1");
 
-    Config config2 = cf.createNew(cluster, "core-site",
+    Config config2 = cf.createNew(cluster, "core-site", "version1",
       new HashMap<String, String>() {{
         put("key1", "value1");
       }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("version1");
-
-    cluster.addConfig(config1);
-    cluster.addConfig(config2);
 
     Service hdfs = cluster.addService("HDFS");
     Service mapred = cluster.addService("YARN");
@@ -6981,13 +6935,13 @@ public class AmbariManagementControllerTest {
     String group2 = getUniqueName();
     String tag2 = getUniqueName();
 
+    ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
+
     // Create Config group for core-site
     configs = new HashMap<String, String>();
     configs.put("a", "c");
     cluster = clusters.getCluster(cluster1);
-    final Config config = new ConfigImpl("core-site");
-    config.setProperties(configs);
-    config.setTag("version122");
+    final Config config =  configFactory.createReadOnly("core-site", "version122", configs, null);
     Long groupId = createConfigGroup(cluster, group1, tag1,
       new ArrayList<String>() {{ add(host1); }},
       new ArrayList<Config>() {{ add(config); }});
@@ -6998,9 +6952,7 @@ public class AmbariManagementControllerTest {
     configs = new HashMap<String, String>();
     configs.put("a", "c");
 
-    final Config config2 = new ConfigImpl("mapred-site");
-    config2.setProperties(configs);
-    config2.setTag("version122");
+    final Config config2 =  configFactory.createReadOnly("mapred-site", "version122", configs, null);
     groupId = createConfigGroup(cluster, group2, tag2,
       new ArrayList<String>() {{ add(host1); }},
       new ArrayList<Config>() {{ add(config2); }});
@@ -7065,7 +7017,6 @@ public class AmbariManagementControllerTest {
     ConfigGroup configGroup = cluster.getConfigGroups().get(groupId);
     configGroup.setHosts(new HashMap<Long, Host>() {{ put(3L,
       clusters.getHost(host3)); }});
-    configGroup.persist();
 
     requestId = startService(cluster1, serviceName2, false, false);
     mapredInstall = null;
@@ -7143,9 +7094,8 @@ public class AmbariManagementControllerTest {
     String group1 = getUniqueName();
     String tag1 = getUniqueName();
 
-    final Config config = new ConfigImpl("hdfs-site");
-    config.setProperties(configs);
-    config.setTag("version122");
+    ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
+    final Config config = configFactory.createReadOnly("hdfs-site", "version122", configs, null);
     Long groupId = createConfigGroup(clusters.getCluster(cluster1), group1, tag1,
         new ArrayList<String>() {{
           add(host1);
@@ -7253,9 +7203,8 @@ public class AmbariManagementControllerTest {
     configs = new HashMap<String, String>();
     configs.put("a", "c");
 
-    final Config config = new ConfigImpl("hdfs-site");
-    config.setProperties(configs);
-    config.setTag("version122");
+    ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
+    final Config config = configFactory.createReadOnly("hdfs-site", "version122", configs, null);
     Long groupId = createConfigGroup(clusters.getCluster(cluster1), group1, tag1,
       new ArrayList<String>() {{ add(host1); add(host2); }},
       new ArrayList<Config>() {{ add(config); }});
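
The hunks above replace direct ConfigImpl construction with the two ConfigFactory entry points visible in the diff: createNew(cluster, type, tag, properties, attributes), which creates the tagged config against the cluster in one call so the separate setTag/addConfig/persist steps disappear, and createReadOnly(type, tag, properties, attributes), which builds a detached config used only to seed a config group. A sketch under those assumptions; the wrapper class and method names are illustrative:

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.ambari.server.state.Cluster;
    import org.apache.ambari.server.state.Config;
    import org.apache.ambari.server.state.ConfigFactory;

    class ConfigFactorySketch {
      // Tagged config created and registered against the cluster in one call.
      static Config attached(ConfigFactory cf, Cluster cluster) {
        Map<String, String> props = new HashMap<String, String>();
        props.put("a", "a1");
        return cf.createNew(cluster, "hdfs-site", "v1",
            props, new HashMap<String, Map<String, String>>());
      }

      // Detached, read-only config; no cluster argument and nothing is persisted.
      static Config detached(ConfigFactory cf) {
        Map<String, String> props = new HashMap<String, String>();
        props.put("a", "c");
        return cf.createReadOnly("core-site", "version122", props, null);
      }
    }

The factory itself is obtained from Guice, e.g. injector.getInstance(ConfigFactory.class), as the updated tests do.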

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java
index 96810cf..1747b28 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java
@@ -66,7 +66,7 @@ import org.apache.ambari.server.security.authorization.RoleAuthorization;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
+import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.HostState;
 import org.apache.ambari.server.state.RepositoryVersionState;
@@ -108,6 +108,7 @@ public class UpgradeResourceProviderHDP22Test {
   private AmbariManagementController amc;
   private StackDAO stackDAO;
   private TopologyManager topologyManager;
+  private ConfigFactory configFactory;
 
   private static final String configTagVersion1 = "version1";
   private static final String configTagVersion2 = "version2";
@@ -136,6 +137,7 @@ public class UpgradeResourceProviderHDP22Test {
     stackDAO = injector.getInstance(StackDAO.class);
     upgradeDao = injector.getInstance(UpgradeDAO.class);
     repoVersionDao = injector.getInstance(RepositoryVersionDAO.class);
+    configFactory = injector.getInstance(ConfigFactory.class);
 
     AmbariEventPublisher publisher = createNiceMock(AmbariEventPublisher.class);
     replay(publisher);
@@ -233,11 +235,7 @@ public class UpgradeResourceProviderHDP22Test {
       }
     }
 
-    Config config = new ConfigImpl("hive-site");
-    config.setProperties(configTagVersion1Properties);
-    config.setTag(configTagVersion1);
-
-    cluster.addConfig(config);
+    Config config = configFactory.createNew(cluster, "hive-site", configTagVersion1, configTagVersion1Properties, null);
     cluster.addDesiredConfig("admin", Collections.singleton(config));
 
     Map<String, Object> requestProps = new HashMap<String, Object>();
@@ -286,9 +284,7 @@ public class UpgradeResourceProviderHDP22Test {
     // Hive service checks have generated the ExecutionCommands by now.
     // Change the new desired config tag and verify execution command picks up new tag
     assertEquals(configTagVersion1, cluster.getDesiredConfigByType("hive-site").getTag());
-    final Config newConfig = new ConfigImpl("hive-site");
-    newConfig.setProperties(configTagVersion2Properties);
-    newConfig.setTag(configTagVersion2);
+    final Config newConfig = configFactory.createNew(cluster, "hive-site", configTagVersion2, configTagVersion2Properties, null);
     Set<Config> desiredConfigs = new HashSet<Config>() {
       {
         add(newConfig);

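As in the previous file, the upgraded test builds the replacement hive-site config through the factory and immediately marks it desired; the explicit cluster.addConfig(...) call is gone because the factory registers the config. A minimal sketch, with an empty properties map standing in for configTagVersion2Properties:

    import java.util.Collections;
    import java.util.HashMap;

    import org.apache.ambari.server.state.Cluster;
    import org.apache.ambari.server.state.Config;
    import org.apache.ambari.server.state.ConfigFactory;

    class DesiredConfigSwapSketch {
      static void swapHiveSite(ConfigFactory configFactory, Cluster cluster) {
        // One call creates and registers the tagged config; null property attributes are accepted.
        Config newConfig = configFactory.createNew(cluster, "hive-site", "version2",
            new HashMap<String, String>(), null);
        cluster.addDesiredConfig("admin", Collections.singleton(newConfig));
      }
    }
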
http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
index d69bdbe..844331e 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
@@ -85,8 +85,8 @@ import org.apache.ambari.server.serveraction.upgrades.AutoSkipFailedSummaryActio
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.HostState;
@@ -144,6 +144,7 @@ public class UpgradeResourceProviderTest {
   private StackDAO stackDAO;
   private AmbariMetaInfo ambariMetaInfo;
   private TopologyManager topologyManager;
+  private ConfigFactory configFactory;
 
   @Before
   public void before() throws Exception {
@@ -174,6 +175,7 @@ public class UpgradeResourceProviderTest {
 
     amc = injector.getInstance(AmbariManagementController.class);
     ambariMetaInfo = injector.getInstance(AmbariMetaInfo.class);
+    configFactory = injector.getInstance(ConfigFactory.class);
 
     Field field = AmbariServer.class.getDeclaredField("clusterController");
     field.setAccessible(true);
@@ -1046,16 +1048,9 @@ public class UpgradeResourceProviderTest {
     }
 
 
-    Config config = new ConfigImpl("zoo.cfg");
-    config.setProperties(new HashMap<String, String>() {{
-      put("a", "b");
-    }});
-    config.setTag("abcdefg");
-
-    cluster.addConfig(config);
+    Config config = configFactory.createNew(cluster, "zoo.cfg", "abcdefg", Collections.singletonMap("a", "b"), null);
     cluster.addDesiredConfig("admin", Collections.singleton(config));
 
-
     Map<String, Object> requestProps = new HashMap<String, Object>();
     requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1");
     requestProps.put(UpgradeResourceProvider.UPGRADE_VERSION, "2.2.0.0");

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
index 0163024..80a3bc5 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
@@ -49,8 +49,7 @@ import org.apache.ambari.server.orm.entities.HostVersionEntity;
 import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
+import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.RepositoryInfo;
 import org.apache.ambari.server.state.RepositoryVersionState;
@@ -113,6 +112,9 @@ public class ComponentVersionCheckActionTest {
   @Inject
   private ServiceComponentHostFactory serviceComponentHostFactory;
 
+  @Inject
+  private ConfigFactory configFactory;
+
   @Before
   public void setup() throws Exception {
     m_injector = Guice.createInjector(new InMemoryDefaultTestModule());
@@ -399,18 +401,11 @@ public class ComponentVersionCheckActionTest {
     properties.put("a", "a1");
     properties.put("b", "b1");
 
-    Config c1 = new ConfigImpl(cluster, "hdfs-site", properties, propertiesAttributes, m_injector);
+    configFactory.createNew(cluster, "hdfs-site", "version1", properties, propertiesAttributes);
     properties.put("c", "c1");
     properties.put("d", "d1");
 
-    Config c2 = new ConfigImpl(cluster, "core-site", properties, propertiesAttributes, m_injector);
-    Config c3 = new ConfigImpl(cluster, "foo-site", properties, propertiesAttributes, m_injector);
-
-    cluster.addConfig(c1);
-    cluster.addConfig(c2);
-    cluster.addConfig(c3);
-    c1.persist();
-    c2.persist();
-    c3.persist();
+    configFactory.createNew(cluster, "core-site", "version1", properties, propertiesAttributes);
+    configFactory.createNew(cluster, "foo-site", "version1", properties, propertiesAttributes);
   }
 }
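
This test obtains the factory through Guice field injection instead of an explicit injector lookup. A sketch of both wiring styles seen in these hunks; the enclosing class is illustrative, and the Inject annotation package is an assumption (Guice accepts either com.google.inject.Inject or javax.inject.Inject):

    import com.google.inject.Inject;
    import com.google.inject.Injector;

    import org.apache.ambari.server.state.ConfigFactory;

    class FactoryWiringSketch {
      @Inject
      private ConfigFactory configFactory;   // field injection, as added in the hunk above

      static ConfigFactory lookup(Injector injector) {
        // explicit lookup, as used by the other updated tests
        return injector.getInstance(ConfigFactory.class);
      }
    }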

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
index 7ab2856..92fa084 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
@@ -132,13 +132,10 @@ public class ConfigureActionTest {
 
     c.setCurrentStackVersion(HDP_211_STACK);
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {{
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {{
           put("initLimit", "10");
         }}, new HashMap<String, Map<String,String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -196,16 +193,13 @@ public class ConfigureActionTest {
 
     // create a config for zoo.cfg with two values; one is a stack value and the
     // other is custom
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
       {
         put("tickTime", "2000");
         put("foo", "bar");
       }
     }, new HashMap<String, Map<String, String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -262,16 +256,13 @@ public class ConfigureActionTest {
     assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {{
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {{
           put("initLimit", "10");
           put("copyIt", "10");
           put("moveIt", "10");
           put("deleteIt", "10");
         }}, new HashMap<String, Map<String,String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -402,15 +393,12 @@ public class ConfigureActionTest {
     assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
       {
         put("zoo.server.csv", "c6401,c6402,  c6403");
       }
     }, new HashMap<String, Map<String, String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -468,16 +456,13 @@ public class ConfigureActionTest {
     assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
       {
         put("key_to_replace", "My New Cat");
         put("key_with_no_match", "WxyAndZ");
       }
     }, new HashMap<String, Map<String, String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -543,16 +528,13 @@ public class ConfigureActionTest {
     assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
       {
         put("existing", "This exists!");
         put("missing", null);
       }
     }, new HashMap<String, Map<String, String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -604,16 +586,12 @@ public class ConfigureActionTest {
 
     c.setCurrentStackVersion(HDP_211_STACK);
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
       {
         put("fooKey", "barValue");
       }
     }, new HashMap<String, Map<String, String>>());
 
-    config.setTag("version2");
-    config.persist();
-
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -671,7 +649,7 @@ public class ConfigureActionTest {
 
     c.setCurrentStackVersion(HDP_211_STACK);
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
       {
         put("set.key.1", "s1");
         put("set.key.2", "s2");
@@ -680,10 +658,6 @@ public class ConfigureActionTest {
       }
     }, new HashMap<String, Map<String, String>>());
 
-    config.setTag("version2");
-    config.persist();
-
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -769,7 +743,7 @@ public class ConfigureActionTest {
 
     c.setCurrentStackVersion(HDP_211_STACK);
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
       {
         put("set.key.1", "s1");
         put("set.key.2", "s2");
@@ -778,10 +752,6 @@ public class ConfigureActionTest {
       }
     }, new HashMap<String, Map<String, String>>());
 
-    config.setTag("version2");
-    config.persist();
-
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -855,7 +825,7 @@ public class ConfigureActionTest {
 
     c.setCurrentStackVersion(HDP_211_STACK);
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
       {
         put("replace.key.1", "r1");
         put("replace.key.2", "r2");
@@ -865,10 +835,6 @@ public class ConfigureActionTest {
       }
     }, new HashMap<String, Map<String, String>>());
 
-    config.setTag("version2");
-    config.persist();
-
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -951,7 +917,7 @@ public class ConfigureActionTest {
 
     c.setCurrentStackVersion(HDP_211_STACK);
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
       {
         put("replace.key.1", "r1");
         put("replace.key.2", "r2");
@@ -961,10 +927,6 @@ public class ConfigureActionTest {
       }
     }, new HashMap<String, Map<String, String>>());
 
-    config.setTag("version2");
-    config.persist();
-
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -1041,15 +1003,12 @@ public class ConfigureActionTest {
     assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {{
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {{
           put("initLimit", "10");
           put("copy.key.1", "c1");
           put("copy.key.2", "c2");
         }}, new HashMap<String, Map<String,String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -1157,15 +1116,12 @@ public class ConfigureActionTest {
     assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {{
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {{
           put("initLimit", "10");
           put("copy.key.1", "c1");
           put("copy.key.2", "c2");
         }}, new HashMap<String, Map<String,String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -1253,17 +1209,14 @@ public class ConfigureActionTest {
     assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {{
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {{
           put("initLimit", "10");
           put("move.key.1", "m1");
           put("move.key.2", "m2");
           put("move.key.3", "m3");
           put("move.key.4", "m4");
         }}, new HashMap<String, Map<String,String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -1362,17 +1315,15 @@ public class ConfigureActionTest {
     assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {{
+    Config config = cf.createNew(c, "zoo.cfg", "version2",
+        new HashMap<String, String>() {{
           put("initLimit", "10");
           put("move.key.1", "m1");
           put("move.key.2", "m2");
           put("move.key.3", "m3");
           put("move.key.4", "m4");
         }}, new HashMap<String, Map<String,String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -1466,17 +1417,14 @@ public class ConfigureActionTest {
     assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {{
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {{
           put("initLimit", "10");
           put("delete.key.1", "d1");
           put("delete.key.2", "d2");
           put("delete.key.3", "d3");
           put("delete.key.4", "d4");
         }}, new HashMap<String, Map<String,String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -1567,17 +1515,14 @@ public class ConfigureActionTest {
     assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {{
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {{
           put("initLimit", "10");
           put("delete.key.1", "d1");
           put("delete.key.2", "d2");
           put("delete.key.3", "d3");
           put("delete.key.4", "d4");
         }}, new HashMap<String, Map<String,String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -1674,15 +1619,12 @@ public class ConfigureActionTest {
     // service properties will not run!
     installService(c, "ZOOKEEPER");
 
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+    Config config = cf.createNew(c, "zoo.cfg", "version1", new HashMap<String, String>() {
       {
         put("initLimit", "10");
       }
     }, new HashMap<String, Map<String, String>>());
-    config.setTag("version1");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
 
     // add a host component

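Taken together, the ConfigureActionTest hunks collapse the old four-step setup (createNew without a tag, then setTag, persist and addConfig) into a single tagged createNew call followed by addDesiredConfig. A before/after sketch; the removed calls appear only as comments since they no longer exist on the refactored Config:

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.ambari.server.state.Cluster;
    import org.apache.ambari.server.state.Config;
    import org.apache.ambari.server.state.ConfigFactory;

    class ZooCfgSetupSketch {
      static void addDesiredZooCfg(ConfigFactory cf, Cluster c) {
        // Old pattern removed by this patch:
        //   Config config = cf.createNew(c, "zoo.cfg", props, attrs);
        //   config.setTag("version2");
        //   config.persist();
        //   c.addConfig(config);
        Map<String, String> props = new HashMap<String, String>();
        props.put("initLimit", "10");
        Config config = cf.createNew(c, "zoo.cfg", "version2",
            props, new HashMap<String, Map<String, String>>());
        c.addDesiredConfig("user", Collections.singleton(config));
      }
    }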

[43/51] [abbrv] ambari git commit: AMBARI-19012. Ability to use external Solr for Log Search instead of AMBARI_INFRA_SOLR - addendum: UT failure fix (oleewere)

Posted by sm...@apache.org.
AMBARI-19012. Ability to use external Solr for Log Search instead of AMBARI_INFRA_SOLR - addendum: UT failure fix (oleewere)

Change-Id: I59204873b2fd46e90c0f6336543cc87e65e3d413


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6150f956
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6150f956
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6150f956

Branch: refs/heads/branch-feature-AMBARI-18901
Commit: 6150f9567f30c0952ffc9402f80b78965eb463b2
Parents: 5e2c267
Author: oleewere <ol...@gmail.com>
Authored: Fri Dec 9 11:49:41 2016 +0100
Committer: oleewere <ol...@gmail.com>
Committed: Fri Dec 9 12:04:32 2016 +0100

----------------------------------------------------------------------
 .../src/test/python/stacks/2.3/common/test_stack_advisor.py    | 6 ++++++
 1 file changed, 6 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/6150f956/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
index 4a77086..6317376 100644
--- a/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
@@ -1966,6 +1966,12 @@ class TestHDP23StackAdvisor(TestCase):
       "ramPerContainer": 256
     }
     expected = {
+      'logfeeder-env': {'property_attributes': {'logfeeder_external_solr_kerberos_keytab': {'visible': 'false'},
+                                                'logfeeder_external_solr_kerberos_principal': {'visible': 'false'}}},
+      'logsearch-env': {'properties': {'logsearch_external_solr_kerberos_enabled': 'false'},
+                        'property_attributes': {'logsearch_external_solr_kerberos_enabled': {'visible': 'false'},
+                                                'logsearch_external_solr_kerberos_keytab': {'visible': 'false'},
+                                                'logsearch_external_solr_kerberos_principal': {'visible': 'false'}}},
       'logsearch-properties': {
         'properties': {
           "logsearch.collection.service.logs.numshards" : "2",


[23/51] [abbrv] ambari git commit: AMBARI-19137. HDP 3.0 TP - move ZK, HDFS, YARN/MR into new common-services version (alejandro)

Posted by sm...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-audit.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-audit.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-audit.xml
deleted file mode 100644
index a6b1baa..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-audit.xml
+++ /dev/null
@@ -1,177 +0,0 @@
-<?xml version="1.0"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>xasecure.audit.is.enabled</name>
-    <value>true</value>
-    <description>Is Audit enabled?</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db</name>
-    <value>false</value>
-    <display-name>Audit to DB</display-name>
-    <description>Is Audit to DB enabled?</description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>ranger-env</type>
-        <name>xasecure.audit.destination.db</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.jdbc.url</name>
-    <value>{{audit_jdbc_url}}</value>
-    <description>Audit DB JDBC URL</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.user</name>
-    <value>{{xa_audit_db_user}}</value>
-    <description>Audit DB JDBC User</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.password</name>
-    <value>crypted</value>
-    <property-type>PASSWORD</property-type>
-    <description>Audit DB JDBC Password</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.jdbc.driver</name>
-    <value>{{jdbc_driver}}</value>
-    <description>Audit DB JDBC Driver</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.credential.provider.file</name>
-    <value>jceks://file{{credential_file}}</value>
-    <description>Credential file store</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.batch.filespool.dir</name>
-    <value>/var/log/hadoop/yarn/audit/db/spool</value>
-    <description>/var/log/hadoop/yarn/audit/db/spool</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.hdfs</name>
-    <value>true</value>
-    <display-name>Audit to HDFS</display-name>
-    <description>Is Audit to HDFS enabled?</description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>ranger-env</type>
-        <name>xasecure.audit.destination.hdfs</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.hdfs.dir</name>
-    <value>hdfs://NAMENODE_HOSTNAME:8020/ranger/audit</value>
-    <description>HDFS folder to write audit to, make sure the service user has requried permissions</description>
-    <depends-on>
-      <property>
-        <type>ranger-env</type>
-        <name>xasecure.audit.destination.hdfs.dir</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
-    <value>/var/log/hadoop/yarn/audit/hdfs/spool</value>
-    <description>/var/log/hadoop/yarn/audit/hdfs/spool</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.solr</name>
-    <value>false</value>
-    <display-name>Audit to SOLR</display-name>
-    <description>Is Solr audit enabled?</description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>ranger-env</type>
-        <name>xasecure.audit.destination.solr</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.solr.urls</name>
-    <value/>
-    <description>Solr URL</description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>ranger-admin-site</type>
-        <name>ranger.audit.solr.urls</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.solr.zookeepers</name>
-    <value>NONE</value>
-    <description>Solr Zookeeper string</description>
-    <depends-on>
-      <property>
-        <type>ranger-admin-site</type>
-        <name>ranger.audit.solr.zookeepers</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
-    <value>/var/log/hadoop/yarn/audit/solr/spool</value>
-    <description>/var/log/hadoop/yarn/audit/solr/spool</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.provider.summary.enabled</name>
-    <value>false</value>
-    <display-name>Audit provider summary enabled</display-name>
-    <description>Enable Summary audit?</description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-plugin-properties.xml
deleted file mode 100644
index 97867cc..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-plugin-properties.xml
+++ /dev/null
@@ -1,82 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="true">
-  <property>
-    <name>policy_user</name>
-    <value>ambari-qa</value>
-    <display-name>Policy user for YARN</display-name>
-    <description>This user must be system user and also present at Ranger admin portal</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>hadoop.rpc.protection</name>
-    <value/>
-    <description>Used for repository creation on ranger admin</description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>common.name.for.certificate</name>
-    <value/>
-    <description>Common name for certificate, this value should match what is specified in repo within ranger admin</description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger-yarn-plugin-enabled</name>
-    <value>No</value>
-    <display-name>Enable Ranger for YARN</display-name>
-    <description>Enable ranger yarn plugin ?</description>
-    <depends-on>
-      <property>
-        <type>ranger-env</type>
-        <name>ranger-yarn-plugin-enabled</name>
-      </property>
-    </depends-on>
-    <value-attributes>
-      <type>boolean</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>REPOSITORY_CONFIG_USERNAME</name>
-    <value>yarn</value>
-    <display-name>Ranger repository config user</display-name>
-    <description>Used for repository creation on ranger admin</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>REPOSITORY_CONFIG_PASSWORD</name>
-    <value>yarn</value>
-    <display-name>Ranger repository config password</display-name>
-    <property-type>PASSWORD</property-type>
-    <description>Used for repository creation on ranger admin</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml
deleted file mode 100644
index 5410104..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml
+++ /dev/null
@@ -1,66 +0,0 @@
-<?xml version="1.0"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>xasecure.policymgr.clientssl.keystore</name>
-    <value>/usr/hdp/current/hadoop-client/conf/ranger-yarn-plugin-keystore.jks</value>
-    <description>Java Keystore files</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.keystore.password</name>
-    <value>myKeyFilePassword</value>
-    <property-type>PASSWORD</property-type>
-    <description>password for keystore</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.truststore</name>
-    <value>/usr/hdp/current/hadoop-client/conf/ranger-yarn-plugin-truststore.jks</value>
-    <description>java truststore file</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.truststore.password</name>
-    <value>changeit</value>
-    <property-type>PASSWORD</property-type>
-    <description>java truststore password</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
-    <value>jceks://file{{credential_file}}</value>
-    <description>java keystore credential file</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
-    <value>jceks://file{{credential_file}}</value>
-    <description>java truststore credential file</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-security.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-security.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-security.xml
deleted file mode 100644
index 5f69962..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-security.xml
+++ /dev/null
@@ -1,58 +0,0 @@
-<?xml version="1.0"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>ranger.plugin.yarn.service.name</name>
-    <value>{{repo_name}}</value>
-    <description>Name of the Ranger service containing policies for this Yarn instance</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.yarn.policy.source.impl</name>
-    <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
-    <description>Class to retrieve policies from the source</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.yarn.policy.rest.url</name>
-    <value>{{policymgr_mgr_url}}</value>
-    <description>URL to Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.yarn.policy.rest.ssl.config.file</name>
-    <value>/etc/hadoop/conf/ranger-policymgr-ssl-yarn.xml</value>
-    <description>Path to the file containing SSL details to contact Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.yarn.policy.pollIntervalMs</name>
-    <value>30000</value>
-    <description>How often to poll for changes in policies?</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.yarn.policy.cache.dir</name>
-    <value>/etc/ranger/{{repo_name}}/policycache</value>
-    <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-env.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-env.xml
deleted file mode 100644
index bbc2930..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-env.xml
+++ /dev/null
@@ -1,200 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="true" supports_adding_forbidden="true">
-  <!-- These properties were inherited from HDP 2.1 -->
-  <property>
-    <name>apptimelineserver_heapsize</name>
-    <value>1024</value>
-    <display-name>AppTimelineServer Java heap size</display-name>
-    <description>Max heapsize for AppTimelineServer using a numerical value in the scale of MB</description>
-    <value-attributes>
-      <overridable>false</overridable>
-      <unit>MB</unit>
-      <type>int</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <!-- These properties were inherited from HDP 2.2 -->
-  <property>
-    <name>yarn_cgroups_enabled</name>
-    <value>false</value>
-    <description>You can use CGroups to isolate CPU-heavy processes in a Hadoop cluster.</description>
-    <display-name>CPU Isolation</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>true</value>
-          <label>Enabled</label>
-        </entry>
-        <entry>
-          <value>false</value>
-          <label>Disabled</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <!-- These properties were inherited from HDP 2.3 -->
-  <property>
-    <name>is_supported_yarn_ranger</name>
-    <value>true</value>
-    <description>Set to false by default; needs to be set to true in stacks that use the Ranger Yarn Plugin</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <!-- yarn-env.sh -->
-  <property>
-    <name>content</name>
-    <display-name>yarn-env template</display-name>
-    <description>This is the jinja template for yarn-env.sh file</description>
-    <value>
-      export HADOOP_YARN_HOME={{hadoop_yarn_home}}
-      export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER
-      export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER
-      export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
-      export JAVA_HOME={{java64_home}}
-      export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}"
-
-      # We need to add the EWMA appender for the yarn daemons only;
-      # however, YARN_ROOT_LOGGER is shared by the yarn client and the
-      # daemons. This restricts the EWMA appender to daemons only.
-      INVOKER="${0##*/}"
-      if [ "$INVOKER" == "yarn-daemon.sh" ]; then
-        export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,EWMA,RFA}
-      fi
-
-      # User for YARN daemons
-      export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
-
-      # resolve links - $0 may be a softlink
-      export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
-
-      # some Java parameters
-      # export JAVA_HOME=/home/y/libexec/jdk1.6.0/
-      if [ "$JAVA_HOME" != "" ]; then
-      #echo "run java in $JAVA_HOME"
-      JAVA_HOME=$JAVA_HOME
-      fi
-
-      if [ "$JAVA_HOME" = "" ]; then
-      echo "Error: JAVA_HOME is not set."
-      exit 1
-      fi
-
-      JAVA=$JAVA_HOME/bin/java
-      JAVA_HEAP_MAX=-Xmx1000m
-
-      # For setting YARN specific HEAP sizes please use this
-      # Parameter and set appropriately
-      YARN_HEAPSIZE={{yarn_heapsize}}
-
-      # check envvars which might override default args
-      if [ "$YARN_HEAPSIZE" != "" ]; then
-      JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
-      fi
-
-      # Resource Manager specific parameters
-
-      # Specify the max Heapsize for the ResourceManager using a numerical value
-      # in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
-      # the value to 1000.
-      # This value will be overridden by an Xmx setting specified in either YARN_OPTS
-      # and/or YARN_RESOURCEMANAGER_OPTS.
-      # If not specified, the default value will be picked from either YARN_HEAPMAX
-      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-      export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}
-
-      # Specify the JVM options to be used when starting the ResourceManager.
-      # These options will be appended to the options specified as YARN_OPTS
-      # and therefore may override any similar flags set in YARN_OPTS
-      #export YARN_RESOURCEMANAGER_OPTS=
-
-      # Node Manager specific parameters
-
-      # Specify the max Heapsize for the NodeManager using a numerical value
-      # in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
-      # the value to 1000.
-      # This value will be overridden by an Xmx setting specified in either YARN_OPTS
-      # and/or YARN_NODEMANAGER_OPTS.
-      # If not specified, the default value will be picked from either YARN_HEAPMAX
-      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-      export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}
-
-      # Specify the max Heapsize for the timeline server using a numerical value
-      # in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
-      # the value to 1024.
-      # This value will be overridden by an Xmx setting specified in either YARN_OPTS
-      # and/or YARN_TIMELINESERVER_OPTS.
-      # If not specified, the default value will be picked from either YARN_HEAPMAX
-      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-      export YARN_TIMELINESERVER_HEAPSIZE={{apptimelineserver_heapsize}}
-
-      # Specify the JVM options to be used when starting the NodeManager.
-      # These options will be appended to the options specified as YARN_OPTS
-      # and therefore may override any similar flags set in YARN_OPTS
-      #export YARN_NODEMANAGER_OPTS=
-
-      # so that filenames w/ spaces are handled correctly in loops below
-      IFS=
-
-
-      # default log directory and file
-      if [ "$YARN_LOG_DIR" = "" ]; then
-      YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
-      fi
-      if [ "$YARN_LOGFILE" = "" ]; then
-      YARN_LOGFILE='yarn.log'
-      fi
-
-      # default policy file for service-level authorization
-      if [ "$YARN_POLICYFILE" = "" ]; then
-      YARN_POLICYFILE="hadoop-policy.xml"
-      fi
-
-      # restore ordinary behaviour
-      unset IFS
-
-
-      YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
-      YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
-      YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
-      YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
-      YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
-      YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
-      YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
-      YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
-      export YARN_NODEMANAGER_OPTS="$YARN_NODEMANAGER_OPTS -Dnm.audit.logger=INFO,NMAUDIT"
-      export YARN_RESOURCEMANAGER_OPTS="$YARN_RESOURCEMANAGER_OPTS -Drm.audit.logger=INFO,RMAUDIT"
-      if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
-      YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
-      fi
-      YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
-      YARN_OPTS="$YARN_OPTS -Djava.io.tmpdir={{hadoop_java_io_tmpdir}}"
-    </value>
-    <value-attributes>
-      <type>content</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
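
The comments in the removed yarn-env template describe the heap sizing precedence: an explicit -Xmx in the daemon-specific OPTS (e.g. YARN_RESOURCEMANAGER_OPTS) wins, otherwise the daemon heapsize export, otherwise YARN_HEAPSIZE, otherwise the hard-coded -Xmx1000m fallback. A small Java sketch of that precedence, purely illustrative (the helper name and sample numbers are made up):

    public class YarnHeapSizingSketch {
      // Mirrors the precedence documented in the yarn-env.sh template comments.
      static String effectiveXmx(String daemonOpts, Integer daemonHeapMb, Integer yarnHeapMb) {
        if (daemonOpts != null && daemonOpts.contains("-Xmx")) {
          return "(taken from the daemon-specific OPTS)";
        }
        if (daemonHeapMb != null) {
          return "-Xmx" + daemonHeapMb + "m";   // e.g. resourcemanager_heapsize
        }
        return (yarnHeapMb != null) ? "-Xmx" + yarnHeapMb + "m" : "-Xmx1000m";
      }

      public static void main(String[] args) {
        // resourcemanager_heapsize=2048, yarn_heapsize=1024, no -Xmx in YARN_RESOURCEMANAGER_OPTS
        System.out.println(effectiveXmx(null, 2048, 1024));   // prints -Xmx2048m
      }
    }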

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-log4j.xml
deleted file mode 100644
index 9ac34f3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-log4j.xml
+++ /dev/null
@@ -1,103 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>content</name>
-    <display-name>yarn-log4j template</display-name>
-    <description>Custom log4j.properties</description>
-    <value>
-#Relative to Yarn Log Dir Prefix
-yarn.log.dir=.
-#
-# Job Summary Appender
-#
-# Use following logger to send summary to separate file defined by
-# hadoop.mapreduce.jobsummary.log.file rolled daily:
-# hadoop.mapreduce.jobsummary.logger=INFO,JSA
-#
-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
-# Set the ResourceManager summary log filename
-yarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log
-# Set the ResourceManager summary log level and appender
-yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
-#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
-
-# To enable AppSummaryLogging for the RM,
-# set yarn.server.resourcemanager.appsummary.logger to
-# LEVEL,RMSUMMARY in hadoop-env.sh
-
-# Appender for ResourceManager Application Summary Log
-# Requires the following properties to be set
-#    - hadoop.log.dir (Hadoop Log directory)
-#    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
-#    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
-log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
-log4j.appender.RMSUMMARY.File=${yarn.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
-log4j.appender.RMSUMMARY.MaxFileSize=256MB
-log4j.appender.RMSUMMARY.MaxBackupIndex=20
-log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
-log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
-log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-log4j.appender.JSA.DatePattern=.yyyy-MM-dd
-log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
-log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
-log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
-
-# Appender for viewing information for errors and warnings
-yarn.ewma.cleanupInterval=300
-yarn.ewma.messageAgeLimitSeconds=86400
-yarn.ewma.maxUniqueMessages=250
-log4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender
-log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
-log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
-log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}
-
-# Audit logging for ResourceManager
-rm.audit.logger=${hadoop.root.logger}
-log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=${rm.audit.logger}
-log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=false
-log4j.appender.RMAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.RMAUDIT.File=${yarn.log.dir}/rm-audit.log
-log4j.appender.RMAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.RMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.RMAUDIT.DatePattern=.yyyy-MM-dd
-
-# Audit logging for NodeManager
-nm.audit.logger=${hadoop.root.logger}
-log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=${nm.audit.logger}
-log4j.additivity.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=false
-log4j.appender.NMAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.NMAUDIT.File=${yarn.log.dir}/nm-audit.log
-log4j.appender.NMAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.NMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.NMAUDIT.DatePattern=.yyyy-MM-dd
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
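
The removed yarn-log4j template routes ResourceManager and NodeManager audit events to dedicated daily-rolling files (RMAUDIT, NMAUDIT) with additivity disabled, so audit records do not also land in the main daemon log, and it defines the EWMA warning/error appender that the yarn-env template enables for daemons only. A short log4j 1.x sketch, assuming a rendered properties file at a hypothetical path, shows how the RM audit logger ends up configured:

    import org.apache.log4j.Logger;
    import org.apache.log4j.PropertyConfigurator;

    public class YarnLog4jSketch {
      public static void main(String[] args) {
        // Hypothetical path to the log4j.properties rendered from the template above.
        PropertyConfigurator.configure("/etc/hadoop/conf/log4j.properties");

        Logger rmAudit = Logger.getLogger(
            "org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger");
        // Expected: additivity=false, so audit events go only to the RMAUDIT file appender.
        System.out.println("RM audit additivity: " + rmAudit.getAdditivity());
        System.out.println("RM audit level: " + rmAudit.getEffectiveLevel());
      }
    }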

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-site.xml
index e33b91d..0f46d75 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-site.xml
@@ -17,798 +17,19 @@
   limitations under the License.
 -->
 <configuration supports_final="true">
-  <!-- These configs were inherited from HDP 2.1 -->
-  <property>
-    <name>yarn.timeline-service.enabled</name>
-    <value>true</value>
-    <description>Indicate to clients whether timeline service is enabled or not.
-      If enabled, clients will put entities and events to the timeline server.
-    </description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.generic-application-history.store-class</name>
-    <value>org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore</value>
-    <description>
-      Store class name for history store, defaulting to file system store
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.webapp.address</name>
-    <value>localhost:8188</value>
-    <description>
-      The http address of the timeline service web application.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.webapp.https.address</name>
-    <value>localhost:8190</value>
-    <description>
-      The http address of the timeline service web application.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.address</name>
-    <value>localhost:10200</value>
-    <description>
-      This is default address for the timeline server to start
-      the RPC server.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <description>Time to live for timeline store data in milliseconds.</description>
-    <name>yarn.timeline-service.ttl-ms</name>
-    <value>2678400000</value>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <description>Length of time to wait between deletion cycles of leveldb timeline store in milliseconds.</description>
-    <name>yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms</name>
-    <value>300000</value>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
 
-  <!-- These configs were inherited from HDP 2.2 -->
-  <property>
-    <name>yarn.application.classpath</name>
-    <value>$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*</value>
-    <description>Classpath for typical applications.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>hadoop.registry.rm.enabled</name>
-    <value>false</value>
-    <description>
-      Is the registry enabled: does the RM start it up, create the user and system paths, and purge service records when containers, application attempts and applications complete
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>hadoop.registry.zk.quorum</name>
-    <value>localhost:2181</value>
-    <description>
-      List of hostname:port pairs defining the zookeeper quorum binding for the registry
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.recovery.enabled</name>
-    <value>true</value>
-    <description>Enable the node manager to recover after starting</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.recovery.dir</name>
-    <value>{{yarn_log_dir_prefix}}/nodemanager/recovery-state</value>
-    <description>
-      The local filesystem directory in which the node manager will store
-      state when recovery is enabled.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.client.nodemanager-connect.retry-interval-ms</name>
-    <value>10000</value>
-    <description>Time interval between each attempt to connect to NM</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.client.nodemanager-connect.max-wait-ms</name>
-    <value>60000</value>
-    <description>Max time to wait to establish a connection to NM</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.recovery.enabled</name>
-    <value>true</value>
-    <description>
-      Enable RM to recover state after starting.
-      If true, then yarn.resourcemanager.store.class must be specified.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.work-preserving-recovery.enabled</name>
-    <value>true</value>
-    <description>
-      Enable RM work preserving recovery. This configuration is private to YARN for experimenting the feature.
-    </description>
-    <display-name>Enable Work Preserving Restart</display-name>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.store.class</name>
-    <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
-    <description>
-      The class to use as the persistent store.
-      If org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore is used,
-      the store is implicitly fenced; meaning a single ResourceManager
-      is able to use the store at any point in time.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.zk-address</name>
-    <value>localhost:2181</value>
-    <description>
-      List Host:Port of the ZooKeeper servers to be used by the RM. comma separated host:port pairs, each corresponding to a zk server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" If the optional chroot suffix is used the example would look like: "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002/app/a" where the client would be rooted at "/app/a" and all paths would be relative to this root - ie getting/setting/etc...  "/foo/bar" would result in operations being run on "/app/a/foo/bar" (from the server perspective).
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.zk-state-store.parent-path</name>
-    <value>/rmstore</value>
-    <description>Full path of the ZooKeeper znode where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore as the value for yarn.resourcemanager.store.class</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.zk-acl</name>
-    <value>world:anyone:rwcda</value>
-    <description>ACL's to be used for ZooKeeper znodes.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms</name>
-    <value>10000</value>
-    <description>Set the amount of time RM waits before allocating new containers on work-preserving-recovery. Such wait period gives RM a chance to settle down resyncing with NMs in the cluster on recovery, before assigning new containers to applications.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.connect.retry-interval.ms</name>
-    <value>30000</value>
-    <description>How often to try connecting to the ResourceManager.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.connect.max-wait.ms</name>
-    <value>900000</value>
-    <description>Maximum time to wait to establish connection to ResourceManager</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.zk-retry-interval-ms</name>
-    <value>1000</value>
-    <description>"Retry interval in milliseconds when connecting to ZooKeeper.
-      When HA is enabled, the value here is NOT used. It is generated
-      automatically from yarn.resourcemanager.zk-timeout-ms and
-      yarn.resourcemanager.zk-num-retries."
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.zk-num-retries</name>
-    <value>1000</value>
-    <description>Number of times RM tries to connect to ZooKeeper.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.zk-timeout-ms</name>
-    <value>10000</value>
-    <description>ZooKeeper session timeout in milliseconds. Session expiration is managed by the ZooKeeper cluster itself, not by the client. This value is used by the cluster to determine when the client's session expires. Expirations happens when the cluster does not hear from the client within the specified session timeout period (i.e. no heartbeat).</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.state-store.max-completed-applications</name>
-    <value>${yarn.resourcemanager.max-completed-applications}</value>
-    <description>The maximum number of completed applications RM state store keeps, less than or equals to ${yarn.resourcemanager.max-completed-applications}. By default, it equals to ${yarn.resourcemanager.max-completed-applications}. This ensures that the applications kept in the state store are consistent with the applications remembered in RM memory. Any values larger than ${yarn.resourcemanager.max-completed-applications} will be reset to ${yarn.resourcemanager.max-completed-applications}. Note that this value impacts the RM recovery performance. Typically, a smaller value indicates better performance on RM recovery.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.fs.state-store.retry-policy-spec</name>
-    <value>2000, 500</value>
-    <description>hdfs client retry policy specification. hdfs client retry is always enabled. Specified in pairs of sleep-time and number-of-retries and (t0, n0), (t1, n1), ..., the first n0 retries sleep t0 milliseconds on average, the following n1 retries sleep t1 milliseconds on average, and so on.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.fs.state-store.uri</name>
-    <value> </value>
-    <description>URI pointing to the location of the FileSystem path where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore as the value for yarn.resourcemanager.store.class </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.ha.enabled</name>
-    <value>false</value>
-    <description>enable RM HA or not</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.resources-handler.class</name>
-    <value>org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler</value>
-    <description>Pre-requisite to use CGroups</description>
-    <depends-on>
-      <property>
-        <type>yarn-env</type>
-        <name>yarn_cgroups_enabled</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.cgroups.hierarchy</name>
-    <value>hadoop-yarn</value>
-    <description>Name of the Cgroups hierarchy under which all YARN jobs will be launched</description>
-    <depends-on>
-      <property>
-        <type>yarn-env</type>
-        <name>yarn_cgroups_enabled</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.cgroups.mount</name>
-    <value>false</value>
-    <description>If true, YARN will automount the CGroup, however the directory needs to already exist; else, the cgroup should be mounted by the admin</description>
-    <depends-on>
-      <property>
-        <type>yarn-env</type>
-        <name>yarn_cgroups_enabled</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.cgroups.mount-path</name>
-    <value>/cgroup</value>
-    <description>Path used by the LCE to mount cgroups if not found. This path must exist before the NodeManager is launched.</description>
-    <depends-on>
-      <property>
-        <type>yarn-env</type>
-        <name>yarn_cgroups_enabled</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage</name>
-    <value>false</value>
-    <description>Strictly limit CPU resource usage to allocated usage even if spare CPU is available</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.resource.cpu-vcores</name>
-    <value>8</value>
-    <description>Number of vcores that can be allocated
-      for containers. This is used by the RM scheduler when allocating
-      resources for containers. This is not used to limit the number of
-      CPUs used by YARN containers. If it is set to -1 and
-      yarn.nodemanager.resource.detect-hardware-capabilities is true, it is
-      automatically determined from the hardware in case of Windows and Linux.
-      In other cases, number of vcores is 8 by default.
-    </description>
-    <display-name>Number of virtual cores</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>32</maximum>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>yarn-site</type>
-        <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
-    <value>80</value>
-    <description>The amount of CPU allocated for YARN containers - only effective when used with CGroups</description>
-    <display-name>Percentage of physical CPU allocated for all containers on a node</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>100</maximum>
-      <increment-step>1</increment-step>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.node-labels.fs-store.retry-policy-spec</name>
-    <value>2000, 500</value>
-    <description>
-      Retry policy used for FileSystem node label store. The policy is
-      specified by N pairs of sleep-time in milliseconds and number-of-retries
-      &quot;s1,n1,s2,n2,...&quot;.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb</name>
-    <value>1000</value>
-    <description>This is related to disk size on the machines, admins should set one of yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb or yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage but not both. If both are set, the more conservative value will be used</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name>
-    <value>90</value>
-    <description>This is related to disk size on the machines, admins should set one of yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb or yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage but not both. If both are set, the more conservative value will be used</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</name>
-    <value>-1</value>
-    <description>Defines how often NMs wake up to upload log files. The default value is -1. By default, the logs will be uploaded when the application is finished. By setting this configuration, logs can be uploaded periodically while the application is running. The minimum rolling-interval-seconds that can be set is 3600.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.log-aggregation.debug-enabled</name>
-    <value>false</value>
-    <description>
-      This configuration is for debug and test purposes.
-      By setting this configuration to true,
-      we can break the lower bound of yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.log-aggregation.num-log-files-per-app</name>
-    <value>30</value>
-    <description>This is a temporary solution. The configuration will be deleted once we find a more scalable method to write only a single log file per LRS.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.system-metrics-publisher.enabled</name>
-    <value>true</value>
-    <description/>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size</name>
-    <value>10</value>
-    <description>Number of worker threads that send the yarn system metrics data.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.client.max-retries</name>
-    <value>30</value>
-    <description/>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.client.retry-interval-ms</name>
-    <value>1000</value>
-    <description/>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.ttl-enable</name>
-    <value>true</value>
-    <description>
-      Enable age off of timeline store data.
-    </description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.state-store-class</name>
-    <value>org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore</value>
-    <description>Store class name for timeline state store.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.leveldb-state-store.path</name>
-    <value>/hadoop/yarn/timeline</value>
-    <description>Store file name for leveldb state store.</description>
-    <value-attributes>
-      <type>directory</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.leveldb-timeline-store.path</name>
-    <value>/hadoop/yarn/timeline</value>
-    <description>Store file name for leveldb timeline store.</description>
-    <value-attributes>
-      <type>directory</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.leveldb-timeline-store.read-cache-size</name>
-    <value>104857600</value>
-    <description>
-      Size of read cache for uncompressed blocks for leveldb timeline store in bytes.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size</name>
-    <value>10000</value>
-    <description>
-      Size of cache for recently read entity start times for leveldb timeline store in number of entities.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size</name>
-    <value>10000</value>
-    <description>
-      Size of cache for recently written entity start times for leveldb timeline store in number of entities.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.http-authentication.type</name>
-    <value>simple</value>
-    <description>
-      Defines authentication used for the Timeline Server HTTP endpoint.
-      Supported values are: simple | kerberos | $AUTHENTICATION_HANDLER_CLASSNAME
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.http-authentication.simple.anonymous.allowed</name>
-    <value>true</value>
-    <description/>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled</name>
-    <value>false</value>
-    <description>
-      Flag to enable override of the default kerberos authentication filter with
-      the RM authentication filter to allow authentication using delegation
-      tokens(fallback to kerberos if the tokens are missing).
-      Only applicable when the http authentication type is kerberos.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.bind-host</name>
-    <value>0.0.0.0</value>
-    <description>Default value is 0.0.0.0, when this is set the service will bind on all interfaces.  I think these two options (blank, "0.0.0.0" sans quotes) should be the two available values, with blank as the default.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.bind-host</name>
-    <value>0.0.0.0</value>
-    <description>Default value is 0.0.0.0, when this is set the service will bind on all interfaces.  I think these two options (blank, "0.0.0.0" sans quotes) should be the two available values, with blank as the default.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.bind-host</name>
-    <value>0.0.0.0</value>
-    <description>Default value is 0.0.0.0, when this is set the service will bind on all interfaces.  I think these two options (blank, "0.0.0.0" sans quotes) should be the two available values, with blank as the default.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.node-labels.fs-store.root-dir</name>
-    <value>/system/yarn/node-labels</value>
-    <description>
-      URI for NodeLabelManager.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.minimum-allocation-vcores</name>
-    <value>1</value>
-    <description/>
-    <display-name>Minimum Container Size (VCores)</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>8</maximum>
-      <increment-step>1</increment-step>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>yarn-site</type>
-        <name>yarn.nodemanager.resource.cpu-vcores</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.maximum-allocation-vcores</name>
-    <value>8</value>
-    <description/>
-    <display-name>Maximum Container Size (VCores)</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>8</maximum>
-      <increment-step>1</increment-step>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>yarn-site</type>
-        <name>yarn.nodemanager.resource.cpu-vcores</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.node-labels.enabled</name>
-    <value>false</value>
-    <description>
-      Enable node labels to restrict YARN applications so that they run only on cluster nodes that have a specified node label.
-    </description>
-    <display-name>Node Labels</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>true</value>
-          <label>Enabled</label>
-        </entry>
-        <entry>
-          <value>false</value>
-          <label>Disabled</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.container-executor.class</name>
-    <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
-    <description>ContainerExecutor for launching containers</description>
-    <depends-on>
-      <property>
-        <type>yarn-env</type>
-        <name>yarn_cgroups_enabled</name>
-      </property>
-      <property>
-        <type>core-site</type>
-        <name>hadoop.security.authentication</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.group</name>
-    <value>hadoop</value>
-    <description>Unix group of the NodeManager</description>
-    <depends-on>
-      <property>
-        <type>yarn-env</type>
-        <name>yarn_cgroups_enabled</name>
-      </property>
-      <property>
-        <type>cluster-env</type>
-        <name>user_group</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.scheduler.monitor.enable</name>
-    <description>
-      Enable a set of periodic monitors (specified in
-      yarn.resourcemanager.scheduler.monitor.policies) that affect the
-      scheduler.
-    </description>
-    <value>false</value>
-    <display-name>Pre-emption</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>true</value>
-          <label>Enabled</label>
-        </entry>
-        <entry>
-          <value>false</value>
-          <label>Disabled</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <!-- In HDP 2.3, these properties were deleted:
-  yarn.node-labels.manager-class
-  -->
-
-  <!-- These configs were inherited from HDP 2.3 -->
-  <property>
-    <name>yarn.timeline-service.recovery.enabled</name>
-    <description>
-      Enable timeline server to recover state after starting. If
-      true, then yarn.timeline-service.state-store-class must be specified.
-    </description>
-    <value>true</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.acl.enable</name>
-    <value>false</value>
-    <description> Are acls enabled. </description>
-    <depends-on>
-      <property>
-        <type>ranger-yarn-plugin-properties</type>
-        <name>ranger-yarn-plugin-enabled</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.authorization-provider</name>
-    <description> Yarn authorization provider class. </description>
-    <depends-on>
-      <property>
-        <type>ranger-yarn-plugin-properties</type>
-        <name>ranger-yarn-plugin-enabled</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.admin.acl</name>
-    <value>yarn</value>
-    <description> ACL of who can be admin of the YARN cluster. </description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <!--ats v1.5 properties-->
-  <property>
-    <name>yarn.timeline-service.version</name>
-    <value>1.5</value>
-    <description>Timeline service version we&#x2019;re currently using.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.store-class</name>
-    <value>org.apache.hadoop.yarn.server.timeline.EntityGroupFSTimelineStore</value>
-    <description>Main storage class for YARN timeline server.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.entity-group-fs-store.active-dir</name>
-    <value>/ats/active/</value>
-    <description>DFS path to store active application&#x2019;s timeline data</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.entity-group-fs-store.done-dir</name>
-    <value>/ats/done/</value>
-    <description>DFS path to store done application&#x2019;s timeline data</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes</name>
-    <value/>
-    <description>Plugins that can translate a timeline entity read request into a list of timeline cache ids, separated by commas. </description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <!-- advanced ats v1.5 properties-->
-  <property>
-    <name>yarn.timeline-service.entity-group-fs-store.summary-store</name>
-    <description>Summary storage for ATS v1.5</description>
-    <!-- Use rolling leveldb, advanced -->
-    <value>org.apache.hadoop.yarn.server.timeline.RollingLevelDBTimelineStore</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.entity-group-fs-store.scan-interval-seconds</name>
-    <description>
-      Scan interval for the ATS v1.5 entity group file system storage reader. This
-      value controls how frequently the reader scans the HDFS active directory
-      for application status.
-    </description>
-    <!-- Default is 60 seconds, advanced -->
-    <value>60</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.entity-group-fs-store.cleaner-interval-seconds</name>
-    <description>
-      Scan interval for the ATS v1.5 entity group file system storage cleaner. This
-      value controls how frequently the cleaner scans the HDFS done directory
-      for stale application data.
-    </description>
-    <!-- 3600 is default, advanced -->
-    <value>3600</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.entity-group-fs-store.retain-seconds</name>
-    <description>
-      How long the ATS v1.5 entity group file system storage will keep an
-      application's data in the done directory.
-    </description>
-    <!-- 7 days is default, advanced -->
-    <value>604800</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <!-- These configs were inherited from HDP 2.4 -->
-  <property>
-    <name>yarn.nodemanager.aux-services.spark_shuffle.class</name>
-    <value>org.apache.spark.network.yarn.YarnShuffleService</value>
-    <description>The auxiliary service class to use for Spark</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <!-- These configs were inherited from HDP 2.5 -->
-  <property>
-    <name>yarn.nodemanager.aux-services</name>
-    <value>mapreduce_shuffle,spark_shuffle,spark2_shuffle</value>
-    <description>Auxiliary services of NodeManager. A valid service name should only contain a-zA-Z0-9_ and cannot start with numbers</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.aux-services.spark2_shuffle.class</name>
-    <value>org.apache.spark.network.yarn.YarnShuffleService</value>
-    <description>The auxiliary service class to use for Spark 2</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
   <property>
     <name>yarn.nodemanager.aux-services.spark_shuffle.classpath</name>
     <value>{{stack_root}}/${hdp.version}/spark/aux/*</value>
     <description>The auxiliary service classpath to use for Spark</description>
     <on-ambari-upgrade add="false"/>
   </property>
+
+  <!-- These configs were inherited from HDP 2.5 -->
   <property>
     <name>yarn.nodemanager.aux-services.spark2_shuffle.classpath</name>
     <value>{{stack_root}}/${hdp.version}/spark2/aux/*</value>
     <description>The auxiliary service classpath to use for Spark 2</description>
     <on-ambari-upgrade add="false"/>
   </property>
-  <property>
-    <name>yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</name>
-    <description>Defines how often NMs wake up to upload log files. The default value is -1. By default, the logs will be uploaded when the application is finished. By setting this configuration, logs can be uploaded periodically while the application is running. The minimum rolling-interval-seconds that can be set is 3600.</description>
-    <value>3600</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.container-metrics.unregister-delay-ms</name>
-    <value>60000</value>
-    <description>The delay time ms to unregister container metrics after completion.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath</name>
-    <value/>
-    <description>Classpath for all plugins defined in yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes.</description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
 </configuration>
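
After this change the HDP 3.0 yarn-site.xml carries only the two Spark shuffle classpath overrides; everything else is inherited from the parent stack's YARN definition. A minimal sketch of how a client would see the merged values, assuming yarn-site.xml is on the classpath and that hdp.version is supplied as a JVM system property (the version string below is invented):

    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class AuxServiceClasspathSketch {
      public static void main(String[] args) {
        // ${hdp.version} in the classpath values is resolved by Configuration's
        // variable expansion; on real daemons it typically arrives as -Dhdp.version=...
        System.setProperty("hdp.version", "3.0.0.0-000");   // hypothetical version

        YarnConfiguration conf = new YarnConfiguration();   // loads yarn-default.xml + yarn-site.xml
        System.out.println(conf.get("yarn.nodemanager.aux-services"));
        System.out.println(conf.get("yarn.nodemanager.aux-services.spark_shuffle.classpath"));
        System.out.println(conf.get("yarn.nodemanager.aux-services.spark2_shuffle.classpath"));
      }
    }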

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/kerberos.json
deleted file mode 100644
index e690204..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/kerberos.json
+++ /dev/null
@@ -1,278 +0,0 @@
-{
-  "services": [
-    {
-      "name": "YARN",
-      "identities": [
-        {
-          "name": "/spnego"
-        },
-        {
-          "name": "/smokeuser"
-        }
-      ],
-      "configurations": [
-        {
-          "yarn-site": {
-            "yarn.timeline-service.enabled": "true",
-            "yarn.timeline-service.http-authentication.type": "kerberos",
-            "yarn.acl.enable": "true",
-            "yarn.admin.acl": "${yarn-env/yarn_user},dr.who",
-            "yarn.timeline-service.http-authentication.signature.secret": "",
-            "yarn.timeline-service.http-authentication.signature.secret.file": "",
-            "yarn.timeline-service.http-authentication.signer.secret.provider": "",
-            "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
-            "yarn.timeline-service.http-authentication.token.validity": "",
-            "yarn.timeline-service.http-authentication.cookie.domain": "",
-            "yarn.timeline-service.http-authentication.cookie.path": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
-            "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-            "yarn.resourcemanager.proxyusers.*.groups": "",
-            "yarn.resourcemanager.proxyusers.*.hosts": "",
-            "yarn.resourcemanager.proxyusers.*.users": "",
-            "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
-            "yarn.nodemanager.linux-container-executor.cgroups.mount-path": ""
-          }
-        },
-        {
-          "core-site": {
-            "hadoop.proxyuser.${yarn-env/yarn_user}.groups": "*",
-            "hadoop.proxyuser.${yarn-env/yarn_user}.hosts": "${clusterHostInfo/rm_host}"
-          }
-        },
-        {
-          "capacity-scheduler": {
-            "yarn.scheduler.capacity.root.acl_administer_queue": "${yarn-env/yarn_user}",
-            "yarn.scheduler.capacity.root.default.acl_administer_queue": "${yarn-env/yarn_user}",
-            "yarn.scheduler.capacity.root.acl_administer_jobs": "${yarn-env/yarn_user}",
-            "yarn.scheduler.capacity.root.default.acl_administer_jobs": "${yarn-env/yarn_user}",
-            "yarn.scheduler.capacity.root.default.acl_submit_applications": "${yarn-env/yarn_user}"
-          }
-        },
-        {
-          "ranger-yarn-audit": {
-            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
-            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
-            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
-            "xasecure.audit.jaas.Client.option.storeKey": "false",
-            "xasecure.audit.jaas.Client.option.serviceName": "solr",
-            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
-          }
-        }
-      ],
-      "components": [
-        {
-          "name": "NODEMANAGER",
-          "identities": [
-            {
-              "name": "nodemanager_nm",
-              "principal": {
-                "value": "nm/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "yarn-site/yarn.nodemanager.principal",
-                "local_username": "${yarn-env/yarn_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/nm.service.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "yarn-site/yarn.nodemanager.keytab"
-              }
-            },
-            {
-              "name": "/HIVE/HIVE_SERVER/hive_server_hive",
-              "principal": {
-                "configuration": "hive-interactive-site/hive.llap.daemon.service.principal"
-              },
-              "keytab": {
-                "configuration": "hive-interactive-site/hive.llap.daemon.keytab.file"
-              },
-              "when" : {
-                "contains" : ["services", "HIVE"]
-              }
-            },
-            {
-              "name": "llap_zk_hive",
-              "principal": {
-                "value": "hive/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "hive-interactive-site/hive.llap.zk.sm.principal"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/hive.llap.zk.sm.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": "r"
-                },
-                "configuration": "hive-interactive-site/hive.llap.zk.sm.keytab.file"
-              },
-              "when" : {
-                "contains" : ["services", "HIVE"]
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-principal"
-              },
-              "keytab": {
-                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-keytab-file"
-              }
-            }
-          ],
-          "configurations": [
-            {
-              "yarn-site": {
-                "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor"
-              }
-            }
-          ]
-        },
-        {
-          "name": "RESOURCEMANAGER",
-          "identities": [
-            {
-              "name": "resource_manager_rm",
-              "principal": {
-                "value": "rm/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "yarn-site/yarn.resourcemanager.principal",
-                "local_username": "${yarn-env/yarn_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/rm.service.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "yarn-site/yarn.resourcemanager.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-principal"
-              },
-              "keytab": {
-                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file"
-              }
-            },
-            {
-              "name": "/YARN/RESOURCEMANAGER/resource_manager_rm",
-              "principal": {
-                "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.principal"
-              },
-              "keytab": {
-                "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.keyTab"
-              }
-            }
-          ]
-        },
-        {
-          "name": "APP_TIMELINE_SERVER",
-          "identities": [
-            {
-              "name": "app_timeline_server_yarn",
-              "principal": {
-                "value": "yarn/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "yarn-site/yarn.timeline-service.principal",
-                "local_username": "${yarn-env/yarn_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/yarn.service.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "yarn-site/yarn.timeline-service.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.principal"
-              },
-              "keytab": {
-                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
-              }
-            },
-            {
-              "name": "/HDFS/NAMENODE/hdfs"
-            }
-          ]
-        }
-      ]
-    },
-    {
-      "name": "MAPREDUCE2",
-      "identities": [
-        {
-          "name": "/spnego"
-        },
-        {
-          "name": "/smokeuser"
-        }
-      ],
-      "components": [
-        {
-          "name": "HISTORYSERVER",
-          "identities": [
-            {
-              "name": "/HDFS/NAMENODE/hdfs"
-            },
-            {
-              "name": "history_server_jhs",
-              "principal": {
-                "value": "jhs/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "mapred-site/mapreduce.jobhistory.principal",
-                "local_username": "${mapred-env/mapred_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/jhs.service.keytab",
-                "owner": {
-                  "name": "${mapred-env/mapred_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "mapred-site/mapreduce.jobhistory.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-principal"
-              },
-              "keytab": {
-                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file"
-              }
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/metainfo.xml
index 7e1fd78..a3a8ae9 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/metainfo.xml
@@ -20,63 +20,8 @@
   <services>
     <service>
       <name>YARN</name>
-      <displayName>YARN</displayName>
-      <version>2.7.1.3.0</version>
-      <extends>common-services/YARN/2.1.0.2.0</extends>
-
-      <components>
-        <component>
-          <name>APP_TIMELINE_SERVER</name>
-          <displayName>App Timeline Server</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <reassignAllowed>true</reassignAllowed>
-
-          <commandScript>
-            <script>scripts/application_timeline_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-
-          <dependencies>
-            <dependency>
-              <name>TEZ/TEZ_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>SPARK/SPARK_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-        </component>
-
-        <component>
-          <name>RESOURCEMANAGER</name>
-          <category>MASTER</category>
-          <cardinality>1-2</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-
-          <dependencies>
-            <dependency>
-              <name>TEZ/TEZ_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <configuration-dependencies>
-            <config-type>capacity-scheduler</config-type>
-          </configuration-dependencies>
-        </component>
-      </components>
+      <version>3.0.0.3.0</version>
+      <extends>common-services/YARN/3.0.0</extends>
 
       <osSpecifics>
         <osSpecific>
@@ -105,34 +50,12 @@
           </packages>
         </osSpecific>
       </osSpecifics>
-
-      <themes>
-        <theme>
-          <fileName>theme.json</fileName>
-          <default>true</default>
-        </theme>
-      </themes>
-
-      <quickLinksConfigurations>
-        <quickLinksConfiguration>
-          <fileName>quicklinks.json</fileName>
-          <default>true</default>
-        </quickLinksConfiguration>
-      </quickLinksConfigurations>
-
-      <configuration-dependencies>
-        <config-type>yarn-site</config-type>
-        <config-type>yarn-env</config-type>
-        <config-type>core-site</config-type>
-        <config-type>yarn-log4j</config-type>
-      </configuration-dependencies>
     </service>
 
     <service>
       <name>MAPREDUCE2</name>
       <displayName>MapReduce2</displayName>
       <version>2.7.1.3.0</version>
-      <configuration-dir>configuration-mapred</configuration-dir>
 
       <osSpecifics>
         <osSpecific>
@@ -153,21 +76,6 @@
         </osSpecific>
       </osSpecifics>
 
-      <themes-dir>themes-mapred</themes-dir>
-      <themes>
-        <theme>
-          <fileName>theme.json</fileName>
-          <default>true</default>
-        </theme>
-      </themes>
-
-      <quickLinksConfigurations-dir>quicklinks-mapred</quickLinksConfigurations-dir>
-      <quickLinksConfigurations>
-        <quickLinksConfiguration>
-          <fileName>quicklinks.json</fileName>
-          <default>true</default>
-        </quickLinksConfiguration>
-      </quickLinksConfigurations>
     </service>
   </services>
 </metainfo>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/quicklinks-mapred/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/quicklinks-mapred/quicklinks.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/quicklinks-mapred/quicklinks.json
deleted file mode 100644
index 5ffbc07..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/quicklinks-mapred/quicklinks.json
+++ /dev/null
@@ -1,80 +0,0 @@
-{
-  "name": "default",
-  "description": "default quick links configuration",
-  "configuration": {
-    "protocol":
-    {
-      "type":"https",
-      "checks":[
-        {
-          "property":"mapreduce.jobhistory.http.policy",
-          "desired":"HTTPS_ONLY",
-          "site":"mapred-site"
-        }
-      ]
-    },
-
-    "links": [
-      {
-        "name": "jobhistory_ui",
-        "label": "JobHistory UI",
-        "requires_user_name": "false",
-        "component_name": "HISTORYSERVER",
-        "url": "%@://%@:%@",
-        "port":{
-          "http_property": "mapreduce.jobhistory.webapp.address",
-          "http_default_port": "19888",
-          "https_property": "mapreduce.jobhistory.webapp.https.address",
-          "https_default_port": "8090",
-          "regex": "\\w*:(\\d+)",
-          "site": "mapred-site"
-        }
-      },
-      {
-        "name": "jobhistory_logs",
-        "label": "JobHistory logs",
-        "requires_user_name": "false",
-        "component_name": "HISTORYSERVER",
-        "url": "%@://%@:%@/logs",
-        "port":{
-          "http_property": "mapreduce.jobhistory.webapp.address",
-          "http_default_port": "19888",
-          "https_property": "mapreduce.jobhistory.webapp.https.address",
-          "https_default_port": "8090",
-          "regex": "\\w*:(\\d+)",
-          "site": "mapred-site"
-        }
-      },
-      {
-        "name":"jobhistory_jmx",
-        "label":"JobHistory JMX",
-        "requires_user_name":"false",
-        "component_name": "HISTORYSERVER",
-        "url":"%@://%@:%@/jmx",
-        "port":{
-          "http_property": "mapreduce.jobhistory.webapp.address",
-          "http_default_port": "19888",
-          "https_property": "mapreduce.jobhistory.webapp.https.address",
-          "https_default_port": "8090",
-          "regex": "\\w*:(\\d+)",
-          "site": "mapred-site"
-        }
-      },
-      {
-        "name":"thread_stacks",
-        "label":"Thread Stacks",
-        "requires_user_name": "false",
-        "component_name": "HISTORYSERVER",
-        "url":"%@://%@:%@/stacks",
-        "port":{
-          "http_property": "mapreduce.jobhistory.webapp.address",
-          "http_default_port": "19888",
-          "https_property": "mapreduce.jobhistory.webapp.https.address",
-          "https_default_port": "8090",
-          "regex": "\\w*:(\\d+)",
-          "site": "mapred-site"
-        }
-      }
-    ]
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/quicklinks/quicklinks.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/quicklinks/quicklinks.json
deleted file mode 100644
index 37248d0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/quicklinks/quicklinks.json
+++ /dev/null
@@ -1,80 +0,0 @@
-{
-  "name": "default",
-  "description": "default quick links configuration",
-  "configuration": {
-    "protocol":
-    {
-      "type":"https",
-      "checks":[
-        {
-          "property":"yarn.http.policy",
-          "desired":"HTTPS_ONLY",
-          "site":"yarn-site"
-        }
-      ]
-    },
-
-    "links": [
-      {
-        "name": "resourcemanager_ui",
-        "label": "ResourceManager UI",
-        "requires_user_name": "false",
-        "component_name": "RESOURCEMANAGER",
-        "url": "%@://%@:%@",
-        "port":{
-          "http_property": "yarn.resourcemanager.webapp.address",
-          "http_default_port": "8088",
-          "https_property": "yarn.resourcemanager.webapp.https.address",
-          "https_default_port": "8090",
-          "regex": "\\w*:(\\d+)",
-          "site": "yarn-site"
-        }
-      },
-      {
-        "name": "resourcemanager_logs",
-        "label": "ResourceManager logs",
-        "requires_user_name": "false",
-        "component_name": "RESOURCEMANAGER",
-        "url": "%@://%@:%@/logs",
-        "port":{
-          "http_property": "yarn.resourcemanager.webapp.address",
-          "http_default_port": "8088",
-          "https_property": "yarn.resourcemanager.webapp.https.address",
-          "https_default_port": "8090",
-          "regex": "\\w*:(\\d+)",
-          "site": "yarn-site"
-        }
-      },
-      {
-        "name": "resourcemanager_jmx",
-        "label":"ResourceManager JMX",
-        "requires_user_name": "false",
-        "component_name": "RESOURCEMANAGER",
-        "url":"%@://%@:%@/jmx",
-        "port":{
-          "http_property": "yarn.resourcemanager.webapp.address",
-          "http_default_port": "8088",
-          "https_property": "yarn.resourcemanager.webapp.https.address",
-          "https_default_port": "8090",
-          "regex": "\\w*:(\\d+)",
-          "site": "yarn-site"
-        }
-      },
-      {
-        "name": "thread_stacks",
-        "label":"Thread Stacks",
-        "requires_user_name": "false",
-        "component_name": "RESOURCEMANAGER",
-        "url":"%@://%@:%@/stacks",
-        "port":{
-          "http_property": "yarn.resourcemanager.webapp.address",
-          "http_default_port": "8088",
-          "https_property": "yarn.resourcemanager.webapp.https.address",
-          "https_default_port": "8090",
-          "regex": "\\w*:(\\d+)",
-          "site": "yarn-site"
-        }
-      }
-    ]
-  }
-}
\ No newline at end of file


[51/51] [abbrv] ambari git commit: Merge branch 'branch-2.5' into branch-feature-AMBARI-18901

Posted by sm...@apache.org.
Merge branch 'branch-2.5' into branch-feature-AMBARI-18901


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/35428030
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/35428030
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/35428030

Branch: refs/heads/branch-feature-AMBARI-18901
Commit: 354280307cb124f5666f2b4550e7ccc6aa2f7edc
Parents: bb8309e 93bc5d8
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Fri Dec 9 13:54:46 2016 -0800
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Fri Dec 9 13:54:46 2016 -0800

----------------------------------------------------------------------
 ambari-agent/conf/unix/ambari-agent.ini         |    3 +
 .../ambari_agent/CustomServiceOrchestrator.py   |  188 +
 ambari-agent/src/packages/tarball/all.xml       |   30 +
 ambari-server/pom.xml                           |    2 +
 .../ambari/server/agent/ExecutionCommand.java   |   61 +
 .../ambari/server/agent/HeartBeatHandler.java   |    2 +-
 .../checks/DatabaseConsistencyCheckHelper.java  |  245 +-
 .../checks/DatabaseConsistencyChecker.java      |    1 +
 .../server/configuration/Configuration.java     |    1 -
 .../AmbariManagementControllerImpl.java         |   18 +
 .../ambari/server/controller/AmbariServer.java  |    3 +
 .../orm/entities/ClusterConfigEntity.java       |   17 +
 .../ambari/server/state/ConfigHelper.java       |   45 +
 .../ambari/server/state/PropertyInfo.java       |   15 +
 .../server/upgrade/UpgradeCatalog250.java       |   30 +
 ambari-server/src/main/python/ambari-server.py  |    2 +
 .../src/main/python/ambari_server_main.py       |    3 +
 .../common-services/DRUID/0.9.2/metainfo.xml    |    4 +-
 .../FALCON/0.5.0.2.1/quicklinks/quicklinks.json |    6 +-
 .../common-services/HDFS/3.0.0/alerts.json      | 1786 ++++
 .../HDFS/3.0.0/configuration/core-site.xml      |  224 +
 .../HDFS/3.0.0/configuration/hadoop-env.xml     |  421 +
 .../hadoop-metrics2.properties.xml              |  125 +
 .../HDFS/3.0.0/configuration/hadoop-policy.xml  |  130 +
 .../HDFS/3.0.0/configuration/hdfs-log4j.xml     |  226 +
 .../3.0.0/configuration/hdfs-logsearch-conf.xml |  248 +
 .../HDFS/3.0.0/configuration/hdfs-site.xml      |  632 ++
 .../3.0.0/configuration/ranger-hdfs-audit.xml   |  217 +
 .../ranger-hdfs-plugin-properties.xml           |   98 +
 .../configuration/ranger-hdfs-policymgr-ssl.xml |   67 +
 .../configuration/ranger-hdfs-security.xml      |   65 +
 .../HDFS/3.0.0/configuration/ssl-client.xml     |   70 +
 .../HDFS/3.0.0/configuration/ssl-server.xml     |   80 +
 .../common-services/HDFS/3.0.0/kerberos.json    |  246 +
 .../common-services/HDFS/3.0.0/metainfo.xml     |  405 +
 .../common-services/HDFS/3.0.0/metrics.json     | 7905 ++++++++++++++++++
 .../package/alerts/alert_checkpoint_time.py     |  255 +
 .../alerts/alert_datanode_unmounted_data_dir.py |  177 +
 .../package/alerts/alert_ha_namenode_health.py  |  243 +
 .../package/alerts/alert_metrics_deviation.py   |  470 ++
 .../package/alerts/alert_upgrade_finalized.py   |  179 +
 .../HDFS/3.0.0/package/files/checkWebUI.py      |   83 +
 .../HDFS/3.0.0/package/scripts/__init__.py      |   20 +
 .../scripts/balancer-emulator/balancer-err.log  | 1032 +++
 .../scripts/balancer-emulator/balancer.log      |   29 +
 .../scripts/balancer-emulator/hdfs-command.py   |   45 +
 .../HDFS/3.0.0/package/scripts/datanode.py      |  178 +
 .../3.0.0/package/scripts/datanode_upgrade.py   |  156 +
 .../HDFS/3.0.0/package/scripts/hdfs.py          |  178 +
 .../HDFS/3.0.0/package/scripts/hdfs_client.py   |  122 +
 .../HDFS/3.0.0/package/scripts/hdfs_datanode.py |   85 +
 .../HDFS/3.0.0/package/scripts/hdfs_namenode.py |  562 ++
 .../3.0.0/package/scripts/hdfs_nfsgateway.py    |   75 +
 .../3.0.0/package/scripts/hdfs_rebalance.py     |  130 +
 .../3.0.0/package/scripts/hdfs_snamenode.py     |   66 +
 .../3.0.0/package/scripts/install_params.py     |   39 +
 .../HDFS/3.0.0/package/scripts/journalnode.py   |  203 +
 .../package/scripts/journalnode_upgrade.py      |  152 +
 .../HDFS/3.0.0/package/scripts/namenode.py      |  424 +
 .../3.0.0/package/scripts/namenode_ha_state.py  |  219 +
 .../3.0.0/package/scripts/namenode_upgrade.py   |  322 +
 .../HDFS/3.0.0/package/scripts/nfsgateway.py    |  151 +
 .../HDFS/3.0.0/package/scripts/params.py        |   28 +
 .../HDFS/3.0.0/package/scripts/params_linux.py  |  527 ++
 .../3.0.0/package/scripts/params_windows.py     |   79 +
 .../HDFS/3.0.0/package/scripts/service_check.py |  152 +
 .../3.0.0/package/scripts/setup_ranger_hdfs.py  |  121 +
 .../HDFS/3.0.0/package/scripts/snamenode.py     |  155 +
 .../HDFS/3.0.0/package/scripts/status_params.py |   58 +
 .../HDFS/3.0.0/package/scripts/utils.py         |  383 +
 .../HDFS/3.0.0/package/scripts/zkfc_slave.py    |  225 +
 .../package/templates/exclude_hosts_list.j2     |   21 +
 .../HDFS/3.0.0/package/templates/hdfs.conf.j2   |   35 +
 .../HDFS/3.0.0/package/templates/slaves.j2      |   21 +
 .../HDFS/3.0.0/quicklinks/quicklinks.json       |   80 +
 .../HDFS/3.0.0/themes/theme.json                |  179 +
 .../common-services/HDFS/3.0.0/widgets.json     |  649 ++
 .../YARN/3.0.0/MAPREDUCE2_metrics.json          | 2596 ++++++
 .../YARN/3.0.0/YARN_metrics.json                | 3486 ++++++++
 .../YARN/3.0.0/YARN_widgets.json                |  670 ++
 .../common-services/YARN/3.0.0/alerts.json      |  392 +
 .../3.0.0/configuration-mapred/mapred-env.xml   |  104 +
 .../mapred-logsearch-conf.xml                   |   80 +
 .../3.0.0/configuration-mapred/mapred-site.xml  |  540 ++
 .../3.0.0/configuration/capacity-scheduler.xml  |  183 +
 .../3.0.0/configuration/ranger-yarn-audit.xml   |  177 +
 .../ranger-yarn-plugin-properties.xml           |   82 +
 .../configuration/ranger-yarn-policymgr-ssl.xml |   66 +
 .../configuration/ranger-yarn-security.xml      |   58 +
 .../YARN/3.0.0/configuration/yarn-env.xml       |  306 +
 .../YARN/3.0.0/configuration/yarn-log4j.xml     |  103 +
 .../3.0.0/configuration/yarn-logsearch-conf.xml |  104 +
 .../YARN/3.0.0/configuration/yarn-site.xml      | 1151 +++
 .../common-services/YARN/3.0.0/kerberos.json    |  278 +
 .../common-services/YARN/3.0.0/metainfo.xml     |  383 +
 .../package/alerts/alert_nodemanager_health.py  |  209 +
 .../alerts/alert_nodemanagers_summary.py        |  219 +
 .../files/validateYarnComponentStatusWindows.py |  161 +
 .../YARN/3.0.0/package/scripts/__init__.py      |   20 +
 .../scripts/application_timeline_server.py      |  162 +
 .../YARN/3.0.0/package/scripts/historyserver.py |  192 +
 .../YARN/3.0.0/package/scripts/install_jars.py  |   99 +
 .../package/scripts/mapred_service_check.py     |  172 +
 .../3.0.0/package/scripts/mapreduce2_client.py  |   98 +
 .../YARN/3.0.0/package/scripts/nodemanager.py   |  166 +
 .../package/scripts/nodemanager_upgrade.py      |   74 +
 .../YARN/3.0.0/package/scripts/params.py        |   32 +
 .../YARN/3.0.0/package/scripts/params_linux.py  |  476 ++
 .../3.0.0/package/scripts/params_windows.py     |   62 +
 .../3.0.0/package/scripts/resourcemanager.py    |  293 +
 .../YARN/3.0.0/package/scripts/service.py       |  106 +
 .../YARN/3.0.0/package/scripts/service_check.py |  185 +
 .../3.0.0/package/scripts/setup_ranger_yarn.py  |   71 +
 .../YARN/3.0.0/package/scripts/status_params.py |   61 +
 .../YARN/3.0.0/package/scripts/yarn.py          |  498 ++
 .../YARN/3.0.0/package/scripts/yarn_client.py   |   67 +
 .../package/templates/container-executor.cfg.j2 |   40 +
 .../package/templates/exclude_hosts_list.j2     |   21 +
 .../3.0.0/package/templates/mapreduce.conf.j2   |   35 +
 .../package/templates/taskcontroller.cfg.j2     |   38 +
 .../YARN/3.0.0/package/templates/yarn.conf.j2   |   35 +
 .../3.0.0/quicklinks-mapred/quicklinks.json     |   80 +
 .../YARN/3.0.0/quicklinks/quicklinks.json       |   80 +
 .../YARN/3.0.0/themes-mapred/theme.json         |  132 +
 .../YARN/3.0.0/themes/theme.json                |  250 +
 .../ZOOKEEPER/3.4.9/metainfo.xml                |   51 +
 .../stacks/HDP/2.6/services/stack_advisor.py    |    4 +
 .../services/HDFS/configuration/hadoop-env.xml  |  214 +-
 .../stacks/HDP/3.0/services/HDFS/metainfo.xml   |   49 +-
 .../YARN/configuration-mapred/mapred-env.xml    |   58 +-
 .../YARN/configuration-mapred/mapred-site.xml   |   76 +-
 .../services/YARN/configuration/yarn-site.xml   |  784 +-
 .../stacks/HDP/3.0/services/YARN/metainfo.xml   |   98 +-
 .../HDP/3.0/services/ZOOKEEPER/metainfo.xml     |    6 +-
 .../server/agent/TestHeartbeatHandler.java      |   28 +-
 .../server/controller/AmbariServerTest.java     |   26 +
 .../server/upgrade/UpgradeCatalog250Test.java   |  216 -
 .../stacks/2.3/common/test_stack_advisor.py     |    6 +
 .../stacks/2.6/common/test_stack_advisor.py     |   52 +
 ambari-web/app/messages.js                      |    2 +
 140 files changed, 37109 insertions(+), 1383 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/35428030/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
index 8f10023,8f10023..7ae4353
--- a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
@@@ -2450,7 -2450,7 +2450,6 @@@ public class Configuration 
            "log4j.monitor.delay", TimeUnit.MINUTES.toMillis(5));
  
    /**
--<<<<<<< a5fdae802210ae1f8d4fed2234f1651cbe61c2b5
     * Indicates whether parallel topology task creation is enabled for blueprint cluster provisioning.
     * Defaults to <code>false</code>.
     * @see #TOPOLOGY_TASK_PARALLEL_CREATION_THREAD_COUNT

http://git-wip-us.apache.org/repos/asf/ambari/blob/35428030/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
index e50b645,ac97987..a488107
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
@@@ -330,84 -329,33 +330,114 @@@ public class UpgradeCatalog250 extends 
        new DBColumnInfo(CREDENTIAL_STORE_ENABLED_COL, Short.class, null, 0, false));
    }
  
+   protected void updateAtlasConfigs() throws AmbariException {
+     AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+     Clusters clusters = ambariManagementController.getClusters();
+     if (clusters != null) {
+       Map<String, Cluster> clusterMap = clusters.getClusters();
+       if (clusterMap != null && !clusterMap.isEmpty()) {
+         for (final Cluster cluster : clusterMap.values()) {
+           updateAtlasHookConfig(cluster, "HIVE", "hive-env", "hive.atlas.hook");
+           updateAtlasHookConfig(cluster, "STORM", "storm-env", "storm.atlas.hook");
+           updateAtlasHookConfig(cluster, "FALCON", "falcon-env", "falcon.atlas.hook");
+           updateAtlasHookConfig(cluster, "SQOOP", "sqoop-env", "sqoop.atlas.hook");
+         }
+       }
+     }
+   }
+ 
+   protected void updateAtlasHookConfig(Cluster cluster, String serviceName, String configType, String propertyName) throws AmbariException {
+     Set<String> installedServices = cluster.getServices().keySet();
+     if (installedServices.contains("ATLAS") && installedServices.contains(serviceName)) {
+       Config configEnv = cluster.getDesiredConfigByType(configType);
+       if (configEnv != null) {
+         Map<String, String> newProperties = new HashMap<>();
+         newProperties.put(propertyName, "true");
+         boolean updateProperty = configEnv.getProperties().containsKey(propertyName);
+         updateConfigurationPropertiesForCluster(cluster, configType, newProperties, updateProperty, true);
+       }
+     }
+   }
+ }
+ 
 +  /**
 +   * Updates Hive Interactive's config in hive-interactive-site.
 +   *
 +   * @throws AmbariException
 +   */
 +  protected void updateHIVEInteractiveConfigs() throws AmbariException {
 +    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
 +    Clusters clusters = ambariManagementController.getClusters();
 +    if (clusters != null) {
 +      Map<String, Cluster> clusterMap = clusters.getClusters();
 +
 +      if (clusterMap != null && !clusterMap.isEmpty()) {
 +        for (final Cluster cluster : clusterMap.values()) {
 +          Config hiveInteractiveSite = cluster.getDesiredConfigByType("hive-interactive-site");
 +          if (hiveInteractiveSite != null) {
 +            updateConfigurationProperties("hive-interactive-site", Collections.singletonMap("hive.tez.container.size",
 +                "SET_ON_FIRST_INVOCATION"), true, true);
 +
 +            updateConfigurationProperties("hive-interactive-site", Collections.singletonMap("hive.auto.convert.join.noconditionaltask.size",
 +                "1000000000"), true, true);
 +          }
 +        }
 +      }
 +    }
 +  }
 +
 +  /**
 +   * Updates Tez for Hive2 Interactive's config in tez-interactive-site.
 +   *
 +   * @throws AmbariException
 +   */
 +  protected void updateTEZInteractiveConfigs() throws AmbariException {
 +    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
 +    Clusters clusters = ambariManagementController.getClusters();
 +    if (clusters != null) {
 +      Map<String, Cluster> clusterMap = clusters.getClusters();
 +
 +      if (clusterMap != null && !clusterMap.isEmpty()) {
 +        for (final Cluster cluster : clusterMap.values()) {
 +          Config tezInteractiveSite = cluster.getDesiredConfigByType("tez-interactive-site");
 +          if (tezInteractiveSite != null) {
 +
 +            updateConfigurationProperties("tez-interactive-site", Collections.singletonMap("tez.runtime.io.sort.mb", "512"), true, true);
 +
 +            updateConfigurationProperties("tez-interactive-site", Collections.singletonMap("tez.runtime.unordered.output.buffer.size-mb",
 +                "100"), true, true);
 +          }
 +        }
 +      }
 +    }
 +  }
 +
 +  protected void updateAtlasConfigs() throws AmbariException {
 +    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
 +    Clusters clusters = ambariManagementController.getClusters();
 +    if (clusters != null) {
 +      Map<String, Cluster> clusterMap = clusters.getClusters();
 +      if (clusterMap != null && !clusterMap.isEmpty()) {
 +        for (final Cluster cluster : clusterMap.values()) {
 +          updateAtlasHookConfig(cluster, "HIVE", "hive-env", "hive.atlas.hook");
 +          updateAtlasHookConfig(cluster, "STORM", "storm-env", "storm.atlas.hook");
 +          updateAtlasHookConfig(cluster, "FALCON", "falcon-env", "falcon.atlas.hook");
 +          updateAtlasHookConfig(cluster, "SQOOP", "sqoop-env", "sqoop.atlas.hook");
 +        }
 +      }
 +    }
 +  }
 +
 +  protected void updateAtlasHookConfig(Cluster cluster, String serviceName, String configType, String propertyName) throws AmbariException {
 +      Set<String> installedServices = cluster.getServices().keySet();
 +      if (installedServices.contains("ATLAS") && installedServices.contains(serviceName)) {
 +        Config configEnv = cluster.getDesiredConfigByType(configType);
 +        if (configEnv != null) {
 +          Map<String, String> newProperties = new HashMap<>();
 +          newProperties.put(propertyName, "true");
 +          boolean updateProperty = configEnv.getProperties().containsKey(propertyName);
 +          updateConfigurationPropertiesForCluster(cluster, configType, newProperties, updateProperty, true);
 +        }
 +      }
 +    }
 +}

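The combined diff above is hard to read because both merge parents contributed the same Atlas-hook helpers, so they appear twice with different change-marker columns. The pattern they implement is compact: for each cluster, when both ATLAS and the hooked service are installed, the service's *-env config gets its *.atlas.hook property set to "true". Below is a condensed, commented sketch of that pattern; it reuses only the names visible in the diff (Cluster, Config, updateConfigurationPropertiesForCluster), the import paths and the enclosing upgrade-catalog class are assumptions for illustration, and it is not a drop-in replacement for the merged UpgradeCatalog250 code.

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Set;

    import org.apache.ambari.server.AmbariException;            // import paths assumed
    import org.apache.ambari.server.state.Cluster;               // from the Ambari tree
    import org.apache.ambari.server.state.Config;

    // Sketch of the Atlas-hook update pattern from the merge above. Assumes it sits
    // inside an upgrade catalog that provides updateConfigurationPropertiesForCluster,
    // as UpgradeCatalog250 does in the diff.
    protected void updateAtlasHookConfig(Cluster cluster, String serviceName,
                                         String configType, String propertyName)
        throws AmbariException {
      Set<String> installedServices = cluster.getServices().keySet();

      // Only touch the config when ATLAS and the hooked service are both installed.
      if (!installedServices.contains("ATLAS") || !installedServices.contains(serviceName)) {
        return;
      }

      Config configEnv = cluster.getDesiredConfigByType(configType);
      if (configEnv == null) {
        return;  // nothing to update for this config type
      }

      // Request the hook flag as "true"; as in the merged code, the update flag
      // mirrors whether the property is already present in the current config.
      Map<String, String> newProperties = new HashMap<>();
      newProperties.put(propertyName, "true");
      boolean updateProperty = configEnv.getProperties().containsKey(propertyName);
      updateConfigurationPropertiesForCluster(cluster, configType, newProperties,
          updateProperty, true);
    }

The caller in the diff simply invokes this once per hooked service, e.g. updateAtlasHookConfig(cluster, "HIVE", "hive-env", "hive.atlas.hook") and likewise for STORM, FALCON, and SQOOP.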
http://git-wip-us.apache.org/repos/asf/ambari/blob/35428030/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --cc ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/quicklinks/quicklinks.json
index cc81fca,152ff57..88e5b28
--- a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/quicklinks/quicklinks.json
+++ b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/quicklinks/quicklinks.json
@@@ -32,4 -32,4 +32,4 @@@
        }
      ]
    }
--}
++}

http://git-wip-us.apache.org/repos/asf/ambari/blob/35428030/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/35428030/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --cc ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-site.xml
index e33b91d,0f46d75..05ce191
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-site.xml
@@@ -17,767 -17,7 +17,6 @@@
    limitations under the License.
  -->
  <configuration supports_final="true">
-   <!-- These configs were inherited from HDP 2.1 -->
-   <property>
-     <name>yarn.timeline-service.enabled</name>
-     <value>true</value>
-     <description>Indicate to clients whether timeline service is enabled or not.
-       If enabled, clients will put entities and events to the timeline server.
-     </description>
-     <value-attributes>
-       <type>boolean</type>
-     </value-attributes>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.timeline-service.generic-application-history.store-class</name>
-     <value>org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore</value>
-     <description>
-       Store class name for history store, defaulting to file system store
-     </description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.timeline-service.webapp.address</name>
-     <value>localhost:8188</value>
-     <description>
-       The http address of the timeline service web application.
-     </description>
-     <on-ambari-upgrade add="false"/>
-   </property>
-   <property>
-     <name>yarn.timeline-service.webapp.https.address</name>
-     <value>localhost:8190</value>
-     <description>
-       The http address of the timeline service web application.
-     </description>
-     <on-ambari-upgrade add="false"/>
-   </property>
-   <property>
-     <name>yarn.timeline-service.address</name>
-     <value>localhost:10200</value>
-     <description>
-       This is default address for the timeline server to start
-       the RPC server.
-     </description>
-     <on-ambari-upgrade add="false"/>
-   </property>
-   <property>
-     <description>Time to live for timeline store data in milliseconds.</description>
-     <name>yarn.timeline-service.ttl-ms</name>
-     <value>2678400000</value>
-     <value-attributes>
-       <type>int</type>
-     </value-attributes>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <description>Length of time to wait between deletion cycles of leveldb timeline store in milliseconds.</description>
-     <name>yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms</name>
-     <value>300000</value>
-     <value-attributes>
-       <type>int</type>
-     </value-attributes>
-     <on-ambari-upgrade add="true"/>
-   </property>
- 
-   <!-- These configs were inherited from HDP 2.2 -->
-   <property>
-     <name>yarn.application.classpath</name>
-     <value>$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*</value>
-     <description>Classpath for typical applications.</description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>hadoop.registry.rm.enabled</name>
-     <value>false</value>
-     <description>
-       Is the registry enabled: does the RM start it up, create the user and system paths, and purge service records when containers, application attempts and applications complete
-     </description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>hadoop.registry.zk.quorum</name>
-     <value>localhost:2181</value>
-     <description>
-       List of hostname:port pairs defining the zookeeper quorum binding for the registry
-     </description>
-     <on-ambari-upgrade add="false"/>
-   </property>
-   <property>
-     <name>yarn.nodemanager.recovery.enabled</name>
-     <value>true</value>
-     <description>Enable the node manager to recover after starting</description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.nodemanager.recovery.dir</name>
-     <value>{{yarn_log_dir_prefix}}/nodemanager/recovery-state</value>
-     <description>
-       The local filesystem directory in which the node manager will store
-       state when recovery is enabled.
-     </description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.client.nodemanager-connect.retry-interval-ms</name>
-     <value>10000</value>
-     <description>Time interval between each attempt to connect to NM</description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.client.nodemanager-connect.max-wait-ms</name>
-     <value>60000</value>
-     <description>Max time to wait to establish a connection to NM</description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.resourcemanager.recovery.enabled</name>
-     <value>true</value>
-     <description>
-       Enable RM to recover state after starting.
-       If true, then yarn.resourcemanager.store.class must be specified.
-     </description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.resourcemanager.work-preserving-recovery.enabled</name>
-     <value>true</value>
-     <description>
-       Enable RM work preserving recovery. This configuration is private to YARN for experimenting the feature.
-     </description>
-     <display-name>Enable Work Preserving Restart</display-name>
-     <value-attributes>
-       <type>boolean</type>
-     </value-attributes>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.resourcemanager.store.class</name>
-     <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
-     <description>
-       The class to use as the persistent store.
-       If org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore is used,
-       the store is implicitly fenced; meaning a single ResourceManager
-       is able to use the store at any point in time.
-     </description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.resourcemanager.zk-address</name>
-     <value>localhost:2181</value>
-     <description>
-       List Host:Port of the ZooKeeper servers to be used by the RM. comma separated host:port pairs, each corresponding to a zk server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" If the optional chroot suffix is used the example would look like: "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002/app/a" where the client would be rooted at "/app/a" and all paths would be relative to this root - ie getting/setting/etc...  "/foo/bar" would result in operations being run on "/app/a/foo/bar" (from the server perspective).
-     </description>
-     <on-ambari-upgrade add="false"/>
-   </property>
-   <property>
-     <name>yarn.resourcemanager.zk-state-store.parent-path</name>
-     <value>/rmstore</value>
-     <description>Full path of the ZooKeeper znode where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore as the value for yarn.resourcemanager.store.class</description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.resourcemanager.zk-acl</name>
-     <value>world:anyone:rwcda</value>
-     <description>ACL's to be used for ZooKeeper znodes.</description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms</name>
-     <value>10000</value>
-     <description>Set the amount of time RM waits before allocating new containers on work-preserving-recovery. Such wait period gives RM a chance to settle down resyncing with NMs in the cluster on recovery, before assigning new containers to applications.</description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.resourcemanager.connect.retry-interval.ms</name>
-     <value>30000</value>
-     <description>How often to try connecting to the ResourceManager.</description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.resourcemanager.connect.max-wait.ms</name>
-     <value>900000</value>
-     <description>Maximum time to wait to establish connection to ResourceManager</description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.resourcemanager.zk-retry-interval-ms</name>
-     <value>1000</value>
-     <description>"Retry interval in milliseconds when connecting to ZooKeeper.
-       When HA is enabled, the value here is NOT used. It is generated
-       automatically from yarn.resourcemanager.zk-timeout-ms and
-       yarn.resourcemanager.zk-num-retries."
-     </description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.resourcemanager.zk-num-retries</name>
-     <value>1000</value>
-     <description>Number of times RM tries to connect to ZooKeeper.</description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.resourcemanager.zk-timeout-ms</name>
-     <value>10000</value>
-     <description>ZooKeeper session timeout in milliseconds. Session expiration is managed by the ZooKeeper cluster itself, not by the client. This value is used by the cluster to determine when the client's session expires. Expirations happens when the cluster does not hear from the client within the specified session timeout period (i.e. no heartbeat).</description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.resourcemanager.state-store.max-completed-applications</name>
-     <value>${yarn.resourcemanager.max-completed-applications}</value>
-     <description>The maximum number of completed applications RM state store keeps, less than or equals to ${yarn.resourcemanager.max-completed-applications}. By default, it equals to ${yarn.resourcemanager.max-completed-applications}. This ensures that the applications kept in the state store are consistent with the applications remembered in RM memory. Any values larger than ${yarn.resourcemanager.max-completed-applications} will be reset to ${yarn.resourcemanager.max-completed-applications}. Note that this value impacts the RM recovery performance.Typically,  a smaller value indicates better performance on RM recovery.</description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.resourcemanager.fs.state-store.retry-policy-spec</name>
-     <value>2000, 500</value>
-     <description>hdfs client retry policy specification. hdfs client retry is always enabled. Specified in pairs of sleep-time and number-of-retries and (t0, n0), (t1, n1), ..., the first n0 retries sleep t0 milliseconds on average, the following n1 retries sleep t1 milliseconds on average, and so on.</description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.resourcemanager.fs.state-store.uri</name>
-     <value> </value>
-     <description>RI pointing to the location of the FileSystem path where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore as the value for yarn.resourcemanager.store.class </description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.resourcemanager.ha.enabled</name>
-     <value>false</value>
-     <description>enable RM HA or not</description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.nodemanager.linux-container-executor.resources-handler.class</name>
-     <value>org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler</value>
-     <description>Pre-requisite to use CGroups</description>
-     <depends-on>
-       <property>
-         <type>yarn-env</type>
-         <name>yarn_cgroups_enabled</name>
-       </property>
-     </depends-on>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.nodemanager.linux-container-executor.cgroups.hierarchy</name>
-     <value>hadoop-yarn</value>
-     <description>Name of the Cgroups hierarchy under which all YARN jobs will be launched</description>
-     <depends-on>
-       <property>
-         <type>yarn-env</type>
-         <name>yarn_cgroups_enabled</name>
-       </property>
-     </depends-on>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.nodemanager.linux-container-executor.cgroups.mount</name>
-     <value>false</value>
-     <description>If true, YARN will automount the CGroup, however the directory needs to already exist; else, the cgroup should be mounted by the admin</description>
-     <depends-on>
-       <property>
-         <type>yarn-env</type>
-         <name>yarn_cgroups_enabled</name>
-       </property>
-     </depends-on>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.nodemanager.linux-container-executor.cgroups.mount-path</name>
-     <value>/cgroup</value>
-     <description>Path used by the LCE to mount cgroups if not found. This path must exist before the NodeManager is launched.</description>
-     <depends-on>
-       <property>
-         <type>yarn-env</type>
-         <name>yarn_cgroups_enabled</name>
-       </property>
-     </depends-on>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage</name>
-     <value>false</value>
-     <description>Strictly limit CPU resource usage to allocated usage even if spare CPU is available</description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.nodemanager.resource.cpu-vcores</name>
-     <value>8</value>
-     <description>Number of vcores that can be allocated
-       for containers. This is used by the RM scheduler when allocating
-       resources for containers. This is not used to limit the number of
-       CPUs used by YARN containers. If it is set to -1 and
-       yarn.nodemanager.resource.detect-hardware-capabilities is true, it is
-       automatically determined from the hardware in case of Windows and Linux.
-       In other cases, number of vcores is 8 by default.
-     </description>
-     <display-name>Number of virtual cores</display-name>
-     <value-attributes>
-       <type>int</type>
-       <minimum>0</minimum>
-       <maximum>32</maximum>
-     </value-attributes>
-     <depends-on>
-       <property>
-         <type>yarn-site</type>
-         <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
-       </property>
-     </depends-on>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
-     <value>80</value>
-     <description>The amount of CPU allocated for YARN containers - only effective when used with CGroups</description>
-     <display-name>Percentage of physical CPU allocated for all containers on a node</display-name>
-     <value-attributes>
-       <type>int</type>
-       <minimum>0</minimum>
-       <maximum>100</maximum>
-       <increment-step>1</increment-step>
-     </value-attributes>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.node-labels.fs-store.retry-policy-spec</name>
-     <value>2000, 500</value>
-     <description>
-       Retry policy used for FileSystem node label store. The policy is
-       specified by N pairs of sleep-time in milliseconds and number-of-retries
-       &quot;s1,n1,s2,n2,...&quot;.
-     </description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb</name>
-     <value>1000</value>
-     <description>This is related to disk size on the machines, admins should set one of yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb or yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage but not both. If both are set, the more conservative value will be used</description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name>
-     <value>90</value>
-     <description>This is related to disk size on the machines, admins should set one of yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb or yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage but not both. If both are set, the more conservative value will be used</description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</name>
-     <value>-1</value>
-     <description>Defines how often NMs wake up to upload log files. The default value is -1. By default, the logs will be uploaded whenthe application is finished. By setting this configure, logs can be uploaded periodically when the application is running. The minimum rolling-interval-seconds can be set is 3600.</description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.nodemanager.log-aggregation.debug-enabled</name>
-     <value>false</value>
-     <description>
-       This configuration is for debug and test purpose.
-       By setting this configuration as true.
-       We can break the lower bound of yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.nodemanager.log-aggregation.num-log-files-per-app</name>
-     <value>30</value>
-     <description>This is temporary solution. The configuration will be deleted once, we find a more scalable method to only write a single log file per LRS.</description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.resourcemanager.system-metrics-publisher.enabled</name>
-     <value>true</value>
-     <description/>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size</name>
-     <value>10</value>
-     <description>Number of worker threads that send the yarn system metrics data.</description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.timeline-service.client.max-retries</name>
-     <value>30</value>
-     <description/>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.timeline-service.client.retry-interval-ms</name>
-     <value>1000</value>
-     <description/>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.timeline-service.ttl-enable</name>
-     <value>true</value>
-     <description>
-       Enable age off of timeline store data.
-     </description>
-     <value-attributes>
-       <type>boolean</type>
-     </value-attributes>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.timeline-service.state-store-class</name>
-     <value>org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore</value>
-     <description>Store class name for timeline state store.</description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.timeline-service.leveldb-state-store.path</name>
-     <value>/hadoop/yarn/timeline</value>
-     <description>Store file name for leveldb state store.</description>
-     <value-attributes>
-       <type>directory</type>
-     </value-attributes>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.timeline-service.leveldb-timeline-store.path</name>
-     <value>/hadoop/yarn/timeline</value>
-     <description>Store file name for leveldb timeline store.</description>
-     <value-attributes>
-       <type>directory</type>
-     </value-attributes>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.timeline-service.leveldb-timeline-store.read-cache-size</name>
-     <value>104857600</value>
-     <description>
-       Size of read cache for uncompressed blocks for leveldb timeline store in bytes.
-     </description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size</name>
-     <value>10000</value>
-     <description>
-       Size of cache for recently read entity start times for leveldb timeline store in number of entities.
-     </description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size</name>
-     <value>10000</value>
-     <description>
-       Size of cache for recently written entity start times for leveldb timeline store in number of entities.
-     </description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.timeline-service.http-authentication.type</name>
-     <value>simple</value>
-     <description>
-       Defines authentication used for the Timeline Server HTTP endpoint.
-       Supported values are: simple | kerberos | $AUTHENTICATION_HANDLER_CLASSNAME
-     </description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.timeline-service.http-authentication.simple.anonymous.allowed</name>
-     <value>true</value>
-     <description/>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled</name>
-     <value>false</value>
-     <description>
-       Flag to enable override of the default kerberos authentication filter with
-       the RM authentication filter to allow authentication using delegation
-       tokens(fallback to kerberos if the tokens are missing).
-       Only applicable when the http authentication type is kerberos.
-     </description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.resourcemanager.bind-host</name>
-     <value>0.0.0.0</value>
-     <description>Default value is 0.0.0.0, when this is set the service will bind on all interfaces.  I think these two options (blank, "0.0.0.0" sans quotes) should be the two available values, with blank as the default.</description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.nodemanager.bind-host</name>
-     <value>0.0.0.0</value>
-     <description>Default value is 0.0.0.0, when this is set the service will bind on all interfaces.  I think these two options (blank, "0.0.0.0" sans quotes) should be the two available values, with blank as the default.</description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.timeline-service.bind-host</name>
-     <value>0.0.0.0</value>
-     <description>Default value is 0.0.0.0, when this is set the service will bind on all interfaces.  I think these two options (blank, "0.0.0.0" sans quotes) should be the two available values, with blank as the default.</description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.node-labels.fs-store.root-dir</name>
-     <value>/system/yarn/node-labels</value>
-     <description>
-       URI for NodeLabelManager.
-     </description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.scheduler.minimum-allocation-vcores</name>
-     <value>1</value>
-     <description/>
-     <display-name>Minimum Container Size (VCores)</display-name>
-     <value-attributes>
-       <type>int</type>
-       <minimum>0</minimum>
-       <maximum>8</maximum>
-       <increment-step>1</increment-step>
-     </value-attributes>
-     <depends-on>
-       <property>
-         <type>yarn-site</type>
-         <name>yarn.nodemanager.resource.cpu-vcores</name>
-       </property>
-     </depends-on>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.scheduler.maximum-allocation-vcores</name>
-     <value>8</value>
-     <description/>
-     <display-name>Maximum Container Size (VCores)</display-name>
-     <value-attributes>
-       <type>int</type>
-       <minimum>0</minimum>
-       <maximum>8</maximum>
-       <increment-step>1</increment-step>
-     </value-attributes>
-     <depends-on>
-       <property>
-         <type>yarn-site</type>
-         <name>yarn.nodemanager.resource.cpu-vcores</name>
-       </property>
-     </depends-on>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.node-labels.enabled</name>
-     <value>false</value>
-     <description>
-       Enable node labels to restrict YARN applications so that they run only on cluster nodes that have a specified node label.
-     </description>
-     <display-name>Node Labels</display-name>
-     <value-attributes>
-       <type>value-list</type>
-       <entries>
-         <entry>
-           <value>true</value>
-           <label>Enabled</label>
-         </entry>
-         <entry>
-           <value>false</value>
-           <label>Disabled</label>
-         </entry>
-       </entries>
-       <selection-cardinality>1</selection-cardinality>
-     </value-attributes>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.nodemanager.container-executor.class</name>
-     <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
-     <description>ContainerExecutor for launching containers</description>
-     <depends-on>
-       <property>
-         <type>yarn-env</type>
-         <name>yarn_cgroups_enabled</name>
-       </property>
-       <property>
-         <type>core-site</type>
-         <name>hadoop.security.authentication</name>
-       </property>
-     </depends-on>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.nodemanager.linux-container-executor.group</name>
-     <value>hadoop</value>
-     <description>Unix group of the NodeManager</description>
-     <depends-on>
-       <property>
-         <type>yarn-env</type>
-         <name>yarn_cgroups_enabled</name>
-       </property>
-       <property>
-         <type>cluster-env</type>
-         <name>user_group</name>
-       </property>
-     </depends-on>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.resourcemanager.scheduler.monitor.enable</name>
-     <description>
-       Enable a set of periodic monitors (specified in
-       yarn.resourcemanager.scheduler.monitor.policies) that affect the
-       scheduler.
-     </description>
-     <value>false</value>
-     <display-name>Pre-emption</display-name>
-     <value-attributes>
-       <type>value-list</type>
-       <entries>
-         <entry>
-           <value>true</value>
-           <label>Enabled</label>
-         </entry>
-         <entry>
-           <value>false</value>
-           <label>Disabled</label>
-         </entry>
-       </entries>
-       <selection-cardinality>1</selection-cardinality>
-     </value-attributes>
-     <on-ambari-upgrade add="true"/>
-   </property>
- 
-   <!-- In HDP 2.3, these properties were deleted:
-   yarn.node-labels.manager-class
-   -->
- 
-   <!-- These configs were inherited from HDP 2.3 -->
-   <property>
-     <name>yarn.timeline-service.recovery.enabled</name>
-     <description>
-       Enable timeline server to recover state after starting. If
-       true, then yarn.timeline-service.state-store-class must be specified.
-     </description>
-     <value>true</value>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.acl.enable</name>
-     <value>false</value>
-     <description>Whether ACLs are enabled.</description>
-     <depends-on>
-       <property>
-         <type>ranger-yarn-plugin-properties</type>
-         <name>ranger-yarn-plugin-enabled</name>
-       </property>
-     </depends-on>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.authorization-provider</name>
-     <description> Yarn authorization provider class. </description>
-     <depends-on>
-       <property>
-         <type>ranger-yarn-plugin-properties</type>
-         <name>ranger-yarn-plugin-enabled</name>
-       </property>
-     </depends-on>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.admin.acl</name>
-     <value>yarn</value>
-     <description> ACL of who can be admin of the YARN cluster. </description>
-     <value-attributes>
-       <empty-value-valid>true</empty-value-valid>
-     </value-attributes>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <!--ats v1.5 properties-->
-   <property>
-     <name>yarn.timeline-service.version</name>
-     <value>1.5</value>
-     <description>Timeline service version we&#x2019;re currently using.</description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.timeline-service.store-class</name>
-     <value>org.apache.hadoop.yarn.server.timeline.EntityGroupFSTimelineStore</value>
-     <description>Main storage class for YARN timeline server.</description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.timeline-service.entity-group-fs-store.active-dir</name>
-     <value>/ats/active/</value>
-     <description>DFS path to store active application&#x2019;s timeline data</description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.timeline-service.entity-group-fs-store.done-dir</name>
-     <value>/ats/done/</value>
-     <description>DFS path to store done application&#x2019;s timeline data</description>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes</name>
-     <value/>
-     <description>Comma-separated list of plugins that can translate a timeline entity read request into a list of timeline cache ids.</description>
-     <value-attributes>
-       <empty-value-valid>true</empty-value-valid>
-     </value-attributes>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <!-- advanced ats v1.5 properties-->
-   <property>
-     <name>yarn.timeline-service.entity-group-fs-store.summary-store</name>
-     <description>Summary storage for ATS v1.5</description>
-     <!-- Use rolling leveldb, advanced -->
-     <value>org.apache.hadoop.yarn.server.timeline.RollingLevelDBTimelineStore</value>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.timeline-service.entity-group-fs-store.scan-interval-seconds</name>
-     <description>
-       Scan interval for ATS v1.5 entity group file system storage reader. This
-       value controls how frequently the reader will scan the HDFS active directory
-       for application status.
-     </description>
-     <!-- Default is 60 seconds, advanced -->
-     <value>60</value>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.timeline-service.entity-group-fs-store.cleaner-interval-seconds</name>
-     <description>
-       Scan interval for ATS v1.5 entity group file system storage cleaner. This
-       value controls how frequently the cleaner will scan the HDFS done directory
-       for stale application data.
-     </description>
-     <!-- 3600 is default, advanced -->
-     <value>3600</value>
-     <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-     <name>yarn.timeline-service.entity-group-fs-store.retain-seconds</name>
-     <description>
-       How long the ATS v1.5 entity group file system storage will keep an
-       application's data in the done directory.
-     </description>
-     <!-- 7 days is default, advanced -->
-     <value>604800</value>
-     <on-ambari-upgrade add="true"/>
-   </property>
- 
-   <!-- These configs were inherited from HDP 2.4 -->
-   <property>
-     <name>yarn.nodemanager.aux-services.spark_shuffle.class</name>
-     <value>org.apache.spark.network.yarn.YarnShuffleService</value>
-     <description>The auxiliary service class to use for Spark</description>
-     <on-ambari-upgrade add="true"/>
-   </property>
--
-   <!-- These configs were inherited from HDP 2.5 -->
-   <property>
-     <name>yarn.nodemanager.aux-services</name>
-     <value>mapreduce_shuffle,spark_shuffle,spark2_shuffle</value>
-     <description>Auxiliary services of NodeManager. A valid service name should only contain a-zA-Z0-9_ and cannot start with numbers.</description>
-     <on-ambari-upgrade add="false"/>
-   </property>
-   <property>
-     <name>yarn.nodemanager.aux-services.spark2_shuffle.class</name>
-     <value>org.apache.spark.network.yarn.YarnShuffleService</value>
-     <description>The auxiliary service class to use for Spark 2</description>
-     <on-ambari-upgrade add="false"/>
-   </property>
    <property>
      <name>yarn.nodemanager.aux-services.spark_shuffle.classpath</name>
      <value>{{stack_root}}/${hdp.version}/spark/aux/*</value>

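Each of the yarn-site properties above follows the same stack-configuration shape: a name, a value, a description, and optional value-attributes and depends-on hints. As a rough, standalone illustration of how that shape can be read outside Ambari, the sketch below parses property name/value pairs with the Python standard library; the sample XML string and the read_properties helper are illustrative assumptions, not part of this patch.

import xml.etree.ElementTree as ET

SAMPLE = """
<configuration>
  <property>
    <name>yarn.node-labels.enabled</name>
    <value>false</value>
    <description>Enable node labels.</description>
  </property>
</configuration>
"""

def read_properties(xml_text):
  # Collect name -> value for every <property> element.
  props = {}
  for prop in ET.fromstring(xml_text).findall("property"):
    name = prop.findtext("name")
    value = prop.findtext("value", default="")
    props[name] = value
  return props

if __name__ == "__main__":
  print(read_properties(SAMPLE))
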
http://git-wip-us.apache.org/repos/asf/ambari/blob/35428030/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --cc ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/metainfo.xml
index 7e1fd78,a3a8ae9..6395eab
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/metainfo.xml
@@@ -153,21 -76,6 +76,6 @@@
          </osSpecific>
        </osSpecifics>
  
-       <themes-dir>themes-mapred</themes-dir>
-       <themes>
-         <theme>
-           <fileName>theme.json</fileName>
-           <default>true</default>
-         </theme>
-       </themes>
- 
-       <quickLinksConfigurations-dir>quicklinks-mapred</quickLinksConfigurations-dir>
-       <quickLinksConfigurations>
-         <quickLinksConfiguration>
-           <fileName>quicklinks.json</fileName>
-           <default>true</default>
-         </quickLinksConfiguration>
-       </quickLinksConfigurations>
      </service>
    </services>
--</metainfo>
++</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/35428030/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
----------------------------------------------------------------------


[35/51] [abbrv] ambari git commit: AMBARI-19137. HDP 3.0 TP - move ZK, HDFS, YARN/MR into new common-services version (alejandro)

Posted by sm...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/balancer-emulator/balancer.log
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/balancer-emulator/balancer.log b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/balancer-emulator/balancer.log
new file mode 100644
index 0000000..2010c02
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/balancer-emulator/balancer.log
@@ -0,0 +1,29 @@
+Time Stamp               Iteration#  Bytes Already Moved  Bytes Left To Move  Bytes Being Moved
+Jul 28, 2014 5:01:49 PM           0                  0 B             5.74 GB            9.79 GB
+Jul 28, 2014 5:03:00 PM           1                  0 B             5.58 GB            9.79 GB
+Jul 28, 2014 5:04:07 PM           2                  0 B             5.40 GB            9.79 GB
+Jul 28, 2014 5:05:14 PM           3                  0 B             5.06 GB            9.79 GB
+Jul 28, 2014 5:05:50 PM           4                  0 B             5.06 GB            9.79 GB
+Jul 28, 2014 5:06:56 PM           5                  0 B             4.81 GB            9.79 GB
+Jul 28, 2014 5:07:33 PM           6                  0 B             4.80 GB            9.79 GB
+Jul 28, 2014 5:09:11 PM           7                  0 B             4.29 GB            9.79 GB
+Jul 28, 2014 5:09:47 PM           8                  0 B             4.29 GB            9.79 GB
+Jul 28, 2014 5:11:24 PM           9                  0 B             3.89 GB            9.79 GB
+Jul 28, 2014 5:12:00 PM          10                  0 B             3.86 GB            9.79 GB
+Jul 28, 2014 5:13:37 PM          11                  0 B             3.23 GB            9.79 GB
+Jul 28, 2014 5:15:13 PM          12                  0 B             2.53 GB            9.79 GB
+Jul 28, 2014 5:15:49 PM          13                  0 B             2.52 GB            9.79 GB
+Jul 28, 2014 5:16:25 PM          14                  0 B             2.51 GB            9.79 GB
+Jul 28, 2014 5:17:01 PM          15                  0 B             2.39 GB            9.79 GB
+Jul 28, 2014 5:17:37 PM          16                  0 B             2.38 GB            9.79 GB
+Jul 28, 2014 5:18:14 PM          17                  0 B             2.31 GB            9.79 GB
+Jul 28, 2014 5:18:50 PM          18                  0 B             2.30 GB            9.79 GB
+Jul 28, 2014 5:19:26 PM          19                  0 B             2.21 GB            9.79 GB
+Jul 28, 2014 5:20:02 PM          20                  0 B             2.10 GB            9.79 GB
+Jul 28, 2014 5:20:38 PM          21                  0 B             2.06 GB            9.79 GB
+Jul 28, 2014 5:22:14 PM          22                  0 B             1.68 GB            9.79 GB
+Jul 28, 2014 5:23:20 PM          23                  0 B             1.00 GB            9.79 GB
+Jul 28, 2014 5:23:56 PM          24                  0 B          1016.16 MB            9.79 GB
+Jul 28, 2014 5:25:33 PM          25                  0 B            30.55 MB            9.79 GB
+The cluster is balanced. Exiting...
+Balancing took 24.858033333333335 minutes

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/balancer-emulator/hdfs-command.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/balancer-emulator/hdfs-command.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/balancer-emulator/hdfs-command.py
new file mode 100644
index 0000000..88529b4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/balancer-emulator/hdfs-command.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+import time
+import sys
+from threading import Thread
+
+
+def write_function(path, handle, interval):
+  with open(path) as f:
+      for line in f:
+          handle.write(line)
+          handle.flush()
+          time.sleep(interval)
+          
+thread = Thread(target =  write_function, args = ('balancer.out', sys.stdout, 1.5))
+thread.start()
+
+threaderr = Thread(target =  write_function, args = ('balancer.err', sys.stderr, 1.5 * 0.023))
+threaderr.start()
+
+thread.join()  
+
+
+def rebalancer_out():
+  write_function('balancer.out', sys.stdout, 1.5)
+
+def rebalancer_err():
+  write_function('balancer.err', sys.stderr, 1.5 * 0.023)
\ No newline at end of file

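The balancer emulator above simply replays pre-recorded balancer output on background threads, writing one recorded line at a time with a fixed delay so callers see realistic streaming output. The following is a minimal, self-contained sketch of that replay pattern using only the standard library; the replay function name and the generated sample file are assumptions for illustration, not code from the patch.

import sys
import time
from threading import Thread

def replay(path, handle, interval):
  # Stream a recorded log file line by line with a fixed delay,
  # flushing so consumers see output as it is "produced".
  with open(path) as f:
    for line in f:
      handle.write(line)
      handle.flush()
      time.sleep(interval)

if __name__ == "__main__":
  # Create a tiny stand-in recording so the example runs anywhere;
  # the real emulator ships pre-recorded balancer.out/balancer.err files.
  with open("balancer.out", "w") as f:
    f.write("iteration 0: 5.74 GB left to move\n")
    f.write("The cluster is balanced. Exiting...\n")
  t = Thread(target=replay, args=("balancer.out", sys.stdout, 0.5))
  t.start()
  t.join()
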
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/datanode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/datanode.py
new file mode 100644
index 0000000..130c021
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/datanode.py
@@ -0,0 +1,178 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import datanode_upgrade
+from hdfs_datanode import datanode
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, FILE_TYPE_XML
+from resource_management.core.logger import Logger
+from hdfs import hdfs
+from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons import OSConst
+from utils import get_hdfs_binary
+
+class DataNode(Script):
+
+  def get_component_name(self):
+    return "hadoop-hdfs-datanode"
+
+  def get_hdfs_binary(self):
+    """
+    Get the name or path to the hdfs binary depending on the component name.
+    """
+    component_name = self.get_component_name()
+    return get_hdfs_binary(component_name)
+
+
+  def install(self, env):
+    import params
+    env.set_params(params)
+    self.install_packages(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hdfs("datanode")
+    datanode(action="configure")
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    datanode(action="start")
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    # pre-upgrade steps shut down the DataNode, so a normal stop is only needed if that shutdown did not happen or did not succeed
+
+    hdfs_binary = self.get_hdfs_binary()
+    if upgrade_type == "rolling":
+      stopped = datanode_upgrade.pre_rolling_upgrade_shutdown(hdfs_binary)
+      if not stopped:
+        datanode(action="stop")
+    else:
+      datanode(action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    datanode(action = "status")
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class DataNodeDefault(DataNode):
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing DataNode Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-hdfs-datanode", params.version)
+
+  def post_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing DataNode Stack Upgrade post-restart")
+    import params
+    env.set_params(params)
+    hdfs_binary = self.get_hdfs_binary()
+    # ensure the DataNode has started and rejoined the cluster
+    datanode_upgrade.post_upgrade_check(hdfs_binary)
+
+  def security_status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    props_value_check = {"hadoop.security.authentication": "kerberos",
+                         "hadoop.security.authorization": "true"}
+    props_empty_check = ["hadoop.security.auth_to_local"]
+    props_read_check = None
+    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
+                                                props_read_check)
+    props_value_check = None
+    props_empty_check = ['dfs.datanode.keytab.file',
+                         'dfs.datanode.kerberos.principal']
+    props_read_check = ['dfs.datanode.keytab.file']
+    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
+                                                props_read_check)
+
+    hdfs_expectations = {}
+    hdfs_expectations.update(core_site_expectations)
+    hdfs_expectations.update(hdfs_site_expectations)
+
+    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
+                                                 {'core-site.xml': FILE_TYPE_XML,
+                                                  'hdfs-site.xml': FILE_TYPE_XML})
+
+    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
+        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
+      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
+      if not result_issues:  # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ('hdfs-site' not in security_params or
+                  'dfs.datanode.keytab.file' not in security_params['hdfs-site'] or
+                  'dfs.datanode.kerberos.principal' not in security_params['hdfs-site']):
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal are not set property."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.hdfs_user,
+                                security_params['hdfs-site']['dfs.datanode.keytab.file'],
+                                security_params['hdfs-site']['dfs.datanode.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+      
+  def get_log_folder(self):
+    import params
+    return params.hdfs_log_dir
+  
+  def get_user(self):
+    import params
+    return params.hdfs_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.datanode_pid_file]
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class DataNodeWindows(DataNode):
+  def install(self, env):
+    import install_params
+    self.install_packages(env)
+
+if __name__ == "__main__":
+  DataNode().execute()

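datanode.py above is driven through Ambari's Script lifecycle: the agent invokes install, configure, start, stop, or status, and OS-family subclasses override behavior where needed. As a loose sketch of that command-dispatch idea (not the real resource_management Script API), the standalone example below maps a command-line argument to a lifecycle method; all names in it are illustrative assumptions.

import sys

class LifecycleScript(object):
  # Minimal stand-in for a command-driven service script:
  # the first CLI argument selects which lifecycle method runs.
  def install(self):
    print("installing packages")

  def configure(self):
    print("writing configuration files")

  def start(self):
    self.configure()
    print("starting daemon")

  def stop(self):
    print("stopping daemon")

  def status(self):
    print("checking pid file")

  def execute(self, command):
    handler = getattr(self, command, None)
    if handler is None:
      raise SystemExit("unknown command: %s" % command)
    handler()

if __name__ == "__main__":
  LifecycleScript().execute(sys.argv[1] if len(sys.argv) > 1 else "status")
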
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/datanode_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/datanode_upgrade.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/datanode_upgrade.py
new file mode 100644
index 0000000..b55237d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/datanode_upgrade.py
@@ -0,0 +1,156 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import re
+
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+from resource_management.core.resources.system import Execute
+from resource_management.core import shell
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.decorator import retry
+from resource_management.libraries.functions import check_process_status
+from resource_management.core import ComponentIsNotRunning
+from utils import get_dfsadmin_base_command
+
+
+def pre_rolling_upgrade_shutdown(hdfs_binary):
+  """
+  Runs the "shutdownDatanode {ipc_address} upgrade" command to shutdown the
+  DataNode in preparation for an upgrade. This will then periodically check
+  "getDatanodeInfo" to ensure the DataNode has shutdown correctly.
+  This function will obtain the Kerberos ticket if security is enabled.
+  :param hdfs_binary: name/path of the HDFS binary to use
+  :return: True if the shutdown command ran (even with errors); False if the DataNode needs to be stopped forcefully.
+  """
+  import params
+
+  Logger.info('DataNode executing "shutdownDatanode" command in preparation for upgrade...')
+  if params.security_enabled:
+    Execute(params.dn_kinit_cmd, user = params.hdfs_user)
+
+  dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
+  command = format('{dfsadmin_base_command} -shutdownDatanode {dfs_dn_ipc_address} upgrade')
+
+  code, output = shell.call(command, user=params.hdfs_user)
+  if code == 0:
+    # verify that the datanode is down
+    _check_datanode_shutdown(hdfs_binary)
+  else:
+    # Due to bug HDFS-7533, DataNode may not always shutdown during stack upgrade, and it is necessary to kill it.
+    if output is not None and re.search("Shutdown already in progress", output):
+      Logger.error("Due to a known issue in DataNode, the command {0} did not work, so will need to shutdown the datanode forcefully.".format(command))
+      return False
+  return True
+
+
+def post_upgrade_check(hdfs_binary):
+  """
+  Verifies that the DataNode has rejoined the cluster. This function will
+  obtain the Kerberos ticket if security is enabled.
+  :param hdfs_binary: name/path of the HDFS binary to use
+  :return:
+  """
+  import params
+
+  Logger.info("Checking that the DataNode has rejoined the cluster after upgrade...")
+  if params.security_enabled:
+    Execute(params.dn_kinit_cmd, user=params.hdfs_user)
+
+  # verify that the datanode has started and rejoined the HDFS cluster
+  _check_datanode_startup(hdfs_binary)
+
+
+def is_datanode_process_running():
+  import params
+  try:
+    check_process_status(params.datanode_pid_file)
+    return True
+  except ComponentIsNotRunning:
+    return False
+
+@retry(times=24, sleep_time=5, err_class=Fail)
+def _check_datanode_shutdown(hdfs_binary):
+  """
+  Checks that a DataNode is down by running "hdfs dfsadmin -getDatanodeInfo"
+  several times, pausing in between runs. Once the DataNode stops responding
+  this method will return, otherwise it will raise a Fail(...) and retry
+  automatically.
+  The stack defaults for retrying for HDFS are also way too slow for this
+  command; they are set to wait about 45 seconds between client retries. As
+  a result, a single execution of dfsadmin will take 45 seconds to retry and
+  the DataNode may be marked as dead, causing problems with HBase.
+  https://issues.apache.org/jira/browse/HDFS-8510 tracks reducing the
+  times for ipc.client.connect.retry.interval. In the meantime, override them
+  here, but only for RU.
+  :param hdfs_binary: name/path of the HDFS binary to use
+  :return:
+  """
+  import params
+
+  # override stock retry timeouts since after 30 seconds, the datanode is
+  # marked as dead and can affect HBase during RU
+  dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
+  command = format('{dfsadmin_base_command} -D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo {dfs_dn_ipc_address}')
+
+  try:
+    Execute(command, user=params.hdfs_user, tries=1)
+  except:
+    Logger.info("DataNode has successfully shutdown for upgrade.")
+    return
+
+  Logger.info("DataNode has not shutdown.")
+  raise Fail('DataNode has not shutdown.')
+
+
+@retry(times=30, sleep_time=30, err_class=Fail) # keep trying for 15 mins
+def _check_datanode_startup(hdfs_binary):
+  """
+  Checks that a DataNode process is running and DataNode is reported as being alive via the
+  "hdfs dfsadmin -fs {namenode_address} -report -live" command. Once the DataNode is found to be
+  alive this method will return, otherwise it will raise a Fail(...) and retry
+  automatically.
+  :param hdfs_binary: name/path of the HDFS binary to use
+  :return:
+  """
+
+  if not is_datanode_process_running():
+    Logger.info("DataNode process is not running")
+    raise Fail("DataNode process is not running")
+
+  import params
+  import socket
+
+  try:
+    dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
+    command = dfsadmin_base_command + ' -report -live'
+    return_code, hdfs_output = shell.call(command, user=params.hdfs_user)
+  except:
+    raise Fail('Unable to determine if the DataNode has started after upgrade.')
+
+  if return_code == 0:
+    hostname = params.hostname.lower()
+    hostname_ip =  socket.gethostbyname(params.hostname.lower())
+    if hostname in hdfs_output.lower() or hostname_ip in hdfs_output.lower():
+      Logger.info("DataNode {0} reports that it has rejoined the cluster.".format(params.hostname))
+      return
+    else:
+      raise Fail("DataNode {0} was not found in the list of live DataNodes".format(params.hostname))
+
+  # return_code is not 0, fail
+  raise Fail("Unable to determine if the DataNode has started after upgrade (result code {0})".format(str(return_code)))

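The shutdown and startup checks above lean on a retry decorator (e.g. @retry(times=24, sleep_time=5, err_class=Fail)) that re-runs a probe until it stops raising, sleeping between attempts. Below is a minimal, self-contained sketch of such a decorator using only the standard library; it illustrates the pattern rather than the resource_management implementation, and the probe function is a placeholder.

import time
import functools

def retry(times, sleep_time, err_class=Exception):
  # Re-run the wrapped probe until it returns without raising err_class,
  # sleeping between attempts; re-raise after the final attempt.
  def decorator(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
      for attempt in range(times):
        try:
          return func(*args, **kwargs)
        except err_class:
          if attempt == times - 1:
            raise
          time.sleep(sleep_time)
    return wrapper
  return decorator

@retry(times=3, sleep_time=1)
def probe():
  # Placeholder check; the real scripts shell out to "hdfs dfsadmin".
  raise RuntimeError("service still responding")

if __name__ == "__main__":
  try:
    probe()
  except RuntimeError as e:
    print("gave up: %s" % e)
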
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/hdfs.py
new file mode 100644
index 0000000..d9b62e2
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/hdfs.py
@@ -0,0 +1,178 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management.libraries.script.script import Script
+from resource_management.core.resources.system import Directory, File, Link
+from resource_management.core.resources import Package
+from resource_management.core.source import Template
+from resource_management.core.resources.service import ServiceConfig
+from resource_management.libraries.resources.xml_config import XmlConfig
+import os
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def hdfs(name=None):
+  import params
+
+  if params.create_lib_snappy_symlinks:
+    install_snappy()
+  
+  # On some OSes this directory may not exist, so create it before placing files in it
+  Directory(params.limits_conf_dir,
+            create_parents = True,
+            owner='root',
+            group='root'
+  )
+
+  File(os.path.join(params.limits_conf_dir, 'hdfs.conf'),
+       owner='root',
+       group='root',
+       mode=0644,
+       content=Template("hdfs.conf.j2")
+  )
+
+  if params.security_enabled:
+    tc_mode = 0644
+    tc_owner = "root"
+  else:
+    tc_mode = None
+    tc_owner = params.hdfs_user
+
+  if "hadoop-policy" in params.config['configurations']:
+    XmlConfig("hadoop-policy.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations']['hadoop-policy'],
+              configuration_attributes=params.config['configuration_attributes']['hadoop-policy'],
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+
+  if "ssl-client" in params.config['configurations']:
+    XmlConfig("ssl-client.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations']['ssl-client'],
+              configuration_attributes=params.config['configuration_attributes']['ssl-client'],
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+
+    Directory(params.hadoop_conf_secure_dir,
+              create_parents = True,
+              owner='root',
+              group=params.user_group,
+              cd_access='a',
+              )
+
+    XmlConfig("ssl-client.xml",
+              conf_dir=params.hadoop_conf_secure_dir,
+              configurations=params.config['configurations']['ssl-client'],
+              configuration_attributes=params.config['configuration_attributes']['ssl-client'],
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+
+  if "ssl-server" in params.config['configurations']:
+    XmlConfig("ssl-server.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations']['ssl-server'],
+              configuration_attributes=params.config['configuration_attributes']['ssl-server'],
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+
+  XmlConfig("hdfs-site.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['hdfs-site'],
+            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+            owner=params.hdfs_user,
+            group=params.user_group
+  )
+
+  XmlConfig("core-site.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['core-site'],
+            configuration_attributes=params.config['configuration_attributes']['core-site'],
+            owner=params.hdfs_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  File(os.path.join(params.hadoop_conf_dir, 'slaves'),
+       owner=tc_owner,
+       content=Template("slaves.j2")
+  )
+  
+  if params.lzo_enabled and len(params.lzo_packages) > 0:
+      Package(params.lzo_packages,
+              retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
+              retry_count=params.agent_stack_retry_count)
+      
+def install_snappy():
+  import params
+  Directory([params.so_target_dir_x86, params.so_target_dir_x64],
+            create_parents = True,
+  )    
+  Link(params.so_target_x86,
+       to=params.so_src_x86,
+  )
+  Link(params.so_target_x64,
+       to=params.so_src_x64,
+  )
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def hdfs(component=None):
+  import params
+  if component == "namenode":
+    directories = params.dfs_name_dir.split(",")
+    Directory(directories,
+              owner=params.hdfs_user,
+              mode="(OI)(CI)F",
+              create_parents = True
+    )
+    File(params.exclude_file_path,
+         content=Template("exclude_hosts_list.j2"),
+         owner=params.hdfs_user,
+         mode="f",
+         )
+  if params.service_map.has_key(component):
+    service_name = params.service_map[component]
+    ServiceConfig(service_name,
+                  action="change_user",
+                  username=params.hdfs_user,
+                  password=Script.get_password(params.hdfs_user))
+
+  if "hadoop-policy" in params.config['configurations']:
+    XmlConfig("hadoop-policy.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations']['hadoop-policy'],
+              owner=params.hdfs_user,
+              mode="f",
+              configuration_attributes=params.config['configuration_attributes']['hadoop-policy']
+    )
+
+  XmlConfig("hdfs-site.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['hdfs-site'],
+            owner=params.hdfs_user,
+            mode="f",
+            configuration_attributes=params.config['configuration_attributes']['hdfs-site']
+  )

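hdfs.py above renders configuration dictionaries into Hadoop *-site.xml files through the XmlConfig resource. The sketch below shows, with the standard library only, the general shape of turning a flat dict into that <configuration>/<property> layout; the to_site_xml helper and sample values are assumptions for illustration and do not reproduce XmlConfig's exact output (attributes, escaping, headers).

import xml.etree.ElementTree as ET

def to_site_xml(properties):
  # Render a flat dict into Hadoop-style <configuration> XML,
  # the same overall shape written for core-site/hdfs-site.
  root = ET.Element("configuration")
  for name, value in sorted(properties.items()):
    prop = ET.SubElement(root, "property")
    ET.SubElement(prop, "name").text = name
    ET.SubElement(prop, "value").text = str(value)
  return ET.tostring(root).decode("utf-8")

if __name__ == "__main__":
  print(to_site_xml({"dfs.replication": 3, "dfs.permissions.enabled": "true"}))
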
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/hdfs_client.py
new file mode 100644
index 0000000..4dabdbc
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/hdfs_client.py
@@ -0,0 +1,122 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+from hdfs import hdfs
+from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons import OSConst
+from resource_management.core.exceptions import ClientComponentHasNoStatus
+
+class HdfsClient(Script):
+
+  def install(self, env):
+    import params
+    env.set_params(params)
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hdfs()
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HdfsClientDefault(HdfsClient):
+
+  def get_component_name(self):
+    return "hadoop-client"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-client", params.version)
+
+  def security_status(self, env):
+    import status_params
+    env.set_params(status_params)
+
+    props_value_check = {"hadoop.security.authentication": "kerberos",
+                         "hadoop.security.authorization": "true"}
+    props_empty_check = ["hadoop.security.auth_to_local"]
+    props_read_check = None
+    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
+                                                props_read_check)
+    hdfs_expectations = {}
+    hdfs_expectations.update(core_site_expectations)
+
+    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
+                                                   {'core-site.xml': FILE_TYPE_XML})
+
+    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
+        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
+      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
+      if not result_issues: # If all validations passed successfully
+        if status_params.hdfs_user_principal or status_params.hdfs_user_keytab:
+          try:
+            cached_kinit_executor(status_params.kinit_path_local,
+                       status_params.hdfs_user,
+                       status_params.hdfs_user_keytab,
+                       status_params.hdfs_user_principal,
+                       status_params.hostname,
+                       status_params.tmp_dir)
+            self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+          except Exception as e:
+            self.put_structured_out({"securityState": "ERROR"})
+            self.put_structured_out({"securityStateErrorInfo": str(e)})
+        else:
+          self.put_structured_out({"securityIssuesFound": "hdfs principal and/or keytab file is not specified"})
+          self.put_structured_out({"securityState": "UNSECURED"})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HdfsClientWindows(HdfsClient):
+  def install(self, env):
+    import install_params
+    self.install_packages(env)
+    self.configure(env)
+
+if __name__ == "__main__":
+  HdfsClient().execute()

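The security_status method above builds "expectations" (required property values and keys that must not be empty) and reports any configuration file that fails validation. A minimal stand-alone sketch of that expectation check is shown below; the check_expectations helper and the sample core-site dict are illustrative assumptions, not the build_expectations/validate_security_config_properties API.

def check_expectations(config, value_checks, non_empty_keys):
  # Return a list of human-readable issues for one parsed *-site file:
  # wrong values first, then required-but-empty keys.
  issues = []
  for key, expected in value_checks.items():
    if config.get(key) != expected:
      issues.append("%s should be %r, found %r" % (key, expected, config.get(key)))
  for key in non_empty_keys:
    if not config.get(key):
      issues.append("%s must not be empty" % key)
  return issues

if __name__ == "__main__":
  core_site = {"hadoop.security.authentication": "kerberos",
               "hadoop.security.authorization": "false",
               "hadoop.security.auth_to_local": ""}
  for issue in check_expectations(core_site,
                                  {"hadoop.security.authentication": "kerberos",
                                   "hadoop.security.authorization": "true"},
                                  ["hadoop.security.auth_to_local"]):
    print(issue)
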
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/hdfs_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/hdfs_datanode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/hdfs_datanode.py
new file mode 100644
index 0000000..2d3d4f5
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/hdfs_datanode.py
@@ -0,0 +1,85 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+from resource_management.core.resources.system import Directory, Execute, File
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.mounted_dirs_helper import handle_mounted_dirs
+from utils import service
+from ambari_commons.os_family_impl import OsFamilyImpl, OsFamilyFuncImpl
+from ambari_commons import OSConst
+
+
+def create_dirs(data_dir):
+  """
+  :param data_dir: The directory to create
+  """
+  import params
+  Directory(data_dir,
+            create_parents = True,
+            cd_access="a",
+            mode=0755,
+            owner=params.hdfs_user,
+            group=params.user_group,
+            ignore_failures=True
+  )
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def datanode(action=None):
+  if action == "configure":
+    import params
+    Directory(params.dfs_domain_socket_dir,
+              create_parents = True,
+              mode=0751,
+              owner=params.hdfs_user,
+              group=params.user_group)
+
+    # handle_mounted_dirs ensures that we don't create dfs data dirs which are temporarily unavailable (unmounted) and intended to reside on a different mount.
+    data_dir_to_mount_file_content = handle_mounted_dirs(create_dirs, params.dfs_data_dirs, params.data_dir_mount_file, params)
+    # create a history file used by handle_mounted_dirs
+    File(params.data_dir_mount_file,
+         owner=params.hdfs_user,
+         group=params.user_group,
+         mode=0644,
+         content=data_dir_to_mount_file_content
+    )
+
+  elif action == "start" or action == "stop":
+    import params
+    service(
+      action=action, name="datanode",
+      user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True
+    )
+  elif action == "status":
+    import status_params
+    check_process_status(status_params.datanode_pid_file)
+
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def datanode(action=None):
+  if action == "configure":
+    pass
+  elif(action == "start" or action == "stop"):
+    import params
+    Service(params.datanode_win_service_name, action=action)
+  elif action == "status":
+    import status_params
+    check_windows_service_status(status_params.datanode_win_service_name)
\ No newline at end of file

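handle_mounted_dirs, used above, avoids recreating DataNode data directories whose backing disk is currently unmounted by comparing each directory's current mount point against a previously recorded one. The sketch below illustrates that idea in simplified form with the standard library; the function names and the example history dict are assumptions, and the real helper also rewrites the history file and honors additional flags.

import os

def mount_point(path):
  # Walk up from the path until we hit a mount boundary (or "/").
  path = os.path.abspath(path)
  while not os.path.ismount(path):
    parent = os.path.dirname(path)
    if parent == path:
      break
    path = parent
  return path

def dirs_safe_to_create(data_dirs, last_known_mounts):
  # Keep only dirs whose current mount matches what was recorded earlier,
  # or that have never been seen before; anything else is likely an
  # unmounted disk and should be skipped rather than recreated on "/".
  safe = []
  for d in data_dirs:
    recorded = last_known_mounts.get(d)
    current = mount_point(d)
    if recorded is None or recorded == current:
      safe.append(d)
  return safe

if __name__ == "__main__":
  print(dirs_safe_to_create(["/tmp/dfs/data"], {"/tmp/dfs/data": "/"}))
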
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/hdfs_namenode.py
new file mode 100644
index 0000000..23119f0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/hdfs_namenode.py
@@ -0,0 +1,562 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os.path
+import time
+
+from resource_management.core import shell
+from resource_management.core.source import Template
+from resource_management.core.resources.system import File, Execute, Directory
+from resource_management.core.resources.service import Service
+from resource_management.libraries.functions import namenode_ha_utils
+from resource_management.libraries.functions.decorator import retry
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.resources.execute_hadoop import ExecuteHadoop
+from resource_management.libraries.functions import Direction
+from ambari_commons import OSCheck, OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl, OsFamilyFuncImpl
+from utils import get_dfsadmin_base_command
+
+if OSCheck.is_windows_family():
+  from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
+
+from resource_management.core.exceptions import Fail
+from resource_management.core.logger import Logger
+
+from utils import service, safe_zkfc_op, is_previous_fs_image
+from setup_ranger_hdfs import setup_ranger_hdfs, create_ranger_audit_hdfs_directories
+
+import namenode_upgrade
+
+def wait_for_safemode_off(hdfs_binary, afterwait_sleep=0, execute_kinit=False):
+  """
+  During NonRolling (aka Express Upgrade), after starting NameNode, which is still in safemode, and then starting
+  all of the DataNodes, we need the NameNode to receive all of the block reports and leave safemode.
+  If HA is present, then this command will run individually on each NameNode, which checks for its own address.
+  """
+  import params
+
+  retries = 115
+  sleep_seconds = 10
+  sleep_minutes = int(sleep_seconds * retries / 60)
+
+  Logger.info("Waiting up to {0} minutes for the NameNode to leave Safemode...".format(sleep_minutes))
+
+  if params.security_enabled and execute_kinit:
+    kinit_command = format("{params.kinit_path_local} -kt {params.hdfs_user_keytab} {params.hdfs_principal_name}")
+    Execute(kinit_command, user=params.hdfs_user, logoutput=True)
+
+  try:
+    # Note, this fails if namenode_address isn't prefixed with "params."
+
+    dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary, use_specific_namenode=True)
+    is_namenode_safe_mode_off = dfsadmin_base_command + " -safemode get | grep 'Safe mode is OFF'"
+
+    # Wait up to 30 mins
+    Execute(is_namenode_safe_mode_off, tries=retries, try_sleep=sleep_seconds,
+      user=params.hdfs_user, logoutput=True)
+
+    # Wait a bit more since YARN still depends on block reports coming in.
+    # Also saw intermittent errors with HBASE service check if it was done too soon.
+    time.sleep(afterwait_sleep)
+  except Fail:
+    Logger.error("The NameNode is still in Safemode. Please be careful with commands that need Safemode OFF.")
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def namenode(action=None, hdfs_binary=None, do_format=True, upgrade_type=None,
+    upgrade_suspended=False, env=None):
+
+  if action is None:
+    raise Fail('"action" parameter is required for function namenode().')
+
+  if action in ["start", "stop"] and hdfs_binary is None:
+    raise Fail('"hdfs_binary" parameter is required for function namenode().')
+
+  if action == "configure":
+    import params
+    # we need this directory to be present before any action (HA manual steps for
+    # an additional namenode)
+    create_name_dirs(params.dfs_name_dir)
+  elif action == "start":
+    Logger.info("Called service {0} with upgrade_type: {1}".format(action, str(upgrade_type)))
+    setup_ranger_hdfs(upgrade_type=upgrade_type)
+    import params
+    if do_format and not params.hdfs_namenode_format_disabled:
+      format_namenode()
+      pass
+
+    File(params.exclude_file_path,
+         content=Template("exclude_hosts_list.j2"),
+         owner=params.hdfs_user,
+         group=params.user_group
+    )
+
+    if params.dfs_ha_enabled and \
+      params.dfs_ha_namenode_standby is not None and \
+      params.hostname == params.dfs_ha_namenode_standby:
+        # if the current host is the standby NameNode in an HA deployment
+        # run the bootstrap command, to start the NameNode in standby mode
+        # this requires that the active NameNode is already up and running,
+        # so this execute should be re-tried upon failure, up to a timeout
+        success = bootstrap_standby_namenode(params)
+        if not success:
+          raise Fail("Could not bootstrap standby namenode")
+
+    if upgrade_type == "rolling" and params.dfs_ha_enabled:
+      # Most likely, ZKFC is up since RU will initiate the failover command. However, if that failed, it would have tried
+      # to kill ZKFC manually, so we need to start it if not already running.
+      safe_zkfc_op(action, env)
+
+    options = ""
+    if upgrade_type == "rolling":
+      if params.upgrade_direction == Direction.UPGRADE:
+        options = "-rollingUpgrade started"
+      elif params.upgrade_direction == Direction.DOWNGRADE:
+        options = "-rollingUpgrade downgrade"
+    elif upgrade_type == "nonrolling":
+      is_previous_image_dir = is_previous_fs_image()
+      Logger.info("Previous file system image dir present is {0}".format(str(is_previous_image_dir)))
+
+      if params.upgrade_direction == Direction.UPGRADE:
+        options = "-rollingUpgrade started"
+      elif params.upgrade_direction == Direction.DOWNGRADE:
+        options = "-rollingUpgrade downgrade"
+    elif upgrade_type is None and upgrade_suspended is True:
+      # the rollingUpgrade flag must be passed in during a suspended upgrade when starting NN
+      if os.path.exists(namenode_upgrade.get_upgrade_in_progress_marker()):
+        options = "-rollingUpgrade started"
+      else:
+        Logger.info("The NameNode upgrade marker file {0} does not exist, yet an upgrade is currently suspended. "
+                    "Assuming that the upgrade of NameNode has not occurred yet.".format(namenode_upgrade.get_upgrade_in_progress_marker()))
+
+    Logger.info("Options for start command are: {0}".format(options))
+
+    service(
+      action="start",
+      name="namenode",
+      user=params.hdfs_user,
+      options=options,
+      create_pid_dir=True,
+      create_log_dir=True
+    )
+
+    if params.security_enabled:
+      Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
+              user = params.hdfs_user)
+
+    # ___Scenario___________|_Expected safemode state__|_Wait for safemode OFF____|
+    # no-HA                 | ON -> OFF                | Yes                      |
+    # HA and active         | ON -> OFF                | Yes                      |
+    # HA and standby        | no change                | No                       |
+    # RU with HA on active  | ON -> OFF                | Yes                      |
+    # RU with HA on standby | ON -> OFF                | Yes                      |
+    # EU with HA on active  | ON -> OFF                | No                       |
+    # EU with HA on standby | ON -> OFF                | No                       |
+    # EU non-HA             | ON -> OFF                | No                       |
+
+    # because we do things like create directories after starting NN,
+    # the vast majority of the time this should be True - it should only
+    # be False if this is HA and we are the Standby NN
+    ensure_safemode_off = True
+
+    # True if this is the only NameNode (non-HA) or if its the Active one in HA
+    is_active_namenode = True
+
+    if params.dfs_ha_enabled:
+      Logger.info("Waiting for the NameNode to broadcast whether it is Active or Standby...")
+
+      if is_this_namenode_active() is False:
+        # we are the STANDBY NN
+        is_active_namenode = False
+
+        # we are the STANDBY NN and this restart is not part of an upgrade
+        if upgrade_type is None:
+          ensure_safemode_off = False
+
+
+    # During an Express Upgrade, NameNode will not leave SafeMode until the DataNodes are started,
+    # so always disable the Safemode check
+    if upgrade_type == "nonrolling":
+      ensure_safemode_off = False
+
+    # some informative logging separate from the above logic to keep things a little cleaner
+    if ensure_safemode_off:
+      Logger.info("Waiting for this NameNode to leave Safemode due to the following conditions: HA: {0}, isActive: {1}, upgradeType: {2}".format(
+        params.dfs_ha_enabled, is_active_namenode, upgrade_type))
+    else:
+      Logger.info("Skipping Safemode check due to the following conditions: HA: {0}, isActive: {1}, upgradeType: {2}".format(
+        params.dfs_ha_enabled, is_active_namenode, upgrade_type))
+
+
+    # wait for Safemode to end
+    if ensure_safemode_off:
+      wait_for_safemode_off(hdfs_binary)
+
+    # Always run this on the "Active" NN unless Safemode has been ignored
+    # in the case where safemode was ignored (like during an express upgrade), then
+    # NN will be in SafeMode and cannot have directories created
+    if is_active_namenode and ensure_safemode_off:
+      create_hdfs_directories()
+      create_ranger_audit_hdfs_directories()
+    else:
+      Logger.info("Skipping creation of HDFS directories since this is either not the Active NameNode or we did not wait for Safemode to finish.")
+
+  elif action == "stop":
+    import params
+    service(
+      action="stop", name="namenode", 
+      user=params.hdfs_user
+    )
+  elif action == "status":
+    import status_params
+    check_process_status(status_params.namenode_pid_file)
+  elif action == "decommission":
+    decommission()
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def namenode(action=None, hdfs_binary=None, do_format=True, upgrade_type=None,
+    upgrade_suspended=False, env=None):
+
+  if action is None:
+    raise Fail('"action" parameter is required for function namenode().')
+
+  if action in ["start", "stop"] and hdfs_binary is None:
+    raise Fail('"hdfs_binary" parameter is required for function namenode().')
+
+  if action == "configure":
+    pass
+  elif action == "start":
+    import params
+    #TODO: Replace with format_namenode()
+    namenode_format_marker = os.path.join(params.hadoop_conf_dir,"NN_FORMATTED")
+    if not os.path.exists(namenode_format_marker):
+      hadoop_cmd = "cmd /C %s" % (os.path.join(params.hadoop_home, "bin", "hadoop.cmd"))
+      Execute("%s namenode -format" % (hadoop_cmd))
+      open(namenode_format_marker, 'a').close()
+    Service(params.namenode_win_service_name, action=action)
+  elif action == "stop":
+    import params
+    Service(params.namenode_win_service_name, action=action)
+  elif action == "status":
+    import status_params
+    check_windows_service_status(status_params.namenode_win_service_name)
+  elif action == "decommission":
+    decommission()
+
+def create_name_dirs(directories):
+  import params
+
+  dirs = directories.split(",")
+  Directory(dirs,
+            mode=0755,
+            owner=params.hdfs_user,
+            group=params.user_group,
+            create_parents = True,
+            cd_access="a",
+  )
+
+
+def create_hdfs_directories():
+  import params
+
+  params.HdfsResource(params.hdfs_tmp_dir,
+                       type="directory",
+                       action="create_on_execute",
+                       owner=params.hdfs_user,
+                       mode=0777,
+  )
+  params.HdfsResource(params.smoke_hdfs_user_dir,
+                       type="directory",
+                       action="create_on_execute",
+                       owner=params.smoke_user,
+                       mode=params.smoke_hdfs_user_mode,
+  )
+  params.HdfsResource(None, 
+                      action="execute",
+  )
+
+def format_namenode(force=None):
+  import params
+
+  old_mark_dir = params.namenode_formatted_old_mark_dirs
+  mark_dir = params.namenode_formatted_mark_dirs
+  dfs_name_dir = params.dfs_name_dir
+  hdfs_user = params.hdfs_user
+  hadoop_conf_dir = params.hadoop_conf_dir
+
+  if not params.dfs_ha_enabled:
+    if force:
+      ExecuteHadoop('namenode -format',
+                    bin_dir=params.hadoop_bin_dir,
+                    conf_dir=hadoop_conf_dir)
+    else:
+      if not is_namenode_formatted(params):
+        Execute(format("hdfs --config {hadoop_conf_dir} namenode -format -nonInteractive"),
+                user = params.hdfs_user,
+                path = [params.hadoop_bin_dir]
+        )
+        for m_dir in mark_dir:
+          Directory(m_dir,
+            create_parents = True
+          )
+  else:
+    if params.dfs_ha_namenode_active is not None and \
+       params.hostname == params.dfs_ha_namenode_active:
+      # check and run the format command in the HA deployment scenario
+      # only format the "active" namenode in an HA deployment
+      if force:
+        ExecuteHadoop('namenode -format',
+                      bin_dir=params.hadoop_bin_dir,
+                      conf_dir=hadoop_conf_dir)
+      else:
+        nn_name_dirs = params.dfs_name_dir.split(',')
+        if not is_namenode_formatted(params):
+          try:
+            Execute(format("hdfs --config {hadoop_conf_dir} namenode -format -nonInteractive"),
+                    user = params.hdfs_user,
+                    path = [params.hadoop_bin_dir]
+            )
+          except Fail:
+            # We need to clean-up mark directories, so we can re-run format next time.
+            for nn_name_dir in nn_name_dirs:
+              Execute(format("rm -rf {nn_name_dir}/*"),
+                      user = params.hdfs_user,
+              )
+            raise
+          for m_dir in mark_dir:
+            Directory(m_dir,
+              create_parents = True
+            )
+
+def is_namenode_formatted(params):
+  old_mark_dirs = params.namenode_formatted_old_mark_dirs
+  mark_dirs = params.namenode_formatted_mark_dirs
+  nn_name_dirs = params.dfs_name_dir.split(',')
+  marked = False
+  # Check if name directories have been marked as formatted
+  for mark_dir in mark_dirs:
+    if os.path.isdir(mark_dir):
+      marked = True
+      Logger.info(format("{mark_dir} exists. Namenode DFS already formatted"))
+    
+  # Ensure that all mark dirs are created for all name directories
+  if marked:
+    for mark_dir in mark_dirs:
+      Directory(mark_dir,
+        create_parents = True
+      )      
+    return marked  
+  
+  # Move all old format markers to new place
+  for old_mark_dir in old_mark_dirs:
+    if os.path.isdir(old_mark_dir):
+      for mark_dir in mark_dirs:
+        Execute(('cp', '-ar', old_mark_dir, mark_dir),
+                sudo = True
+        )
+        marked = True
+      Directory(old_mark_dir,
+        action = "delete"
+      )    
+    elif os.path.isfile(old_mark_dir):
+      for mark_dir in mark_dirs:
+        Directory(mark_dir,
+                  create_parents = True,
+        )
+      Directory(old_mark_dir,
+        action = "delete"
+      )
+      marked = True
+      
+  if marked:
+    return True
+
+  # Check if name dirs are not empty
+  for name_dir in nn_name_dirs:
+    code, out = shell.call(("ls", name_dir))
+    dir_exists_and_valid = bool(not code)
+
+    if not dir_exists_and_valid: # covers the case where the disk exists but is failing at the moment (ls: reading directory ...: Input/output error)
+      Logger.info(format("NameNode will not be formatted because the directory {name_dir} is missing or cannot be checked for content. {out}"))
+      return True
+
+    try:
+      Execute(format("ls {name_dir} | wc -l  | grep -q ^0$"),
+      )
+    except Fail:
+      Logger.info(format("NameNode will not be formatted since {name_dir} exists and contains content"))
+      return True
+       
+  return False
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def decommission():
+  import params
+
+  hdfs_user = params.hdfs_user
+  conf_dir = params.hadoop_conf_dir
+  user_group = params.user_group
+  nn_kinit_cmd = params.nn_kinit_cmd
+  
+  File(params.exclude_file_path,
+       content=Template("exclude_hosts_list.j2"),
+       owner=hdfs_user,
+       group=user_group
+  )
+  
+  if not params.update_exclude_file_only:
+    Execute(nn_kinit_cmd,
+            user=hdfs_user
+    )
+
+    if params.dfs_ha_enabled:
+      # due to a bug in hdfs, refreshNodes will not run on both namenodes so we
+      # need to execute each command scoped to a particular namenode
+      nn_refresh_cmd = format('dfsadmin -fs hdfs://{namenode_rpc} -refreshNodes')
+    else:
+      nn_refresh_cmd = format('dfsadmin -fs {namenode_address} -refreshNodes')
+    ExecuteHadoop(nn_refresh_cmd,
+                  user=hdfs_user,
+                  conf_dir=conf_dir,
+                  bin_dir=params.hadoop_bin_dir)
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def decommission():
+  import params
+  hdfs_user = params.hdfs_user
+  conf_dir = params.hadoop_conf_dir
+
+  File(params.exclude_file_path,
+       content=Template("exclude_hosts_list.j2"),
+       owner=hdfs_user
+  )
+
+  if params.dfs_ha_enabled:
+    # due to a bug in hdfs, refreshNodes will not run on both namenodes so we
+    # need to execute each command scoped to a particular namenode
+    nn_refresh_cmd = format('cmd /c hadoop dfsadmin -fs hdfs://{namenode_rpc} -refreshNodes')
+  else:
+    nn_refresh_cmd = format('cmd /c hadoop dfsadmin -fs {namenode_address} -refreshNodes')
+  Execute(nn_refresh_cmd, user=hdfs_user)
+
+
+def bootstrap_standby_namenode(params, use_path=False):
+  mark_dirs = params.namenode_bootstrapped_mark_dirs
+  bin_path = os.path.join(params.hadoop_bin_dir, '') if use_path else ""
+  bootstrapped = False
+  try:
+    iterations = 50
+    bootstrap_cmd = format("{bin_path}hdfs namenode -bootstrapStandby -nonInteractive")
+    # Blueprint-based deployments start both NNs in parallel, and occasionally
+    # the first attempt to bootstrap may fail. Depending on how it fails, the
+    # second attempt may not succeed (e.g. it may find the folder and decide that
+    # bootstrap succeeded). The solution is to call with the -force option, but only
+    # during the initial start
+    if params.command_phase == "INITIAL_START":
+      # force bootstrap in INITIAL_START phase
+      bootstrap_cmd = format("{bin_path}hdfs namenode -bootstrapStandby -nonInteractive -force")
+    elif is_namenode_bootstrapped(params):
+      # Once out of the INITIAL_START phase, bootstrap only if we couldn't bootstrap during cluster deployment
+      return True
+    Logger.info("Boostrapping standby namenode: %s" % (bootstrap_cmd))
+    for i in range(iterations):
+      Logger.info('Try %d out of %d' % (i+1, iterations))
+      code, out = shell.call(bootstrap_cmd, logoutput=False, user=params.hdfs_user)
+      if code == 0:
+        Logger.info("Standby namenode bootstrapped successfully")
+        bootstrapped = True
+        break
+      elif code == 5:
+        Logger.info("Standby namenode already bootstrapped")
+        bootstrapped = True
+        break
+      else:
+        Logger.warning('Bootstrap standby namenode failed with error code %d. Will retry' % (code))
+  except Exception as ex:
+    Logger.error('Bootstrap standby namenode threw an exception. Reason %s' %(str(ex)))
+  if bootstrapped:
+    for mark_dir in mark_dirs:
+      Directory(mark_dir,
+                create_parents = True
+                )
+  return bootstrapped
+
+def is_namenode_bootstrapped(params):
+  mark_dirs = params.namenode_bootstrapped_mark_dirs
+  nn_name_dirs = params.dfs_name_dir.split(',')
+  marked = False
+  # Check if name directories have been marked as bootstrapped
+  for mark_dir in mark_dirs:
+    if os.path.isdir(mark_dir):
+      marked = True
+      Logger.info(format("{mark_dir} exists. Standby Namenode already bootstrapped"))
+      break
+
+  # Ensure that all mark dirs are created for all name directories
+  if marked:
+    for mark_dir in mark_dirs:
+      Directory(mark_dir,
+                create_parents = True
+                )
+
+  return marked
+
+
+@retry(times=125, sleep_time=5, backoff_factor=2, err_class=Fail)
+def is_this_namenode_active():
+  """
+  Gets whether the current NameNode is Active. This function will wait until the NameNode is
+  listed as either Active or Standby before returning a value. This ensures that, if the other
+  NameNode is Active, this NameNode has fully loaded and registered before the other NameNode
+  is restarted. Otherwise we could detect the other NameNode as Active before this NameNode has
+  fully booted; if that Active NameNode is then restarted, there can be a loss of service while
+  this NameNode has not yet entered Standby.
+  """
+  import params
+
+  # get_namenode_states() returns a tuple of three lists - (active, standby, unknown) - e.g.
+  #   ([('nn1', 'c6401.ambari.apache.org:50070')], [('nn2', 'c6402.ambari.apache.org:50070')], [])
+  # or, if neither NameNode has transitioned to Active yet,
+  #   ([], [('nn1', 'c6401.ambari.apache.org:50070'), ('nn2', 'c6402.ambari.apache.org:50070')], [])
+  #
+  namenode_states = namenode_ha_utils.get_namenode_states(params.hdfs_site, params.security_enabled,
+    params.hdfs_user, times=5, sleep_time=5, backoff_factor=2)
+
+  # unwraps [('nn1', 'c6401.ambari.apache.org:50070')]
+  active_namenodes = [] if len(namenode_states[0]) < 1 else namenode_states[0]
+
+  # unwraps [('nn2', 'c6402.ambari.apache.org:50070')]
+  standby_namenodes = [] if len(namenode_states[1]) < 1 else namenode_states[1]
+
+  # check to see if this is the active NameNode
+  for entry in active_namenodes:
+    if params.namenode_id in entry:
+      return True
+
+  # if this is not the active NameNode, then we must wait for it to register as standby
+  for entry in standby_namenodes:
+    if params.namenode_id in entry:
+      return False
+
+  # at this point, this NameNode is neither active nor standby - we must wait to ensure it
+  # enters at least one of these roles before returning a verdict - the @retry decorator will
+  # catch this failure and retry the function automatically
+  raise Fail(format("The NameNode {namenode_id} is not listed as Active or Standby, waiting..."))
\ No newline at end of file
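
For reference, the start-action decision documented in the scenario table above reduces to two booleans. The following is a minimal standalone sketch of that logic; the function name and assert-based checks are illustrative only and not part of hdfs_namenode.py (upgrade_type follows the script: None means a plain restart, "nonrolling" an express upgrade).

  # Sketch of the ensure_safemode_off / directory-creation decision from the
  # start action above; names here are illustrative, not Ambari API.
  def safemode_decision(dfs_ha_enabled, is_active_namenode, upgrade_type):
    """Return (wait_for_safemode_off, create_hdfs_directories)."""
    ensure_safemode_off = True
    if dfs_ha_enabled and not is_active_namenode and upgrade_type is None:
      # standby NN outside of an upgrade: do not wait on safemode
      ensure_safemode_off = False
    if upgrade_type == "nonrolling":
      # express upgrade: NN stays in safemode until DataNodes start
      ensure_safemode_off = False
    return ensure_safemode_off, (is_active_namenode and ensure_safemode_off)

  assert safemode_decision(False, True, None) == (True, True)            # no-HA restart
  assert safemode_decision(True, False, None) == (False, False)          # HA standby
  assert safemode_decision(True, True, "nonrolling") == (False, False)   # EU on active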

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/hdfs_nfsgateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/hdfs_nfsgateway.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/hdfs_nfsgateway.py
new file mode 100644
index 0000000..672312a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/hdfs_nfsgateway.py
@@ -0,0 +1,75 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.core.exceptions import Fail
+from resource_management.core.logger import Logger
+from resource_management.core.resources import Directory
+from resource_management.core import shell
+from utils import service
+import subprocess,os
+
+# NFS GATEWAY is always started by root using jsvc due to rpcbind bugs
+# on Linux such as CentOS6.2. https://bugzilla.redhat.com/show_bug.cgi?id=731542
+
+def prepare_rpcbind():
+  Logger.info("check if native nfs server is running")
+  p, output = shell.call("pgrep nfsd")
+  if p == 0 :
+    Logger.info("native nfs server is running. shutting it down...")
+    # shutdown nfs
+    shell.call("service nfs stop")
+    shell.call("service nfs-kernel-server stop")
+    Logger.info("check if the native nfs server is down...")
+    p, output = shell.call("pgrep nfsd")
+    if p == 0 :
+      raise Fail("Failed to shutdown native nfs service")
+
+  Logger.info("check if rpcbind or portmap is running")
+  p, output = shell.call("pgrep rpcbind")
+  q, output = shell.call("pgrep portmap")
+
+  if p!=0 and q!=0 :
+    Logger.info("no portmap or rpcbind running. starting one...")
+    p, output = shell.call(("service", "rpcbind", "start"), sudo=True)
+    q, output = shell.call(("service", "portmap", "start"), sudo=True)
+    if p!=0 and q!=0 :
+      raise Fail("Failed to start rpcbind or portmap")
+
+  Logger.info("now we are ready to start nfs gateway")
+
+
+def nfsgateway(action=None, format=False):
+  import params
+
+  if action== "start":
+    prepare_rpcbind()
+
+  if action == "configure":
+    Directory(params.nfs_file_dump_dir,
+              owner = params.hdfs_user,
+              group = params.user_group,
+    )
+  elif action == "start" or action == "stop":
+    service(
+      action=action,
+      name="nfs3",
+      user=params.root_user,
+      create_pid_dir=True,
+      create_log_dir=True
+    )
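
prepare_rpcbind() above relies on shell.call returning a (returncode, output) pair and on pgrep exiting 0 when a matching process exists. Below is a rough standalone equivalent of the nfsd probe using only the standard library; it is a sketch, not the resource_management helper.

  # Standalone sketch of the "is the kernel NFS server running?" probe above,
  # using subprocess instead of resource_management.core.shell.
  import subprocess

  def native_nfsd_running():
    proc = subprocess.Popen(["pgrep", "nfsd"],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    proc.communicate()
    return proc.returncode == 0  # pgrep exits 0 when at least one nfsd process matched

  if native_nfsd_running():
    print("native nfs server is running; stop it before starting the NFS gateway")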

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/hdfs_rebalance.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/hdfs_rebalance.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/hdfs_rebalance.py
new file mode 100644
index 0000000..1dc545e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/hdfs_rebalance.py
@@ -0,0 +1,130 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import re
+
+class HdfsParser():
+  def __init__(self):
+    self.initialLine = None
+    self.state = None
+  
+  def parseLine(self, line):
+    hdfsLine = HdfsLine()
+    type, matcher = hdfsLine.recognizeType(line)
+    if(type == HdfsLine.LineType.HeaderStart):
+      self.state = 'PROCESS_STARTED'
+    elif (type == HdfsLine.LineType.Progress):
+      self.state = 'PROGRESS'
+      hdfsLine.parseProgressLog(line, matcher)
+      if(self.initialLine == None): self.initialLine = hdfsLine
+      
+      return hdfsLine 
+    elif (type == HdfsLine.LineType.ProgressEnd):
+      self.state = 'PROCESS_FINISED'
+    return None
+    
+class HdfsLine():
+  
+  class LineType:
+    HeaderStart, Progress, ProgressEnd, Unknown = range(4)
+  
+  
+  MEMORY_SUFFIX = ['B','KB','MB','GB','TB','PB','EB']
+  MEMORY_PATTERN = '(?P<memmult_%d>(?P<memory_%d>(\d+)(.|,)?(\d+)?) (?P<mult_%d>'+"|".join(MEMORY_SUFFIX)+'))'
+  
+  HEADER_BEGIN_PATTERN = re.compile('Time Stamp\w+Iteration#\w+Bytes Already Moved\w+Bytes Left To Move\w+Bytes Being Moved')
+  PROGRESS_PATTERN = re.compile(
+                            "(?P<date>.*?)\s+" + 
+                            "(?P<iteration>\d+)\s+" + 
+                            MEMORY_PATTERN % (1,1,1) + "\s+" + 
+                            MEMORY_PATTERN % (2,2,2) + "\s+" +
+                            MEMORY_PATTERN % (3,3,3)
+                            )
+  PROGRESS_END_PATTERN = re.compile('(The cluster is balanced. Exiting...|The cluster is balanced. Exiting...)')
+  
+  def __init__(self):
+    self.date = None
+    self.iteration = None
+    self.bytesAlreadyMoved = None 
+    self.bytesLeftToMove = None
+    self.bytesBeingMoved = None 
+    self.bytesAlreadyMovedStr = None 
+    self.bytesLeftToMoveStr = None
+    self.bytesBeingMovedStr = None 
+  
+  def recognizeType(self, line):
+    for (type, pattern) in (
+                            (HdfsLine.LineType.HeaderStart, self.HEADER_BEGIN_PATTERN),
+                            (HdfsLine.LineType.Progress, self.PROGRESS_PATTERN), 
+                            (HdfsLine.LineType.ProgressEnd, self.PROGRESS_END_PATTERN)
+                            ):
+      m = re.match(pattern, line)
+      if m:
+        return type, m
+    return HdfsLine.LineType.Unknown, None
+    
+  def parseProgressLog(self, line, m):
+    '''
+    Parse the line of 'hdfs rebalancer' output. The example output being parsed:
+    
+    Time Stamp               Iteration#  Bytes Already Moved  Bytes Left To Move  Bytes Being Moved
+    Jul 28, 2014 5:01:49 PM           0                  0 B             5.74 GB            9.79 GB
+    Jul 28, 2014 5:03:00 PM           1                  0 B             5.58 GB            9.79 GB
+    
+    Throws AmbariException in case of parsing errors
+
+    '''
+    m = re.match(self.PROGRESS_PATTERN, line)
+    if m:
+      self.date = m.group('date') 
+      self.iteration = int(m.group('iteration'))
+       
+      self.bytesAlreadyMoved = self.parseMemory(m.group('memory_1'), m.group('mult_1')) 
+      self.bytesLeftToMove = self.parseMemory(m.group('memory_2'), m.group('mult_2')) 
+      self.bytesBeingMoved = self.parseMemory(m.group('memory_3'), m.group('mult_3'))
+       
+      self.bytesAlreadyMovedStr = m.group('memmult_1') 
+      self.bytesLeftToMoveStr = m.group('memmult_2')
+      self.bytesBeingMovedStr = m.group('memmult_3') 
+    else:
+      raise AmbariException("Failed to parse line [%s]") 
+  
+  def parseMemory(self, memorySize, multiplier_type):
+    try:
+      factor = self.MEMORY_SUFFIX.index(multiplier_type)
+    except ValueError:
+      raise AmbariException("Failed to memory value [%s %s]" % (memorySize, multiplier_type))
+    
+    return float(memorySize) * (1024 ** factor)
+  def toJson(self):
+    return {
+            'timeStamp' : self.date,
+            'iteration' : self.iteration,
+            
+            'dataMoved': self.bytesAlreadyMovedStr,
+            'dataLeft' : self.bytesLeftToMoveStr,
+            'dataBeingMoved': self.bytesBeingMovedStr,
+            
+            'bytesMoved': self.bytesAlreadyMoved,
+            'bytesLeft' : self.bytesLeftToMove,
+            'bytesBeingMoved': self.bytesBeingMoved,
+          }
+  def __str__(self):
+    return "[ date=%s,iteration=%d, bytesAlreadyMoved=%d, bytesLeftToMove=%d, bytesBeingMoved=%d]"%(self.date, self.iteration, self.bytesAlreadyMoved, self.bytesLeftToMove, self.bytesBeingMoved)
\ No newline at end of file
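
The parseProgressLog docstring above shows sample balancer output; the snippet below feeds those lines through HdfsParser. It assumes hdfs_rebalance.py is on the import path; the header row simply yields None here, while each progress row yields a populated HdfsLine.

  # Usage sketch for HdfsParser/HdfsLine using the sample rows from the
  # parseProgressLog docstring; assumes hdfs_rebalance.py is importable.
  from hdfs_rebalance import HdfsParser

  sample_output = [
    "Time Stamp               Iteration#  Bytes Already Moved  Bytes Left To Move  Bytes Being Moved",
    "Jul 28, 2014 5:01:49 PM           0                  0 B             5.74 GB            9.79 GB",
    "Jul 28, 2014 5:03:00 PM           1                  0 B             5.58 GB            9.79 GB",
  ]

  parser = HdfsParser()
  for line in sample_output:
    parsed = parser.parseLine(line)
    if parsed is not None:
      info = parsed.toJson()
      # 'dataLeft' keeps the human-readable string, 'bytesLeft' the 1024-based byte count
      print(info['iteration'], info['dataLeft'], info['bytesLeft'])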

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/hdfs_snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/hdfs_snamenode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/hdfs_snamenode.py
new file mode 100644
index 0000000..8d4c40c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/hdfs_snamenode.py
@@ -0,0 +1,66 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from utils import service
+from resource_management.core.resources.system import Directory, File
+from resource_management.core.source import Template
+from resource_management.libraries.functions.check_process_status import check_process_status
+from ambari_commons.os_family_impl import OsFamilyImpl, OsFamilyFuncImpl
+from ambari_commons import OSConst
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def snamenode(action=None, format=False):
+  if action == "configure":
+    import params
+    for fs_checkpoint_dir in params.fs_checkpoint_dirs:
+      Directory(fs_checkpoint_dir,
+                create_parents = True,
+                cd_access="a",
+                mode=0755,
+                owner=params.hdfs_user,
+                group=params.user_group)
+    File(params.exclude_file_path,
+         content=Template("exclude_hosts_list.j2"),
+         owner=params.hdfs_user,
+         group=params.user_group)
+  elif action == "start" or action == "stop":
+    import params
+    service(
+      action=action,
+      name="secondarynamenode",
+      user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True
+    )
+  elif action == "status":
+    import status_params
+    check_process_status(status_params.snamenode_pid_file)
+
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def snamenode(action=None, format=False):
+  if action == "configure":
+    pass
+  elif action == "start" or action == "stop":
+    import params
+    Service(params.snamenode_win_service_name, action=action)
+  elif action == "status":
+    import status_params
+    check_windows_service_status(status_params.snamenode_win_service_name)
+
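
snamenode() is defined twice above, once per OS family, and @OsFamilyFuncImpl selects the variant matching the agent's OS at call time. The toy registry below only mimics that dispatch idea in plain Python; it is not the ambari_commons implementation, and all names in it are made up for illustration.

  # Toy registry-based dispatch illustrating the idea behind @OsFamilyFuncImpl:
  # each OS family registers its own implementation under the same name, and
  # the caller resolves the variant for the current OS.
  _REGISTRY = {}

  def os_variant(os_family):
    def register(func):
      _REGISTRY.setdefault(func.__name__, {})[os_family] = func
      return func
    return register

  def call_for(os_family, name, *args, **kwargs):
    return _REGISTRY[name][os_family](*args, **kwargs)

  @os_variant("default")
  def snamenode_action(action=None):
    return "linux handler: %s" % action

  @os_variant("winsrv")
  def snamenode_action(action=None):
    return "windows handler: %s" % action

  print(call_for("default", "snamenode_action", action="start"))
  print(call_for("winsrv", "snamenode_action", action="start"))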

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/install_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/install_params.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/install_params.py
new file mode 100644
index 0000000..fe488c3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/install_params.py
@@ -0,0 +1,39 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from ambari_commons import OSCheck
+
+# These parameters are supposed to be referenced at installation time, before the Hadoop environment variables have been set
+if OSCheck.is_windows_family():
+  exclude_packages = []
+else:
+  from resource_management.libraries.functions.default import default
+  from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
+  from resource_management.libraries.script.script import Script
+
+  _config = Script.get_config()
+  stack_version_unformatted = str(_config['hostLevelParams']['stack_version'])
+
+  # The logic for LZO also exists in OOZIE's params.py
+  io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
+  lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
+  lzo_packages = get_lzo_packages(stack_version_unformatted)
+
+  exclude_packages = []
+  if not lzo_enabled:
+    exclude_packages += lzo_packages
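
The LZO branch above keeps the LZO packages in the install set only when io.compression.codecs mentions the LZO codec class. A standalone sketch of that substring check follows; the package names are placeholders, since the real list comes from get_lzo_packages() for the stack version.

  # Sketch of the lzo_enabled / exclude_packages decision above; package names
  # are placeholders, not the actual stack package list.
  def compute_excluded(io_compression_codecs, lzo_packages):
    lzo_enabled = (io_compression_codecs is not None
                   and "com.hadoop.compression.lzo" in io_compression_codecs.lower())
    return [] if lzo_enabled else list(lzo_packages)

  codecs = "org.apache.hadoop.io.compress.GzipCodec,com.hadoop.compression.lzo.LzoCodec"
  print(compute_excluded(codecs, ["lzo", "hadooplzo"]))   # [] -> nothing excluded
  print(compute_excluded(None, ["lzo", "hadooplzo"]))     # LZO packages excluded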

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/journalnode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/journalnode.py
new file mode 100644
index 0000000..46df454
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/journalnode.py
@@ -0,0 +1,203 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Directory
+from utils import service
+from hdfs import hdfs
+import journalnode_upgrade
+from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons import OSConst
+
+class JournalNode(Script):
+  def install(self, env):
+    import params
+    env.set_params(params)
+    self.install_packages(env)  
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class JournalNodeDefault(JournalNode):
+
+  def get_component_name(self):
+    return "hadoop-hdfs-journalnode"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-hdfs-journalnode", params.version)
+
+  def start(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    self.configure(env)
+    service(
+      action="start", name="journalnode", user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True
+    )
+
+  def post_upgrade_restart(self, env, upgrade_type=None):
+    if upgrade_type == "nonrolling":
+      return
+
+    Logger.info("Executing Stack Upgrade post-restart")
+    import params
+    env.set_params(params)
+    journalnode_upgrade.post_upgrade_check()
+
+  def stop(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    service(
+      action="stop", name="journalnode", user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True
+    )
+
+  def configure(self, env):
+    import params
+
+    Directory(params.jn_edits_dir,
+              create_parents = True,
+              cd_access="a",
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+    env.set_params(params)
+    hdfs()
+    pass
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    check_process_status(status_params.journalnode_pid_file)
+
+  def security_status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    props_value_check = {"hadoop.security.authentication": "kerberos",
+                         "hadoop.security.authorization": "true"}
+    props_empty_check = ["hadoop.security.auth_to_local"]
+    props_read_check = None
+    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
+                                                props_read_check)
+
+    props_value_check = None
+    props_empty_check = ['dfs.journalnode.keytab.file',
+                         'dfs.journalnode.kerberos.principal']
+    props_read_check = ['dfs.journalnode.keytab.file']
+    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
+                                                props_read_check)
+
+    hdfs_expectations = {}
+    hdfs_expectations.update(hdfs_site_expectations)
+    hdfs_expectations.update(core_site_expectations)
+
+    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
+                                                 {'core-site.xml': FILE_TYPE_XML})
+    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
+        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
+      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
+      if not result_issues:  # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ('hdfs-site' not in security_params or
+                  'dfs.journalnode.kerberos.keytab.file' not in security_params['hdfs-site'] or
+                  'dfs.journalnode.kerberos.principal' not in security_params['hdfs-site']):
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal are not set property."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.hdfs_user,
+                                security_params['hdfs-site']['dfs.journalnode.kerberos.keytab.file'],
+                                security_params['hdfs-site']['dfs.journalnode.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+      
+  def get_log_folder(self):
+    import params
+    return params.hdfs_log_dir
+  
+  def get_user(self):
+    import params
+    return params.hdfs_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.journalnode_pid_file]
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class JournalNodeWindows(JournalNode):
+  def install(self, env):
+    import install_params
+    self.install_packages(env)
+
+  def start(self, env):
+    import params
+    self.configure(env)
+    Service(params.journalnode_win_service_name, action="start")
+
+  def stop(self, env):
+    import params
+    Service(params.journalnode_win_service_name, action="stop")
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hdfs("journalnode")
+    pass
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_windows_service_status(status_params.journalnode_win_service_name)
+
+if __name__ == "__main__":
+  JournalNode().execute()
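
security_status() above boils down to: if core-site says kerberos, require non-empty JournalNode keytab and principal entries in hdfs-site, attempt a kinit with them, and report SECURED_KERBEROS, UNSECURED, or ERROR. Below is a compact sketch of just the property-validation step; the dict keys mirror the double-check above, the kinit verification is omitted, and the sample values are illustrative only.

  # Sketch of the JournalNode security validation above, without the
  # resource_management helpers or the cached kinit step.
  def journalnode_security_state(security_params):
    core = security_params.get('core-site', {})
    if core.get('hadoop.security.authentication', '').lower() != 'kerberos':
      return "UNSECURED"
    hdfs_site = security_params.get('hdfs-site', {})
    required = ['dfs.journalnode.kerberos.keytab.file',
                'dfs.journalnode.kerberos.principal']
    if any(not hdfs_site.get(prop) for prop in required):
      return "UNSECURED"  # keytab or principal not set properly
    return "SECURED_KERBEROS"  # real check would also attempt a kinit here

  print(journalnode_security_state({
    'core-site': {'hadoop.security.authentication': 'kerberos'},
    'hdfs-site': {'dfs.journalnode.kerberos.keytab.file': '/etc/security/keytabs/jn.service.keytab',
                  'dfs.journalnode.kerberos.principal': 'jn/_HOST@EXAMPLE.COM'}
  }))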


[05/51] [abbrv] ambari git commit: Revert "AMBARI-19025. Add livy.spark.master to livy.conf and update spark-blacklist.conf (Jeff Zhang via smohanty)"

Posted by sm...@apache.org.
Revert "AMBARI-19025. Add livy.spark.master to livy.conf and update spark-blacklist.conf (Jeff Zhang via smohanty)"

This reverts commit 4a565d347a0710b24148a3724d7c3641bdcebddb.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9c16befd
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9c16befd
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9c16befd

Branch: refs/heads/branch-feature-AMBARI-18901
Commit: 9c16befd3950723937d2d411b2ee83e05d4715e3
Parents: 704170e
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Wed Dec 7 22:59:54 2016 -0800
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Wed Dec 7 22:59:54 2016 -0800

----------------------------------------------------------------------
 .../services/SPARK/configuration/livy-conf.xml  | 73 --------------------
 .../configuration/livy-spark-blacklist.xml      | 52 --------------
 2 files changed, 125 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9c16befd/src/main/resources/stacks/HDP/2.6/services/SPARK/configuration/livy-conf.xml
----------------------------------------------------------------------
diff --git a/src/main/resources/stacks/HDP/2.6/services/SPARK/configuration/livy-conf.xml b/src/main/resources/stacks/HDP/2.6/services/SPARK/configuration/livy-conf.xml
deleted file mode 100644
index b7bfa73..0000000
--- a/src/main/resources/stacks/HDP/2.6/services/SPARK/configuration/livy-conf.xml
+++ /dev/null
@@ -1,73 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="true">
-  <property>
-    <name>livy.environment</name>
-    <value>production</value>
-    <description>
-            Specifies Livy's environment. May either be "production" or "development". In "development"
-            mode, Livy will enable debugging options, such as reporting possible routes on a 404.
-            defaults to development
-        </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>livy.server.port</name>
-    <value>8998</value>
-    <description>
-            What port to start the server on. Defaults to 8998.
-        </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>livy.server.session.timeout</name>
-    <value>3600000</value>
-    <description>
-            Time in milliseconds on how long Livy will wait before timing out an idle session.
-            Default is one hour.
-        </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>livy.impersonation.enabled</name>
-    <value>true</value>
-    <description>
-            If livy should use proxy users when submitting a job.
-        </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>livy.server.csrf_protection.enabled</name>
-    <value>true</value>
-    <description>
-            Whether to enable csrf protection for livy's rest api.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-     <name>livy.spark.master</name>
-     <value>yarn-cluster</value>
-     <description>
-           spark.master property for spark engine
-     </description>
-     <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/9c16befd/src/main/resources/stacks/HDP/2.6/services/SPARK/configuration/livy-spark-blacklist.xml
----------------------------------------------------------------------
diff --git a/src/main/resources/stacks/HDP/2.6/services/SPARK/configuration/livy-spark-blacklist.xml b/src/main/resources/stacks/HDP/2.6/services/SPARK/configuration/livy-spark-blacklist.xml
deleted file mode 100644
index d4f27bf..0000000
--- a/src/main/resources/stacks/HDP/2.6/services/SPARK/configuration/livy-spark-blacklist.xml
+++ /dev/null
@@ -1,52 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>content</name>
-    <description>spark-blacklist.properties</description>
-    <value>
-#
-# Configuration override / blacklist. Defines a list of properties that users are not allowed
-# to override when starting Spark sessions.
-#
-# This file takes a list of property names (one per line). Empty lines and lines starting with "#"
-# are ignored.
-#
-
-# Disallow overriding the master and the deploy mode.
-spark.master
-spark.submit.deployMode
-
-# Disallow overriding the location of Spark cached jars.
-spark.yarn.jar
-spark.yarn.jars
-spark.yarn.archive
-
-# Don't allow users to override the RSC timeout.
-livy.rsc.server.idle_timeout
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>


[15/51] [abbrv] ambari git commit: AMBARI-19041 Choose services page select/deselect all services not working while adding smartsense to the cluster. (ababiichuk)

Posted by sm...@apache.org.
AMBARI-19041 Choose services page select/deselect all services not working while adding smartsense to the cluster. (ababiichuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/59f520b8
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/59f520b8
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/59f520b8

Branch: refs/heads/branch-feature-AMBARI-18901
Commit: 59f520b85df153616cc2010b23542311e59b50cb
Parents: 1238674
Author: ababiichuk <ab...@hortonworks.com>
Authored: Thu Dec 8 15:40:46 2016 +0200
Committer: ababiichuk <ab...@hortonworks.com>
Committed: Thu Dec 8 15:50:37 2016 +0200

----------------------------------------------------------------------
 ambari-web/app/models/stack_service.js       |  4 +-
 ambari-web/test/models/stack_service_test.js | 71 ++++++++++++++++++++++-
 2 files changed, 73 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/59f520b8/ambari-web/app/models/stack_service.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/stack_service.js b/ambari-web/app/models/stack_service.js
index 0136ec3..f824b6a 100644
--- a/ambari-web/app/models/stack_service.js
+++ b/ambari-web/app/models/stack_service.js
@@ -46,7 +46,9 @@ App.StackService = DS.Model.extend({
   configs: DS.attr('array'),
   requiredServices: DS.attr('array', {defaultValue: []}),
 
-  isDisabled: Em.computed.or('isMandatory', 'isInstalled'),
+  isDisabled: function () {
+    return this.get('isInstalled') || (this.get('isMandatory') && !App.get('router.clusterInstallCompleted'));
+  }.property('isMandatory', 'isInstalled', 'App.router.clusterInstallCompleted'),
 
   /**
    * @type {String[]}

http://git-wip-us.apache.org/repos/asf/ambari/blob/59f520b8/ambari-web/test/models/stack_service_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/models/stack_service_test.js b/ambari-web/test/models/stack_service_test.js
index 9f12e88..bc101e0 100644
--- a/ambari-web/test/models/stack_service_test.js
+++ b/ambari-web/test/models/stack_service_test.js
@@ -283,7 +283,76 @@ describe('App.StackService', function () {
     });
   });
 
-  App.TestAliases.testAsComputedOr(ss, 'isDisabled', ['isMandatory', 'isInstalled']);
+  describe('#isDisabled', function () {
+
+    var cases = [
+      {
+        isInstalled: true,
+        isMandatory: true,
+        clusterInstallCompleted: true,
+        isDisabled: true
+      },
+      {
+        isInstalled: true,
+        isMandatory: true,
+        clusterInstallCompleted: false,
+        isDisabled: true
+      },
+      {
+        isInstalled: true,
+        isMandatory: false,
+        clusterInstallCompleted: true,
+        isDisabled: true
+      },
+      {
+        isInstalled: true,
+        isMandatory: false,
+        clusterInstallCompleted: false,
+        isDisabled: true
+      },
+      {
+        isInstalled: false,
+        isMandatory: true,
+        clusterInstallCompleted: true,
+        isDisabled: false
+      },
+      {
+        isInstalled: false,
+        isMandatory: true,
+        clusterInstallCompleted: false,
+        isDisabled: true
+      },
+      {
+        isInstalled: false,
+        isMandatory: false,
+        clusterInstallCompleted: true,
+        isDisabled: false
+      },
+      {
+        isInstalled: false,
+        isMandatory: false,
+        clusterInstallCompleted: false,
+        isDisabled: false
+      }
+    ];
+
+    cases.forEach(function (testCase) {
+
+      var title = 'isInstalled: {0}, isMandatory: {1}, clusterInstallCompleted: {2}, isDisabled: {3}'
+        .format(testCase.isInstalled, testCase.isMandatory, testCase.clusterInstallCompleted, testCase.isDisabled);
+
+      it(title, function () {
+        ss.setProperties({
+          isInstalled: testCase.isInstalled,
+          isMandatory: testCase.isMandatory
+        });
+        App.set('router.clusterInstallCompleted', testCase.clusterInstallCompleted);
+        expect(ss.get('isDisabled')).to.equal(testCase.isDisabled);
+      });
+
+    });
+
+  });
 
 
 });


[13/51] [abbrv] ambari git commit: AMBARI-19087. Clean up how dfs.cluster.administrators is handled wrt user/group creation (aonishuk)

Posted by sm...@apache.org.
AMBARI-19087. Clean up how dfs.cluster.administrators is handled wrt user/group creation (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/eb04efb2
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/eb04efb2
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/eb04efb2

Branch: refs/heads/branch-feature-AMBARI-18901
Commit: eb04efb2375f25cacede71a02442d6c22f135df2
Parents: 338c2c5
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Thu Dec 8 14:00:53 2016 +0200
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Thu Dec 8 14:00:53 2016 +0200

----------------------------------------------------------------------
 .../common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml | 2 +-
 .../2.0.6/hooks/before-ANY/scripts/shared_initialization.py    | 6 +++++-
 2 files changed, 6 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/eb04efb2/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
index 22ab02a..aad2db0 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
@@ -395,7 +395,7 @@
   <property>
     <name>dfs.cluster.administrators</name>
     <value> hdfs</value>
-    <description>ACL for who all can view the default servlets in the HDFS</description>
+    <description>ACL for the admins, this configuration is used to control who can access the default servlets in the namenode, etc. The value should be a comma separated list of users and groups. The user list comes first and is separated by a space followed by the group list, e.g. "user1,user2 group1,group2". Both users and groups are optional, so "user1", " group1", "", "user1 group1", "user1,user2 group1,group2" are all valid (note the leading space in " group1"). '*' grants access to all users and groups, e.g. '*', '* ' and ' *' are all valid.</description>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb04efb2/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
index 320872e..f97789b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
@@ -105,13 +105,17 @@ def create_users_and_groups(user_and_groups):
 
   import params
 
-  parts = re.split('\s', user_and_groups)
+  parts = re.split('\s+', user_and_groups)
   if len(parts) == 1:
     parts.append("")
 
   users_list = parts[0].split(",") if parts[0] else []
   groups_list = parts[1].split(",") if parts[1] else []
 
+  # skip creating groups and users if * is provided as value.
+  users_list = filter(lambda x: x != '*' , users_list)
+  groups_list = filter(lambda x: x != '*' , groups_list)
+
   if users_list:
     User(users_list,
           fetch_nonlocal_groups = params.fetch_nonlocal_groups
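
Per the updated description and the \s+ split above, the dfs.cluster.administrators value is "users groups" with each list comma-separated, a leading space meaning an empty user list, and '*' entries skipped when creating accounts. A quick standalone sketch of that parsing follows; the user and group names are sample data only.

  # Sketch of the dfs.cluster.administrators parsing done above: split on
  # whitespace into "users groups", split each on commas, drop '*' wildcards.
  import re

  def parse_admin_acl(user_and_groups):
    parts = re.split(r'\s+', user_and_groups)
    if len(parts) == 1:
      parts.append("")
    users = [u for u in parts[0].split(",") if u and u != '*']
    groups = [g for g in parts[1].split(",") if g and g != '*']
    return users, groups

  print(parse_admin_acl(" hdfs"))                      # ([], ['hdfs']) - leading space puts hdfs in the group list
  print(parse_admin_acl("user1,user2 group1,group2"))  # (['user1', 'user2'], ['group1', 'group2'])
  print(parse_admin_acl("* "))                         # ([], []) - wildcard, nothing to create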


[26/51] [abbrv] ambari git commit: AMBARI-19137. HDP 3.0 TP - move ZK, HDFS, YARN/MR into new common-services version (alejandro)

Posted by sm...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/params_linux.py
new file mode 100644
index 0000000..0d46069
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/params_linux.py
@@ -0,0 +1,476 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import os
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.stack_features import get_stack_feature_version
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.default import default
+from resource_management.libraries import functions
+from resource_management.libraries.functions import is_empty
+
+import status_params
+
+# a map of the Ambari role to the component name
+# for use with <stack-root>/current/<component>
+MAPR_SERVER_ROLE_DIRECTORY_MAP = {
+  'HISTORYSERVER' : 'hadoop-mapreduce-historyserver',
+  'MAPREDUCE2_CLIENT' : 'hadoop-mapreduce-client',
+}
+
+YARN_SERVER_ROLE_DIRECTORY_MAP = {
+  'APP_TIMELINE_SERVER' : 'hadoop-yarn-timelineserver',
+  'NODEMANAGER' : 'hadoop-yarn-nodemanager',
+  'RESOURCEMANAGER' : 'hadoop-yarn-resourcemanager',
+  'YARN_CLIENT' : 'hadoop-yarn-client'
+}
+
+# server configurations
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+stack_name = status_params.stack_name
+stack_root = Script.get_stack_root()
+tarball_map = default("/configurations/cluster-env/tarball_map", None)
+
+config_path = os.path.join(stack_root, "current/hadoop-client/conf")
+config_dir = os.path.realpath(config_path)
+
+# This is expected to be of the form #.#.#.#
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted_major = format_stack_version(stack_version_unformatted)
+stack_version_formatted = functions.get_stack_version('hadoop-yarn-resourcemanager')
+
+stack_supports_ru = stack_version_formatted_major and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted_major)
+stack_supports_timeline_state_store = stack_version_formatted_major and check_stack_feature(StackFeature.TIMELINE_STATE_STORE, stack_version_formatted_major)
+
+# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade.
+# It cannot be used during the initial Cluster Install because the version is not yet known.
+version = default("/commandParams/version", None)
+
+# get the correct version to use for checking stack features
+version_for_stack_feature_checks = get_stack_feature_version(config)
+
+stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
+stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
+
+hostname = config['hostname']
+
+# hadoop default parameters
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
+hadoop_bin = stack_select.get_hadoop_dir("sbin")
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+hadoop_yarn_home = '/usr/lib/hadoop-yarn'
+hadoop_mapred2_jar_location = "/usr/lib/hadoop-mapreduce"
+mapred_bin = "/usr/lib/hadoop-mapreduce/sbin"
+yarn_bin = "/usr/lib/hadoop-yarn/sbin"
+yarn_container_bin = "/usr/lib/hadoop-yarn/bin"
+hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
+
+# hadoop parameters for stacks supporting rolling upgrade
+if stack_supports_ru:
+  # MapReduce directory root
+  mapred_role_root = "hadoop-mapreduce-client"
+  command_role = default("/role", "")
+  if command_role in MAPR_SERVER_ROLE_DIRECTORY_MAP:
+    mapred_role_root = MAPR_SERVER_ROLE_DIRECTORY_MAP[command_role]
+
+  # YARN directory root
+  yarn_role_root = "hadoop-yarn-client"
+  if command_role in YARN_SERVER_ROLE_DIRECTORY_MAP:
+    yarn_role_root = YARN_SERVER_ROLE_DIRECTORY_MAP[command_role]
+
+  hadoop_mapred2_jar_location = format("{stack_root}/current/{mapred_role_root}")
+  mapred_bin = format("{stack_root}/current/{mapred_role_root}/sbin")
+
+  hadoop_yarn_home = format("{stack_root}/current/{yarn_role_root}")
+  yarn_bin = format("{stack_root}/current/{yarn_role_root}/sbin")
+  yarn_container_bin = format("{stack_root}/current/{yarn_role_root}/bin")
+
+if stack_supports_timeline_state_store:
+  # Timeline Service property that was added by the timeline_state_store stack feature
+  ats_leveldb_state_store_dir = config['configurations']['yarn-site']['yarn.timeline-service.leveldb-state-store.path']
+
+# ats 1.5 properties
+entity_groupfs_active_dir = config['configurations']['yarn-site']['yarn.timeline-service.entity-group-fs-store.active-dir']
+entity_groupfs_active_dir_mode = 01777
+entity_groupfs_store_dir = config['configurations']['yarn-site']['yarn.timeline-service.entity-group-fs-store.done-dir']
+entity_groupfs_store_dir_mode = 0700
+
+hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure")
+
+limits_conf_dir = "/etc/security/limits.d"
+yarn_user_nofile_limit = default("/configurations/yarn-env/yarn_user_nofile_limit", "32768")
+yarn_user_nproc_limit = default("/configurations/yarn-env/yarn_user_nproc_limit", "65536")
+
+mapred_user_nofile_limit = default("/configurations/mapred-env/mapred_user_nofile_limit", "32768")
+mapred_user_nproc_limit = default("/configurations/mapred-env/mapred_user_nproc_limit", "65536")
+
+execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir + os.pathsep + yarn_container_bin
+
+ulimit_cmd = "ulimit -c unlimited;"
+
+mapred_user = status_params.mapred_user
+yarn_user = status_params.yarn_user
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+hdfs_tmp_dir = config['configurations']['hadoop-env']['hdfs_tmp_dir']
+
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
+smoke_hdfs_user_mode = 0770
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+nm_security_marker_dir = "/var/lib/hadoop-yarn"
+nm_security_marker = format('{nm_security_marker_dir}/nm_security_enabled')
+current_nm_security_state = os.path.isfile(nm_security_marker)
+toggle_nm_security = (current_nm_security_state and not security_enabled) or (not current_nm_security_state and security_enabled)
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+
+yarn_executor_container_group = config['configurations']['yarn-site']['yarn.nodemanager.linux-container-executor.group']
+yarn_nodemanager_container_executor_class =  config['configurations']['yarn-site']['yarn.nodemanager.container-executor.class']
+is_linux_container_executor = (yarn_nodemanager_container_executor_class == 'org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor')
+container_executor_mode = 06050 if is_linux_container_executor else 02050
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+yarn_http_policy = config['configurations']['yarn-site']['yarn.http.policy']
+yarn_https_on = (yarn_http_policy.upper() == 'HTTPS_ONLY')
+rm_hosts = config['clusterHostInfo']['rm_host']
+rm_host = rm_hosts[0]
+rm_port = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'].split(':')[-1]
+rm_https_port = default('/configurations/yarn-site/yarn.resourcemanager.webapp.https.address', ":8090").split(':')[-1]
+# TODO UPGRADE default, update site during upgrade
+rm_nodes_exclude_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
+rm_nodes_exclude_dir = os.path.dirname(rm_nodes_exclude_path)
+
+java64_home = config['hostLevelParams']['java_home']
+hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled", False)
+
+yarn_heapsize = config['configurations']['yarn-env']['yarn_heapsize']
+resourcemanager_heapsize = config['configurations']['yarn-env']['resourcemanager_heapsize']
+nodemanager_heapsize = config['configurations']['yarn-env']['nodemanager_heapsize']
+apptimelineserver_heapsize = default("/configurations/yarn-env/apptimelineserver_heapsize", 1024)
+ats_leveldb_dir = config['configurations']['yarn-site']['yarn.timeline-service.leveldb-timeline-store.path']
+ats_leveldb_lock_file = os.path.join(ats_leveldb_dir, "leveldb-timeline-store.ldb", "LOCK")
+yarn_log_dir_prefix = config['configurations']['yarn-env']['yarn_log_dir_prefix']
+yarn_pid_dir_prefix = status_params.yarn_pid_dir_prefix
+mapred_pid_dir_prefix = status_params.mapred_pid_dir_prefix
+mapred_log_dir_prefix = config['configurations']['mapred-env']['mapred_log_dir_prefix']
+mapred_env_sh_template = config['configurations']['mapred-env']['content']
+yarn_env_sh_template = config['configurations']['yarn-env']['content']
+yarn_nodemanager_recovery_dir = default('/configurations/yarn-site/yarn.nodemanager.recovery.dir', None)
+service_check_queue_name = default('/configurations/yarn-env/service_check.queue.name', 'default')
+
+if len(rm_hosts) > 1:
+  additional_rm_host = rm_hosts[1]
+  rm_webui_address = format("{rm_host}:{rm_port},{additional_rm_host}:{rm_port}")
+  rm_webui_https_address = format("{rm_host}:{rm_https_port},{additional_rm_host}:{rm_https_port}")
+else:
+  rm_webui_address = format("{rm_host}:{rm_port}")
+  rm_webui_https_address = format("{rm_host}:{rm_https_port}")
+
+if security_enabled:
+  tc_mode = 0644
+  tc_owner = "root"
+else:
+  tc_mode = None
+  tc_owner = hdfs_user
+
+nm_webui_address = config['configurations']['yarn-site']['yarn.nodemanager.webapp.address']
+hs_webui_address = config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address']
+nm_address = config['configurations']['yarn-site']['yarn.nodemanager.address']  # still contains 0.0.0.0
+if hostname and nm_address and nm_address.startswith("0.0.0.0:"):
+  nm_address = nm_address.replace("0.0.0.0", hostname)
+
+# Initialize lists of work directories.
+nm_local_dirs = default("/configurations/yarn-site/yarn.nodemanager.local-dirs", "")
+nm_log_dirs = default("/configurations/yarn-site/yarn.nodemanager.log-dirs", "")
+
+nm_local_dirs_list = nm_local_dirs.split(',')
+nm_log_dirs_list = nm_log_dirs.split(',')
+
+nm_log_dir_to_mount_file = "/var/lib/ambari-agent/data/yarn/yarn_log_dir_mount.hist"
+nm_local_dir_to_mount_file = "/var/lib/ambari-agent/data/yarn/yarn_local_dir_mount.hist"
+
+distrAppJarName = "hadoop-yarn-applications-distributedshell-2.*.jar"
+hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
+
+entity_file_history_directory = "/tmp/entity-file-history/active"
+
+yarn_pid_dir = status_params.yarn_pid_dir
+mapred_pid_dir = status_params.mapred_pid_dir
+
+mapred_log_dir = format("{mapred_log_dir_prefix}/{mapred_user}")
+yarn_log_dir = format("{yarn_log_dir_prefix}/{yarn_user}")
+mapred_job_summary_log = format("{mapred_log_dir_prefix}/{mapred_user}/hadoop-mapreduce.jobsummary.log")
+yarn_job_summary_log = format("{yarn_log_dir_prefix}/{yarn_user}/hadoop-mapreduce.jobsummary.log")
+
+user_group = config['configurations']['cluster-env']['user_group']
+
+#exclude file
+exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
+exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
+
+ats_host = set(default("/clusterHostInfo/app_timeline_server_hosts", []))
+has_ats = not len(ats_host) == 0
+
+nm_hosts = default("/clusterHostInfo/nm_hosts", [])
+
+# don't use len(nm_hosts) here, because the check can take too much time on large clusters
+number_of_nm = 1
+
+# default kinit commands
+rm_kinit_cmd = ""
+yarn_timelineservice_kinit_cmd = ""
+nodemanager_kinit_cmd = ""
+
+if security_enabled:
+  rm_principal_name = config['configurations']['yarn-site']['yarn.resourcemanager.principal']
+  rm_principal_name = rm_principal_name.replace('_HOST',hostname.lower())
+  rm_keytab = config['configurations']['yarn-site']['yarn.resourcemanager.keytab']
+  rm_kinit_cmd = format("{kinit_path_local} -kt {rm_keytab} {rm_principal_name};")
+
+  # YARN timeline security options
+  if has_ats:
+    _yarn_timelineservice_principal_name = config['configurations']['yarn-site']['yarn.timeline-service.principal']
+    _yarn_timelineservice_principal_name = _yarn_timelineservice_principal_name.replace('_HOST', hostname.lower())
+    _yarn_timelineservice_keytab = config['configurations']['yarn-site']['yarn.timeline-service.keytab']
+    yarn_timelineservice_kinit_cmd = format("{kinit_path_local} -kt {_yarn_timelineservice_keytab} {_yarn_timelineservice_principal_name};")
+
+  if 'yarn.nodemanager.principal' in config['configurations']['yarn-site']:
+    _nodemanager_principal_name = default('/configurations/yarn-site/yarn.nodemanager.principal', None)
+    if _nodemanager_principal_name:
+      _nodemanager_principal_name = _nodemanager_principal_name.replace('_HOST', hostname.lower())
+
+    _nodemanager_keytab = config['configurations']['yarn-site']['yarn.nodemanager.keytab']
+    nodemanager_kinit_cmd = format("{kinit_path_local} -kt {_nodemanager_keytab} {_nodemanager_principal_name};")
+
+
+yarn_log_aggregation_enabled = config['configurations']['yarn-site']['yarn.log-aggregation-enable']
+yarn_nm_app_log_dir =  config['configurations']['yarn-site']['yarn.nodemanager.remote-app-log-dir']
+mapreduce_jobhistory_intermediate_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.intermediate-done-dir']
+mapreduce_jobhistory_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.done-dir']
+jobhistory_heapsize = default("/configurations/mapred-env/jobhistory_heapsize", "900")
+jhs_leveldb_state_store_dir = default('/configurations/mapred-site/mapreduce.jobhistory.recovery.store.leveldb.path', "/hadoop/mapreduce/jhs")
+
+# Tez-related properties
+tez_user = config['configurations']['tez-env']['tez_user']
+
+# Tez jars
+tez_local_api_jars = '/usr/lib/tez/tez*.jar'
+tez_local_lib_jars = '/usr/lib/tez/lib/*.jar'
+app_dir_files = {tez_local_api_jars:None}
+
+# Tez libraries
+tez_lib_uris = default("/configurations/tez-site/tez.lib.uris", None)
+
+#for create_hdfs_directory
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+
+
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+is_webhdfs_enabled = hdfs_site['dfs.webhdfs.enabled']
+
+# Path to file that contains list of HDFS resources to be skipped during processing
+hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore"
+
+dfs_type = default("/commandParams/dfs_type", "")
+
+
+import functools
+# create partial functions with common arguments for every HdfsResource call;
+# to create/delete an HDFS directory/file or copy from local, call params.HdfsResource in service code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  hdfs_resource_ignore_file = hdfs_resource_ignore_file,
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
+  dfs_type = dfs_type
+ )
+update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
+
+mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
+
+#taskcontroller.cfg
+
+mapred_local_dir = "/tmp/hadoop-mapred/mapred/local"
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+min_user_id = config['configurations']['yarn-env']['min_user_id']
+
+# Node labels
+node_labels_dir = default("/configurations/yarn-site/yarn.node-labels.fs-store.root-dir", None)
+node_label_enable = config['configurations']['yarn-site']['yarn.node-labels.enabled']
+
+cgroups_dir = "/cgroups_test/cpu"
+
+# ***********************  RANGER PLUGIN CHANGES ***********************
+# ranger host
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+has_ranger_admin = not len(ranger_admin_hosts) == 0
+xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+# hostname of the active HDFS HA Namenode (only used when HA is enabled)
+dfs_ha_namenode_active = default("/configurations/hadoop-env/dfs_ha_initial_namenode_active", None)
+if dfs_ha_namenode_active is not None: 
+  namenode_hostname = dfs_ha_namenode_active
+else:
+  namenode_hostname = config['clusterHostInfo']['namenode_host'][0]
+
+ranger_admin_log_dir = default("/configurations/ranger-env/ranger_admin_log_dir","/var/log/ranger/admin")
+
+scheme = 'http' if not yarn_https_on else 'https'
+yarn_rm_address = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'] if not yarn_https_on else config['configurations']['yarn-site']['yarn.resourcemanager.webapp.https.address']
+rm_active_port = rm_https_port if yarn_https_on else rm_port
+
+rm_ha_enabled = False
+rm_ha_ids_list = []
+rm_webapp_addresses_list = [yarn_rm_address]
+rm_ha_ids = default("/configurations/yarn-site/yarn.resourcemanager.ha.rm-ids", None)
+
+if rm_ha_ids:
+  rm_ha_ids_list = rm_ha_ids.split(",")
+  if len(rm_ha_ids_list) > 1:
+    rm_ha_enabled = True
+
+if rm_ha_enabled:
+  rm_webapp_addresses_list = []
+  for rm_id in rm_ha_ids_list:
+    rm_webapp_address_property = format('yarn.resourcemanager.webapp.address.{rm_id}') if not yarn_https_on else format('yarn.resourcemanager.webapp.https.address.{rm_id}')
+    rm_webapp_address = config['configurations']['yarn-site'][rm_webapp_address_property]
+    rm_webapp_addresses_list.append(rm_webapp_address)
+
+#ranger yarn properties
+if has_ranger_admin:
+  is_supported_yarn_ranger = config['configurations']['yarn-env']['is_supported_yarn_ranger']
+
+  if is_supported_yarn_ranger:
+    enable_ranger_yarn = (config['configurations']['ranger-yarn-plugin-properties']['ranger-yarn-plugin-enabled'].lower() == 'yes')
+    policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
+    if 'admin-properties' in config['configurations'] and 'policymgr_external_url' in config['configurations']['admin-properties'] and policymgr_mgr_url.endswith('/'):
+      policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
+    xa_audit_db_flavor = (config['configurations']['admin-properties']['DB_FLAVOR']).lower()
+    xa_audit_db_name = default('/configurations/admin-properties/audit_db_name', 'ranger_audits')
+    xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
+    xa_audit_db_password = ''
+    if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db:
+      xa_audit_db_password = unicode(config['configurations']['admin-properties']['audit_db_password'])
+    xa_db_host = config['configurations']['admin-properties']['db_host']
+    repo_name = str(config['clusterName']) + '_yarn'
+
+    ranger_env = config['configurations']['ranger-env']
+    ranger_plugin_properties = config['configurations']['ranger-yarn-plugin-properties']
+    policy_user = config['configurations']['ranger-yarn-plugin-properties']['policy_user']
+    yarn_rest_url = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address']  
+
+    ranger_plugin_config = {
+      'username' : config['configurations']['ranger-yarn-plugin-properties']['REPOSITORY_CONFIG_USERNAME'],
+      'password' : unicode(config['configurations']['ranger-yarn-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']),
+      'yarn.url' : format('{scheme}://{yarn_rest_url}'),
+      'commonNameForCertificate' : config['configurations']['ranger-yarn-plugin-properties']['common.name.for.certificate']
+    }
+
+    yarn_ranger_plugin_repo = {
+      'isEnabled': 'true',
+      'configs': ranger_plugin_config,
+      'description': 'yarn repo',
+      'name': repo_name,
+      'repositoryType': 'yarn',
+      'type': 'yarn',
+      'assetType': '1'
+    }
+
+    if stack_supports_ranger_kerberos:
+      ranger_plugin_config['ambari.service.check.user'] = policy_user
+      ranger_plugin_config['hadoop.security.authentication'] = 'kerberos' if security_enabled else 'simple'
+
+    if stack_supports_ranger_kerberos and security_enabled:
+      ranger_plugin_config['policy.download.auth.users'] = yarn_user
+      ranger_plugin_config['tag.download.auth.users'] = yarn_user
+
+    # For the curl command in the Ranger plugin to fetch the DB connector
+    jdk_location = config['hostLevelParams']['jdk_location']
+    java_share_dir = '/usr/share/java'
+    previous_jdbc_jar_name = None
+    if stack_supports_ranger_audit_db:
+      if xa_audit_db_flavor and xa_audit_db_flavor == 'mysql':
+        jdbc_jar_name = default("/hostLevelParams/custom_mysql_jdbc_name", None)
+        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
+        audit_jdbc_url = format('jdbc:mysql://{xa_db_host}/{xa_audit_db_name}')
+        jdbc_driver = "com.mysql.jdbc.Driver"
+      elif xa_audit_db_flavor and xa_audit_db_flavor == 'oracle':
+        jdbc_jar_name = default("/hostLevelParams/custom_oracle_jdbc_name", None)
+        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
+        colon_count = xa_db_host.count(':')
+        if colon_count == 2 or colon_count == 0:
+          audit_jdbc_url = format('jdbc:oracle:thin:@{xa_db_host}')
+        else:
+          audit_jdbc_url = format('jdbc:oracle:thin:@//{xa_db_host}')
+        jdbc_driver = "oracle.jdbc.OracleDriver"
+      elif xa_audit_db_flavor and xa_audit_db_flavor == 'postgres':
+        jdbc_jar_name = default("/hostLevelParams/custom_postgres_jdbc_name", None)
+        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
+        audit_jdbc_url = format('jdbc:postgresql://{xa_db_host}/{xa_audit_db_name}')
+        jdbc_driver = "org.postgresql.Driver"
+      elif xa_audit_db_flavor and xa_audit_db_flavor == 'mssql':
+        jdbc_jar_name = default("/hostLevelParams/custom_mssql_jdbc_name", None)
+        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
+        audit_jdbc_url = format('jdbc:sqlserver://{xa_db_host};databaseName={xa_audit_db_name}')
+        jdbc_driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
+      elif xa_audit_db_flavor and xa_audit_db_flavor == 'sqla':
+        jdbc_jar_name = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
+        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
+        audit_jdbc_url = format('jdbc:sqlanywhere:database={xa_audit_db_name};host={xa_db_host}')
+        jdbc_driver = "sap.jdbc4.sqlanywhere.IDriver"
+
+    downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    driver_curl_target = format("{hadoop_yarn_home}/lib/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    previous_jdbc_jar = format("{hadoop_yarn_home}/lib/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+
+    xa_audit_db_is_enabled = False
+    ranger_audit_solr_urls = config['configurations']['ranger-admin-site']['ranger.audit.solr.urls']
+    if xml_configurations_supported and stack_supports_ranger_audit_db:
+      xa_audit_db_is_enabled = config['configurations']['ranger-yarn-audit']['xasecure.audit.destination.db']
+    xa_audit_hdfs_is_enabled = config['configurations']['ranger-yarn-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else None
+    ssl_keystore_password = unicode(config['configurations']['ranger-yarn-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']) if xml_configurations_supported else None
+    ssl_truststore_password = unicode(config['configurations']['ranger-yarn-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']) if xml_configurations_supported else None
+    credential_file = format('/etc/ranger/{repo_name}/cred.jceks') if xml_configurations_supported else None
+
+    # For SQL Anywhere (SQLA), explicitly disable audit to DB for Ranger
+    if xa_audit_db_flavor == 'sqla':
+      xa_audit_db_is_enabled = False
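
The params module above pre-binds the arguments that are common to every HdfsResource call with functools.partial, so service code only passes what varies per path. Below is a minimal standalone sketch of that pattern; create_hdfs_resource is a hypothetical stand-in for the real HdfsResource resource, which needs a running Ambari agent environment.

import functools

# Hypothetical stand-in for the HdfsResource resource; it only prints what it
# would do so the pattern can be run anywhere.
def create_hdfs_resource(path, action=None, user=None, security_enabled=False, **kwargs):
    print("%s %s as user=%s (kerberos=%s)" % (action, path, user, security_enabled))

# Pre-bind the arguments that are identical for every call, as params.py does.
HdfsResource = functools.partial(create_hdfs_resource,
                                 user="hdfs",
                                 security_enabled=False)

# Call sites then pass only the per-resource details.
HdfsResource("/user/ambari-qa", action="create_on_execute")
HdfsResource("/ranger/audit/yarn", action="create_on_execute")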

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/params_windows.py
new file mode 100644
index 0000000..52918d2e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/params_windows.py
@@ -0,0 +1,62 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.format import format
+from resource_management.libraries import functions
+from resource_management.libraries.functions import is_empty
+import os
+from status_params import *
+
+# server configurations
+config = Script.get_config()
+
+hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
+yarn_user = hadoop_user
+hdfs_user = hadoop_user
+smokeuser = hadoop_user
+config_dir = os.environ["HADOOP_CONF_DIR"]
+hadoop_home = os.environ["HADOOP_HOME"]
+
+yarn_home = os.environ["HADOOP_YARN_HOME"]
+
+hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled", False)
+_authentication = config['configurations']['core-site']['hadoop.security.authentication']
+security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
+smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+rm_host = config['clusterHostInfo']['rm_host'][0]
+rm_port = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'].split(':')[-1]
+rm_https_port = "8090"
+rm_webui_address = format("{rm_host}:{rm_port}")
+rm_webui_https_address = format("{rm_host}:{rm_https_port}")
+
+hs_host = config['clusterHostInfo']['hs_host'][0]
+hs_port = config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address'].split(':')[-1]
+hs_webui_address = format("{hs_host}:{hs_port}")
+
+hadoop_mapred2_jar_location = os.path.join(os.environ["HADOOP_COMMON_HOME"], "share", "hadoop", "mapreduce")
+hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
+
+exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
+exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
+update_exclude_file_only = config['commandParams']['update_exclude_file_only']
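
Both params files resolve optional settings with the default() helper, which walks a '/'-separated path through the command's nested configuration dictionary and falls back to the supplied value when a key is missing. The sketch below is only a rough, self-contained approximation: the real helper reads the command JSON internally and does not take an explicit config argument.

def default(path, fallback, config):
    # Approximation only: walk '/'-separated keys through nested dicts.
    node = config
    for key in path.strip('/').split('/'):
        if not isinstance(node, dict) or key not in node:
            return fallback
        node = node[key]
    return node

config = {'configurations': {'yarn-site': {}}}
print(default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path",
              "/etc/hadoop/conf/yarn.exclude", config))
# -> /etc/hadoop/conf/yarn.exclude (the fallback, since the key is absent)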

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/resourcemanager.py
new file mode 100644
index 0000000..e053fe6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/resourcemanager.py
@@ -0,0 +1,293 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+from resource_management.libraries.functions.decorator import retry
+from resource_management.core.resources.system import File, Execute
+from resource_management.core.source import Template
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
+from resource_management.libraries.providers.hdfs_resource import HdfsResourceProvider
+from resource_management import is_empty
+from resource_management import shell
+
+
+from yarn import yarn
+from service import service
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+from setup_ranger_yarn import setup_ranger_yarn
+
+
+class Resourcemanager(Script):
+  def install(self, env):
+    self.install_packages(env)
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    service('resourcemanager', action='stop')
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn(name='resourcemanager')
+
+  def refreshqueues(self, env):
+    pass
+
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class ResourcemanagerWindows(Resourcemanager):
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    service('resourcemanager', action='start')
+
+  def status(self, env):
+    service('resourcemanager', action='status')
+
+  def decommission(self, env):
+    import params
+
+    env.set_params(params)
+    yarn_user = params.yarn_user
+
+    yarn_refresh_cmd = format("cmd /c yarn rmadmin -refreshNodes")
+
+    File(params.exclude_file_path,
+         content=Template("exclude_hosts_list.j2"),
+         owner=yarn_user,
+         mode="f"
+    )
+
+    if params.update_exclude_file_only == False:
+      Execute(yarn_refresh_cmd, user=yarn_user)
+
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class ResourcemanagerDefault(Resourcemanager):
+  def get_component_name(self):
+    return "hadoop-yarn-resourcemanager"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Stack Upgrade post-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-yarn-resourcemanager", params.version)
+
+  def start(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    if params.has_ranger_admin and params.is_supported_yarn_ranger:
+      setup_ranger_yarn() #Ranger Yarn Plugin related calls
+
+    # wait for active-dir and done-dir to be created by ATS if needed
+    if params.has_ats:
+      Logger.info("Verifying DFS directories where ATS stores time line data for active and completed applications.")
+      self.wait_for_dfs_directories_created(params.entity_groupfs_store_dir, params.entity_groupfs_active_dir)
+
+    service('resourcemanager', action='start')
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    check_process_status(status_params.resourcemanager_pid_file)
+    pass
+
+  def security_status(self, env):
+    import status_params
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      props_value_check = {"yarn.timeline-service.http-authentication.type": "kerberos",
+                           "yarn.acl.enable": "true"}
+      props_empty_check = ["yarn.resourcemanager.principal",
+                           "yarn.resourcemanager.keytab",
+                           "yarn.resourcemanager.webapp.spnego-principal",
+                           "yarn.resourcemanager.webapp.spnego-keytab-file"]
+
+      props_read_check = ["yarn.resourcemanager.keytab",
+                          "yarn.resourcemanager.webapp.spnego-keytab-file"]
+      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
+                                           props_read_check)
+
+      yarn_expectations ={}
+      yarn_expectations.update(yarn_site_props)
+
+      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
+                                                   {'yarn-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, yarn_site_props)
+      if not result_issues: # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ( 'yarn-site' not in security_params
+               or 'yarn.resourcemanager.keytab' not in security_params['yarn-site']
+               or 'yarn.resourcemanager.principal' not in security_params['yarn-site']) \
+            or 'yarn.resourcemanager.webapp.spnego-keytab-file' not in security_params['yarn-site'] \
+            or 'yarn.resourcemanager.webapp.spnego-principal' not in security_params['yarn-site']:
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal are not set property."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.yarn_user,
+                                security_params['yarn-site']['yarn.resourcemanager.keytab'],
+                                security_params['yarn-site']['yarn.resourcemanager.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.yarn_user,
+                                security_params['yarn-site']['yarn.resourcemanager.webapp.spnego-keytab-file'],
+                                security_params['yarn-site']['yarn.resourcemanager.webapp.spnego-principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+  def refreshqueues(self, env):
+    import params
+
+    self.configure(env)
+    env.set_params(params)
+
+    service('resourcemanager',
+            action='refreshQueues'
+    )
+
+  def decommission(self, env):
+    import params
+
+    env.set_params(params)
+    rm_kinit_cmd = params.rm_kinit_cmd
+    yarn_user = params.yarn_user
+    conf_dir = params.hadoop_conf_dir
+    user_group = params.user_group
+
+    yarn_refresh_cmd = format("{rm_kinit_cmd} yarn --config {conf_dir} rmadmin -refreshNodes")
+
+    File(params.exclude_file_path,
+         content=Template("exclude_hosts_list.j2"),
+         owner=yarn_user,
+         group=user_group
+    )
+
+    if params.update_exclude_file_only == False:
+      Execute(yarn_refresh_cmd,
+            environment= {'PATH' : params.execute_path },
+            user=yarn_user)
+      pass
+    pass
+
+
+
+
+  def wait_for_dfs_directories_created(self, *dirs):
+    import params
+
+    ignored_dfs_dirs = HdfsResourceProvider.get_ignored_resources_list(params.hdfs_resource_ignore_file)
+
+    if params.security_enabled:
+      Execute(params.rm_kinit_cmd,
+              user=params.yarn_user
+      )
+      Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
+        user=params.hdfs_user
+      )
+
+    for dir_path in dirs:
+      self.wait_for_dfs_directory_created(dir_path, ignored_dfs_dirs)
+
+
+  @retry(times=8, sleep_time=20, backoff_factor=1, err_class=Fail)
+  def wait_for_dfs_directory_created(self, dir_path, ignored_dfs_dirs):
+    import params
+
+
+    if not is_empty(dir_path):
+      dir_path = HdfsResourceProvider.parse_path(dir_path)
+
+      if dir_path in ignored_dfs_dirs:
+        Logger.info("Skipping DFS directory '" + dir_path + "' as it's marked to be ignored.")
+        return
+
+      Logger.info("Verifying if DFS directory '" + dir_path + "' exists.")
+
+      dir_exists = None
+
+      if WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
+        # checking with WebHDFS is much faster than executing hdfs dfs -test
+        util = WebHDFSUtil(params.hdfs_site, params.hdfs_user, params.security_enabled)
+        list_status = util.run_command(dir_path, 'GETFILESTATUS', method='GET', ignore_status_codes=['404'], assertable_result=False)
+        dir_exists = ('FileStatus' in list_status)
+      else:
+        # have to do the time-expensive hdfs dfs -test -d check.
+        dfs_ret_code = shell.call(format("hdfs --config {hadoop_conf_dir} dfs -test -d " + dir_path), user=params.yarn_user)[0]
+        dir_exists = not dfs_ret_code #dfs -test -d returns 0 in case the dir exists
+
+      if not dir_exists:
+        raise Fail("DFS directory '" + dir_path + "' does not exist !")
+      else:
+        Logger.info("DFS directory '" + dir_path + "' exists.")
+
+  def get_log_folder(self):
+    import params
+    return params.yarn_log_dir
+  
+  def get_user(self):
+    import params
+    return params.yarn_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.resourcemanager_pid_file]
+  
+if __name__ == "__main__":
+  Resourcemanager().execute()
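
wait_for_dfs_directory_created() above relies on the @retry decorator from resource_management to keep probing until ATS has created its DFS directories. The following is a simplified, standalone sketch of how such a decorator behaves under the same keyword arguments (times, sleep_time, backoff_factor, err_class); the real implementation may differ in its details.

import time

def retry(times=3, sleep_time=1, backoff_factor=1, err_class=Exception):
    def decorator(function):
        def wrapper(*args, **kwargs):
            delay = sleep_time
            for attempt in range(times):
                try:
                    return function(*args, **kwargs)
                except err_class:
                    if attempt == times - 1:
                        raise          # out of attempts, propagate the failure
                    time.sleep(delay)
                    delay *= backoff_factor
        return wrapper
    return decorator

@retry(times=3, sleep_time=0.1, backoff_factor=2, err_class=ValueError)
def probe_directory():
    raise ValueError("directory not there yet")

try:
    probe_directory()
except ValueError:
    print("still missing after all retries")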

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/service.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/service.py
new file mode 100644
index 0000000..78b2428
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/service.py
@@ -0,0 +1,106 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+from resource_management.core.shell import as_user, as_sudo
+from resource_management.libraries.functions.show_logs import show_logs
+from resource_management.libraries.functions.format import format
+from resource_management.core.resources.system import Execute, File
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def service(componentName, action='start', serviceName='yarn'):
+  import status_params
+  if status_params.service_map.has_key(componentName):
+    service_name = status_params.service_map[componentName]
+    if action == 'start' or action == 'stop':
+      Service(service_name, action=action)
+    elif action == 'status':
+      check_windows_service_status(service_name)
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def service(componentName, action='start', serviceName='yarn'):
+  import params
+
+  if serviceName == 'mapreduce' and componentName == 'historyserver':
+    delete_pid_file = True
+    daemon = format("{mapred_bin}/mr-jobhistory-daemon.sh")
+    pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-{componentName}.pid")
+    usr = params.mapred_user
+    log_dir = params.mapred_log_dir
+  else:
+    # !!! yarn-daemon.sh deletes the PID for us; if we remove it the script
+    # may not work correctly when stopping the service
+    delete_pid_file = False
+    daemon = format("{yarn_bin}/yarn-daemon.sh")
+    pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-{componentName}.pid")
+    usr = params.yarn_user
+    log_dir = params.yarn_log_dir
+
+  cmd = format("export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {daemon} --config {hadoop_conf_dir}")
+
+  if action == 'start':
+    daemon_cmd = format("{ulimit_cmd} {cmd} start {componentName}")
+    check_process = as_sudo(["test", "-f", pid_file]) + " && " + as_sudo(["pgrep", "-F", pid_file])
+
+    # Remove the pid file if its corresponding process is not running.
+    File(pid_file, action = "delete", not_if = check_process)
+
+    if componentName == 'timelineserver' and serviceName == 'yarn':
+      File(params.ats_leveldb_lock_file,
+         action = "delete",
+         only_if = format("ls {params.ats_leveldb_lock_file}"),
+         not_if = check_process,
+         ignore_failures = True
+      )
+
+    try:
+      # Attempt to start the process. Internally, this is skipped if the process is already running.
+      Execute(daemon_cmd, user = usr, not_if = check_process)
+  
+      # Ensure that the process with the expected PID exists.
+      Execute(check_process,
+              not_if = check_process,
+              tries=5,
+              try_sleep=1,
+      )
+    except:
+      show_logs(log_dir, usr)
+      raise
+
+  elif action == 'stop':
+    daemon_cmd = format("{cmd} stop {componentName}")
+    try:
+      Execute(daemon_cmd, user=usr)
+    except:
+      show_logs(log_dir, usr)
+      raise
+
+    # !!! yarn-daemon doesn't need us to delete PIDs
+    if delete_pid_file is True:
+      File(pid_file, action="delete")
+
+
+  elif action == 'refreshQueues':
+    rm_kinit_cmd = params.rm_kinit_cmd
+    refresh_cmd = format("{rm_kinit_cmd} export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {yarn_container_bin}/yarn rmadmin -refreshQueues")
+    Execute(refresh_cmd, user=usr)
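
service() above decides whether a daemon is already running by combining "test -f <pid_file>" with "pgrep -F <pid_file>". Here is a small standalone sketch of the same liveness check; the pid-file path used in the example is hypothetical.

import os
import subprocess

def process_alive(pid_file):
    # Same idea as: test -f <pid_file> && pgrep -F <pid_file>
    # The file must exist and a process with the recorded PID must be running.
    if not os.path.isfile(pid_file):
        return False
    return subprocess.call(["pgrep", "-F", pid_file]) == 0

if __name__ == "__main__":
    # Example path only; the scripts build {yarn_pid_dir}/yarn-{yarn_user}-<component>.pid
    print(process_alive("/var/run/hadoop-yarn/yarn/yarn-yarn-resourcemanager.pid"))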

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/service_check.py
new file mode 100644
index 0000000..b934767
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/service_check.py
@@ -0,0 +1,185 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
+import re
+import subprocess
+import os
+from ambari_commons import os_utils
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.get_user_call_output import get_user_call_output
+from resource_management.core.exceptions import Fail
+from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Execute, File
+from resource_management.core.source import StaticFile
+from resource_management.core import shell
+
+CURL_CONNECTION_TIMEOUT = '5'
+
+class ServiceCheck(Script):
+  def service_check(self, env):
+    pass
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class ServiceCheckWindows(ServiceCheck):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    yarn_exe = os_utils.quote_path(os.path.join(params.yarn_home, "bin", "yarn.cmd"))
+
+    run_yarn_check_cmd = "cmd /C %s node -list" % yarn_exe
+
+    component_type = 'rm'
+    if params.hadoop_ssl_enabled:
+      component_address = params.rm_webui_https_address
+    else:
+      component_address = params.rm_webui_address
+
+    #temp_dir = os.path.abspath(os.path.join(params.hadoop_home, os.pardir)), "/tmp"
+    temp_dir = os.path.join(os.path.dirname(params.hadoop_home), "temp")
+    validateStatusFileName = "validateYarnComponentStatusWindows.py"
+    validateStatusFilePath = os.path.join(temp_dir, validateStatusFileName)
+    python_executable = sys.executable
+    validateStatusCmd = "%s %s %s -p %s -s %s" % (python_executable, validateStatusFilePath, component_type, component_address, params.hadoop_ssl_enabled)
+
+    if params.security_enabled:
+      kinit_cmd = "%s -kt %s %s;" % (params.kinit_path_local, params.smoke_user_keytab, params.smokeuser)
+      smoke_cmd = kinit_cmd + ' ' + validateStatusCmd
+    else:
+      smoke_cmd = validateStatusCmd
+
+    File(validateStatusFilePath,
+         content=StaticFile(validateStatusFileName)
+    )
+
+    Execute(smoke_cmd,
+            tries=3,
+            try_sleep=5,
+            logoutput=True
+    )
+
+    Execute(run_yarn_check_cmd, logoutput=True)
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class ServiceCheckDefault(ServiceCheck):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    params.HdfsResource(format("/user/{smokeuser}"),
+                        type="directory",
+                        action="create_on_execute",
+                        owner=params.smokeuser,
+                        mode=params.smoke_hdfs_user_mode,
+                        )
+
+    if params.stack_version_formatted_major and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.stack_version_formatted_major):
+      path_to_distributed_shell_jar = format("{stack_root}/current/hadoop-yarn-client/hadoop-yarn-applications-distributedshell.jar")
+    else:
+      path_to_distributed_shell_jar = "/usr/lib/hadoop-yarn/hadoop-yarn-applications-distributedshell*.jar"
+
+    yarn_distributed_shell_check_params = ["yarn org.apache.hadoop.yarn.applications.distributedshell.Client",
+                                           "-shell_command", "ls", "-num_containers", "{number_of_nm}",
+                                           "-jar", "{path_to_distributed_shell_jar}", "-timeout", "300000",
+                                           "--queue", "{service_check_queue_name}"]
+    yarn_distributed_shell_check_cmd = format(" ".join(yarn_distributed_shell_check_params))
+
+    if params.security_enabled:
+      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
+      smoke_cmd = format("{kinit_cmd} {yarn_distributed_shell_check_cmd}")
+    else:
+      smoke_cmd = yarn_distributed_shell_check_cmd
+
+    return_code, out = shell.checked_call(smoke_cmd,
+                                          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+                                          user=params.smokeuser,
+                                          )
+
+    m = re.search("appTrackingUrl=(.*),\s", out)
+    app_url = m.group(1)
+
+    splitted_app_url = str(app_url).split('/')
+
+    for item in splitted_app_url:
+      if "application" in item:
+        application_name = item
+
+    # Find out the active RM from RM list
+    # Raise an exception if the active rm cannot be determined
+    active_rm_webapp_address = self.get_active_rm_webapp_address()
+    Logger.info("Active Resource Manager web app address is : " + active_rm_webapp_address);
+
+    # Verify job state from active resource manager via rest api
+    info_app_url = params.scheme + "://" + active_rm_webapp_address + "/ws/v1/cluster/apps/" + application_name
+    get_app_info_cmd = "curl --negotiate -u : -ks --location-trusted --connect-timeout " + CURL_CONNECTION_TIMEOUT + " " + info_app_url
+
+    return_code, stdout, _ = get_user_call_output(get_app_info_cmd,
+                                                  user=params.smokeuser,
+                                                  path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+                                                  )
+
+    try:
+      json_response = json.loads(stdout)
+    except Exception as e:
+      raise Fail(format("Response from YARN API was not a valid JSON. Response: {stdout}"))
+
+    if json_response is None or 'app' not in json_response or \
+            'state' not in json_response['app'] or 'finalStatus' not in json_response['app']:
+      raise Fail("Application " + app_url + " returns invalid data.")
+
+    if json_response['app']['state'] != "FINISHED" or json_response['app']['finalStatus'] != "SUCCEEDED":
+      raise Fail("Application " + app_url + " state/status is not valid. Should be FINISHED/SUCCEEDED.")
+
+  def get_active_rm_webapp_address(self):
+    import params
+    active_rm_webapp_address = None
+    rm_webapp_addresses = params.rm_webapp_addresses_list
+    if rm_webapp_addresses is not None and len(rm_webapp_addresses) > 0:
+      for rm_webapp_address in rm_webapp_addresses:
+        rm_state_url = params.scheme + "://" + rm_webapp_address + "/ws/v1/cluster/info"
+        get_cluster_info_cmd = "curl --negotiate -u : -ks --location-trusted --connect-timeout " + CURL_CONNECTION_TIMEOUT + " " + rm_state_url
+        try:
+          return_code, stdout, _ = get_user_call_output(get_cluster_info_cmd,
+                                                        user=params.smokeuser,
+                                                        path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+                                                        )
+          json_response = json.loads(stdout)
+          if json_response is not None and 'clusterInfo' in json_response \
+            and json_response['clusterInfo']['haState'] == "ACTIVE":
+              active_rm_webapp_address = rm_webapp_address
+              break
+        except Exception as e:
+          Logger.warning(format("Cluster info is not available from calling {get_cluster_info_cmd}"))
+
+    if active_rm_webapp_address is None:
+      raise Fail('Resource Manager state is not available. Failed to determine the active Resource Manager web application address from {0}'.format(','.join(rm_webapp_addresses)))
+    return active_rm_webapp_address
+
+if __name__ == "__main__":
+  ServiceCheck().execute()
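
The service check above treats the distributed-shell application as successful only when the ResourceManager REST API reports state FINISHED and finalStatus SUCCEEDED. The minimal sketch below reproduces that final verification step using a canned JSON payload instead of the live curl call.

import json

payload = '{"app": {"state": "FINISHED", "finalStatus": "SUCCEEDED"}}'
response = json.loads(payload)

app = response.get('app') or {}
if app.get('state') != "FINISHED" or app.get('finalStatus') != "SUCCEEDED":
    raise Exception("Application state/status is not valid. Should be FINISHED/SUCCEEDED.")
print("service check passed")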

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/setup_ranger_yarn.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/setup_ranger_yarn.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/setup_ranger_yarn.py
new file mode 100644
index 0000000..6ea7f82
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/setup_ranger_yarn.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+from resource_management.core.logger import Logger
+
+def setup_ranger_yarn():
+  import params
+
+  if params.has_ranger_admin:
+
+    from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin
+
+    if params.retryAble:
+      Logger.info("YARN: Setup ranger: command retry enables thus retrying if ranger admin is down !")
+    else:
+      Logger.info("YARN: Setup ranger: command retry not enabled thus skipping if ranger admin is down !")
+
+    if params.xml_configurations_supported and params.enable_ranger_yarn and params.xa_audit_hdfs_is_enabled:
+      params.HdfsResource("/ranger/audit",
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.hdfs_user,
+                         group=params.hdfs_user,
+                         mode=0755,
+                         recursive_chmod=True
+      )
+      params.HdfsResource("/ranger/audit/yarn",
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.yarn_user,
+                         group=params.yarn_user,
+                         mode=0700,
+                         recursive_chmod=True
+      )
+      params.HdfsResource(None, action="execute")
+
+    setup_ranger_plugin('hadoop-yarn-resourcemanager', 'yarn', params.previous_jdbc_jar,
+                        params.downloaded_custom_connector, params.driver_curl_source,
+                        params.driver_curl_target, params.java64_home,
+                        params.repo_name, params.yarn_ranger_plugin_repo,
+                        params.ranger_env, params.ranger_plugin_properties,
+                        params.policy_user, params.policymgr_mgr_url,
+                        params.enable_ranger_yarn, conf_dict=params.hadoop_conf_dir,
+                        component_user=params.yarn_user, component_group=params.user_group, cache_service_list=['yarn'],
+                        plugin_audit_properties=params.config['configurations']['ranger-yarn-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-yarn-audit'],
+                        plugin_security_properties=params.config['configurations']['ranger-yarn-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-yarn-security'],
+                        plugin_policymgr_ssl_properties=params.config['configurations']['ranger-yarn-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-yarn-policymgr-ssl'],
+                        component_list=['hadoop-yarn-resourcemanager'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
+                        credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password, 
+                        ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
+                        api_version = 'v2', skip_if_rangeradmin_down= not params.retryAble,
+                        is_security_enabled = params.security_enabled,
+                        is_stack_supports_ranger_kerberos = params.stack_supports_ranger_kerberos,
+                        component_user_principal=params.rm_principal_name if params.security_enabled else None,
+                        component_user_keytab=params.rm_keytab if params.security_enabled else None
+      )
+  else:
+    Logger.info('Ranger admin not installed')
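
setup_ranger_yarn() queues the Ranger audit directories with action="create_on_execute" and then flushes the whole batch with HdfsResource(None, action="execute"). The toy stand-in below mimics only that batch-then-execute contract (octal modes are written in the scripts' Python 2 style); the real resource talks to WebHDFS or the hdfs CLI.

class ToyHdfsResource(object):
    # Toy stand-in: collect queued directories, "create" them on execute.
    pending = []

    def __init__(self, path, action=None, **kwargs):
        if path is not None and action == "create_on_execute":
            ToyHdfsResource.pending.append((path, kwargs))
        elif path is None and action == "execute":
            for queued_path, attrs in ToyHdfsResource.pending:
                print("creating %s with %s" % (queued_path, attrs))
            ToyHdfsResource.pending = []

ToyHdfsResource("/ranger/audit", action="create_on_execute", mode=0755)
ToyHdfsResource("/ranger/audit/yarn", action="create_on_execute", mode=0700)
ToyHdfsResource(None, action="execute")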

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/status_params.py
new file mode 100644
index 0000000..c2e9d92
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/status_params.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.libraries.script.script import Script
+from resource_management.libraries import functions
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.default import default
+from ambari_commons import OSCheck
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+if OSCheck.is_windows_family():
+  resourcemanager_win_service_name = 'resourcemanager'
+  nodemanager_win_service_name = 'nodemanager'
+  historyserver_win_service_name = 'historyserver'
+  timelineserver_win_service_name = 'timelineserver'
+
+  service_map = {
+    'resourcemanager' : resourcemanager_win_service_name,
+    'nodemanager' : nodemanager_win_service_name,
+    'historyserver' : historyserver_win_service_name,
+    'timelineserver' : timelineserver_win_service_name
+  }
+else:
+  mapred_user = config['configurations']['mapred-env']['mapred_user']
+  yarn_user = config['configurations']['yarn-env']['yarn_user']
+  yarn_pid_dir_prefix = config['configurations']['yarn-env']['yarn_pid_dir_prefix']
+  mapred_pid_dir_prefix = config['configurations']['mapred-env']['mapred_pid_dir_prefix']
+  yarn_pid_dir = format("{yarn_pid_dir_prefix}/{yarn_user}")
+  mapred_pid_dir = format("{mapred_pid_dir_prefix}/{mapred_user}")
+
+  resourcemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-resourcemanager.pid")
+  nodemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-nodemanager.pid")
+  yarn_historyserver_pid_file_old = format("{yarn_pid_dir}/yarn-{yarn_user}-historyserver.pid")
+  yarn_historyserver_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-timelineserver.pid")  # *-historyserver.pid is deprecated
+  mapred_historyserver_pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-historyserver.pid")
+
+  hadoop_conf_dir = functions.conf_select.get_hadoop_conf_dir()
+
+  hostname = config['hostname']
+  kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+  security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+stack_name = default("/hostLevelParams/stack_name", None)
\ No newline at end of file
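
For orientation, the pid-file paths above are what a component script's status() check reads. A minimal sketch of that usage, assuming the stock check_process_status helper from resource_management (the Resourcemanager class here is purely illustrative):

from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.check_process_status import check_process_status
import status_params

class Resourcemanager(Script):
  def status(self, env):
    env.set_params(status_params)
    # check_process_status raises ComponentIsNotRunning if the PID file is missing or stale
    check_process_status(status_params.resourcemanager_pid_file)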

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/yarn.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/yarn.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/yarn.py
new file mode 100644
index 0000000..70ed5b3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/yarn.py
@@ -0,0 +1,498 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+# Python Imports
+import os
+
+# Ambari Common and Resource Management Imports
+from resource_management.libraries.script.script import Script
+from resource_management.core.resources.service import ServiceConfig
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.core.resources.system import Directory
+from resource_management.core.resources.system import File
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.core.source import InlineTemplate, Template
+from resource_management.core.logger import Logger
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+
+from resource_management.libraries.functions.mounted_dirs_helper import handle_mounted_dirs
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def yarn(name=None, config_dir=None):
+  """
+  :param name: Component name, apptimelineserver, nodemanager, resourcemanager, or None (defaults for client)
+  :param config_dir: Config directory to write configs to; this can differ during a rolling upgrade.
+  """
+  import params
+
+  if name == 'resourcemanager':
+    setup_resourcemanager()
+  elif name == 'nodemanager':
+    setup_nodemanager()
+  elif name == 'apptimelineserver':
+    setup_ats()
+  elif name == 'historyserver':
+    setup_historyserver()
+
+  if config_dir is None:
+    config_dir = params.hadoop_conf_dir
+
+  if params.yarn_nodemanager_recovery_dir:
+    Directory(InlineTemplate(params.yarn_nodemanager_recovery_dir).get_content(),
+              owner=params.yarn_user,
+              group=params.user_group,
+              create_parents = True,
+              mode=0755,
+              cd_access = 'a',
+    )
+
+  Directory([params.yarn_pid_dir_prefix, params.yarn_pid_dir, params.yarn_log_dir],
+            owner=params.yarn_user,
+            group=params.user_group,
+            create_parents = True,
+            cd_access = 'a',
+  )
+
+  Directory([params.mapred_pid_dir_prefix, params.mapred_pid_dir, params.mapred_log_dir_prefix, params.mapred_log_dir],
+            owner=params.mapred_user,
+            group=params.user_group,
+            create_parents = True,
+            cd_access = 'a',
+  )
+  Directory([params.yarn_log_dir_prefix],
+            owner=params.yarn_user,
+            group=params.user_group,
+            create_parents = True,
+            ignore_failures=True,
+            cd_access = 'a',
+  )
+
+  XmlConfig("core-site.xml",
+            conf_dir=config_dir,
+            configurations=params.config['configurations']['core-site'],
+            configuration_attributes=params.config['configuration_attributes']['core-site'],
+            owner=params.hdfs_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  # During RU, Core Masters and Slaves need hdfs-site.xml
+  # TODO, instead of specifying individual configs, which is susceptible to breaking when new configs are added,
+  # RU should rely on all configs available in <stack-root>/<version>/hadoop/conf
+  XmlConfig("hdfs-site.xml",
+            conf_dir=config_dir,
+            configurations=params.config['configurations']['hdfs-site'],
+            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+            owner=params.hdfs_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  XmlConfig("mapred-site.xml",
+            conf_dir=config_dir,
+            configurations=params.config['configurations']['mapred-site'],
+            configuration_attributes=params.config['configuration_attributes']['mapred-site'],
+            owner=params.yarn_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  XmlConfig("yarn-site.xml",
+            conf_dir=config_dir,
+            configurations=params.config['configurations']['yarn-site'],
+            configuration_attributes=params.config['configuration_attributes']['yarn-site'],
+            owner=params.yarn_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  XmlConfig("capacity-scheduler.xml",
+            conf_dir=config_dir,
+            configurations=params.config['configurations']['capacity-scheduler'],
+            configuration_attributes=params.config['configuration_attributes']['capacity-scheduler'],
+            owner=params.yarn_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  File(format("{limits_conf_dir}/yarn.conf"),
+       mode=0644,
+       content=Template('yarn.conf.j2')
+  )
+
+  File(format("{limits_conf_dir}/mapreduce.conf"),
+       mode=0644,
+       content=Template('mapreduce.conf.j2')
+  )
+
+  File(os.path.join(config_dir, "yarn-env.sh"),
+       owner=params.yarn_user,
+       group=params.user_group,
+       mode=0755,
+       content=InlineTemplate(params.yarn_env_sh_template)
+  )
+
+  File(format("{yarn_container_bin}/container-executor"),
+      group=params.yarn_executor_container_group,
+      mode=params.container_executor_mode
+  )
+
+  File(os.path.join(config_dir, "container-executor.cfg"),
+      group=params.user_group,
+      mode=0644,
+      content=Template('container-executor.cfg.j2')
+  )
+
+  Directory(params.cgroups_dir,
+            group=params.user_group,
+            create_parents = True,
+            mode=0755,
+            cd_access="a")
+
+  File(os.path.join(config_dir, "mapred-env.sh"),
+       owner=params.tc_owner,
+       mode=0755,
+       content=InlineTemplate(params.mapred_env_sh_template)
+  )
+
+  if params.security_enabled:
+    File(os.path.join(params.hadoop_bin, "task-controller"),
+         owner="root",
+         group=params.mapred_tt_group,
+         mode=06050
+    )
+    File(os.path.join(config_dir, 'taskcontroller.cfg'),
+         owner = params.tc_owner,
+         mode = params.tc_mode,
+         group = params.mapred_tt_group,
+         content=Template("taskcontroller.cfg.j2")
+    )
+  else:
+    File(os.path.join(config_dir, 'taskcontroller.cfg'),
+         owner=params.tc_owner,
+         content=Template("taskcontroller.cfg.j2")
+    )
+
+  XmlConfig("mapred-site.xml",
+            conf_dir=config_dir,
+            configurations=params.config['configurations']['mapred-site'],
+            configuration_attributes=params.config['configuration_attributes']['mapred-site'],
+            owner=params.mapred_user,
+            group=params.user_group
+  )
+
+  XmlConfig("capacity-scheduler.xml",
+            conf_dir=config_dir,
+            configurations=params.config['configurations'][
+              'capacity-scheduler'],
+            configuration_attributes=params.config['configuration_attributes']['capacity-scheduler'],
+            owner=params.hdfs_user,
+            group=params.user_group
+  )
+
+  if "ssl-client" in params.config['configurations']:
+    XmlConfig("ssl-client.xml",
+              conf_dir=config_dir,
+              configurations=params.config['configurations']['ssl-client'],
+              configuration_attributes=params.config['configuration_attributes']['ssl-client'],
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+
+    Directory(params.hadoop_conf_secure_dir,
+              create_parents = True,
+              owner='root',
+              group=params.user_group,
+              cd_access='a',
+              )
+
+    XmlConfig("ssl-client.xml",
+              conf_dir=params.hadoop_conf_secure_dir,
+              configurations=params.config['configurations']['ssl-client'],
+              configuration_attributes=params.config['configuration_attributes']['ssl-client'],
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+
+  if "ssl-server" in params.config['configurations']:
+    XmlConfig("ssl-server.xml",
+              conf_dir=config_dir,
+              configurations=params.config['configurations']['ssl-server'],
+              configuration_attributes=params.config['configuration_attributes']['ssl-server'],
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+  if os.path.exists(os.path.join(config_dir, 'fair-scheduler.xml')):
+    File(os.path.join(config_dir, 'fair-scheduler.xml'),
+         owner=params.mapred_user,
+         group=params.user_group
+    )
+
+  if os.path.exists(
+    os.path.join(config_dir, 'ssl-client.xml.example')):
+    File(os.path.join(config_dir, 'ssl-client.xml.example'),
+         owner=params.mapred_user,
+         group=params.user_group
+    )
+
+  if os.path.exists(
+    os.path.join(config_dir, 'ssl-server.xml.example')):
+    File(os.path.join(config_dir, 'ssl-server.xml.example'),
+         owner=params.mapred_user,
+         group=params.user_group
+    )
+
+def setup_historyserver():
+  import params
+
+  if params.yarn_log_aggregation_enabled:
+    params.HdfsResource(params.yarn_nm_app_log_dir,
+                         action="create_on_execute",
+                         type="directory",
+                         owner=params.yarn_user,
+                         group=params.user_group,
+                         mode=01777,
+                         recursive_chmod=True
+    )
+
+  # create the /tmp folder with proper permissions if it doesn't exist yet
+  if params.entity_file_history_directory.startswith('/tmp'):
+      params.HdfsResource(params.hdfs_tmp_dir,
+                          action="create_on_execute",
+                          type="directory",
+                          owner=params.hdfs_user,
+                          mode=0777,
+      )
+
+  params.HdfsResource(params.entity_file_history_directory,
+                         action="create_on_execute",
+                         type="directory",
+                         owner=params.yarn_user,
+                         group=params.user_group
+  )
+  params.HdfsResource("/mapred",
+                       type="directory",
+                       action="create_on_execute",
+                       owner=params.mapred_user
+  )
+  params.HdfsResource("/mapred/system",
+                       type="directory",
+                       action="create_on_execute",
+                       owner=params.hdfs_user
+  )
+  params.HdfsResource(params.mapreduce_jobhistory_done_dir,
+                       type="directory",
+                       action="create_on_execute",
+                       owner=params.mapred_user,
+                       group=params.user_group,
+                       change_permissions_for_parents=True,
+                       mode=0777
+  )
+  params.HdfsResource(None, action="execute")
+  Directory(params.jhs_leveldb_state_store_dir,
+            owner=params.mapred_user,
+            group=params.user_group,
+            create_parents = True,
+            cd_access="a",
+            recursive_ownership = True,
+            )
+
+def setup_nodemanager():
+  import params
+
+  # First start after enabling/disabling security
+  if params.toggle_nm_security:
+    Directory(params.nm_local_dirs_list + params.nm_log_dirs_list,
+              action='delete'
+    )
+
+    # If yarn.nodemanager.recovery.dir exists, remove this dir
+    if params.yarn_nodemanager_recovery_dir:
+      Directory(InlineTemplate(params.yarn_nodemanager_recovery_dir).get_content(),
+                action='delete'
+      )
+
+    # Setting NM marker file
+    if params.security_enabled:
+      Directory(params.nm_security_marker_dir)
+      File(params.nm_security_marker,
+           content="Marker file to track first start after enabling/disabling security. "
+                   "During first start yarn local, log dirs are removed and recreated"
+           )
+    elif not params.security_enabled:
+      File(params.nm_security_marker, action="delete")
+
+
+  if not params.security_enabled or params.toggle_nm_security:
+    # handle_mounted_dirs ensures that we don't create dirs which are temporarily unavailable (unmounted) and intended to reside on a different mount.
+    nm_log_dir_to_mount_file_content = handle_mounted_dirs(create_log_dir, params.nm_log_dirs, params.nm_log_dir_to_mount_file, params)
+    # create a history file used by handle_mounted_dirs
+    File(params.nm_log_dir_to_mount_file,
+         owner=params.hdfs_user,
+         group=params.user_group,
+         mode=0644,
+         content=nm_log_dir_to_mount_file_content
+    )
+    nm_local_dir_to_mount_file_content = handle_mounted_dirs(create_local_dir, params.nm_local_dirs, params.nm_local_dir_to_mount_file, params)
+    File(params.nm_local_dir_to_mount_file,
+         owner=params.hdfs_user,
+         group=params.user_group,
+         mode=0644,
+         content=nm_local_dir_to_mount_file_content
+    )
+
+def setup_resourcemanager():
+  import params
+
+  Directory(params.rm_nodes_exclude_dir,
+       mode=0755,
+       create_parents=True,
+       cd_access='a',
+  )
+  File(params.rm_nodes_exclude_path,
+       owner=params.yarn_user,
+       group=params.user_group
+  )
+  File(params.yarn_job_summary_log,
+     owner=params.yarn_user,
+     group=params.user_group
+  )
+  if (not is_empty(params.node_label_enable) and params.node_label_enable) or (is_empty(params.node_label_enable) and params.node_labels_dir):
+    params.HdfsResource(params.node_labels_dir,
+                         type="directory",
+                         action="create_on_execute",
+                         change_permissions_for_parents=True,
+                         owner=params.yarn_user,
+                         group=params.user_group,
+                         mode=0700
+    )
+    params.HdfsResource(None, action="execute")
+
+def setup_ats():
+  import params
+
+  Directory(params.ats_leveldb_dir,
+     owner=params.yarn_user,
+     group=params.user_group,
+     create_parents = True,
+     cd_access="a",
+  )
+
+  # if the stack supports the application timeline-service state store property (timeline_state_store stack feature)
+  if params.stack_supports_timeline_state_store:
+    Directory(params.ats_leveldb_state_store_dir,
+     owner=params.yarn_user,
+     group=params.user_group,
+     create_parents = True,
+     cd_access="a",
+    )
+  # app timeline server 1.5 directories
+  if not is_empty(params.entity_groupfs_store_dir):
+    parent_path = os.path.dirname(params.entity_groupfs_store_dir)
+    params.HdfsResource(parent_path,
+                        type="directory",
+                        action="create_on_execute",
+                        change_permissions_for_parents=True,
+                        owner=params.yarn_user,
+                        group=params.user_group,
+                        mode=0755
+                        )
+    params.HdfsResource(params.entity_groupfs_store_dir,
+                        type="directory",
+                        action="create_on_execute",
+                        owner=params.yarn_user,
+                        group=params.user_group,
+                        mode=params.entity_groupfs_store_dir_mode
+                        )
+  if not is_empty(params.entity_groupfs_active_dir):
+    parent_path = os.path.dirname(params.entity_groupfs_active_dir)
+    params.HdfsResource(parent_path,
+                        type="directory",
+                        action="create_on_execute",
+                        change_permissions_for_parents=True,
+                        owner=params.yarn_user,
+                        group=params.user_group,
+                        mode=0755
+                        )
+    params.HdfsResource(params.entity_groupfs_active_dir,
+                        type="directory",
+                        action="create_on_execute",
+                        owner=params.yarn_user,
+                        group=params.user_group,
+                        mode=params.entity_groupfs_active_dir_mode
+                        )
+  params.HdfsResource(None, action="execute")
+
+def create_log_dir(dir_name):
+  import params
+  Directory(dir_name,
+            create_parents = True,
+            cd_access="a",
+            mode=0775,
+            owner=params.yarn_user,
+            group=params.user_group,
+            ignore_failures=True,
+  )
+
+def create_local_dir(dir_name):
+  import params
+  Directory(dir_name,
+            create_parents = True,
+            cd_access="a",
+            mode=0755,
+            owner=params.yarn_user,
+            group=params.user_group,
+            ignore_failures=True,
+            recursive_mode_flags = {'f': 'a+rw', 'd': 'a+rwx'},
+  )
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def yarn(name = None):
+  import params
+  XmlConfig("mapred-site.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['mapred-site'],
+            owner=params.yarn_user,
+            mode='f'
+  )
+  XmlConfig("yarn-site.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['yarn-site'],
+            owner=params.yarn_user,
+            mode='f',
+            configuration_attributes=params.config['configuration_attributes']['yarn-site']
+  )
+  XmlConfig("capacity-scheduler.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['capacity-scheduler'],
+            owner=params.yarn_user,
+            mode='f'
+  )
+
+  if name in params.service_map:
+    service_name = params.service_map[name]
+
+    ServiceConfig(service_name,
+                  action="change_user",
+                  username = params.yarn_user,
+                  password = Script.get_password(params.yarn_user))
\ No newline at end of file
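
As a quick illustration of how the yarn() helper above is driven, a component script's configure() typically just sets params and calls it with its component name. A minimal sketch, where the Resourcemanager class and its import layout are assumptions for illustration:

from resource_management.libraries.script.script import Script
from yarn import yarn

class Resourcemanager(Script):
  def configure(self, env):
    import params
    env.set_params(params)
    # writes core/hdfs/mapred/yarn-site.xml, env scripts and limits files,
    # then runs the resourcemanager-specific setup (exclude file, node labels dir)
    yarn(name='resourcemanager')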

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/yarn_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/yarn_client.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/yarn_client.py
new file mode 100644
index 0000000..beea8b9
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/yarn_client.py
@@ -0,0 +1,67 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.core.exceptions import ClientComponentHasNoStatus
+from yarn import yarn
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+
+class YarnClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn()
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class YarnClientWindows(YarnClient):
+  pass
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class YarnClientDefault(YarnClient):
+  def get_component_name(self):
+    return "hadoop-client"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-client", params.version)
+
+
+if __name__ == "__main__":
+  YarnClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/templates/container-executor.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/templates/container-executor.cfg.j2 b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/templates/container-executor.cfg.j2
new file mode 100644
index 0000000..c6f1ff6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/templates/container-executor.cfg.j2
@@ -0,0 +1,40 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+yarn.nodemanager.local-dirs={{nm_local_dirs}}
+yarn.nodemanager.log-dirs={{nm_log_dirs}}
+yarn.nodemanager.linux-container-executor.group={{yarn_executor_container_group}}
+banned.users=hdfs,yarn,mapred,bin
+min.user.id={{min_user_id}}


[42/51] [abbrv] ambari git commit: AMBARI-18801. Druid Router should be specified as a MASTER instead of SLAVE (Nishant Bangarwa via smohanty)

Posted by sm...@apache.org.
AMBARI-18801. Druid Router should be specified as a MASTER instead of SLAVE (Nishant Bangarwa via smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/5e2c267f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/5e2c267f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/5e2c267f

Branch: refs/heads/branch-feature-AMBARI-18901
Commit: 5e2c267f77639e8d9a8dbd5884f681c95781f4b5
Parents: d845449
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Thu Dec 8 21:36:07 2016 -0800
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Thu Dec 8 21:36:07 2016 -0800

----------------------------------------------------------------------
 .../src/main/resources/common-services/DRUID/0.9.2/metainfo.xml  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/5e2c267f/ambari-server/src/main/resources/common-services/DRUID/0.9.2/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/metainfo.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/metainfo.xml
index c897f12..abe6e8c 100644
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/metainfo.xml
@@ -127,8 +127,8 @@
         <component>
           <name>DRUID_ROUTER</name>
           <displayName>Druid Router</displayName>
-          <category>SLAVE</category>
-          <cardinality>0+</cardinality>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
           <dependencies>
             <dependency>
               <name>HDFS/HDFS_CLIENT</name>


[47/51] [abbrv] ambari git commit: Revert "Revert "AMBARI-18888: Ambari-agent: Create configuration files with JCEKS information""

Posted by sm...@apache.org.
Revert "Revert "AMBARI-18888: Ambari-agent: Create configuration files with JCEKS information""

Set the value of credential store enabled on the execution command in AmbariManagementController::createHostAction() where the cluster and service are available.

This reverts commit 60a6bd4575fb87fc26c4a277cbabf850ef2089e1.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/24ac5cda
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/24ac5cda
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/24ac5cda

Branch: refs/heads/branch-feature-AMBARI-18901
Commit: 24ac5cdae228c0f421034f5c897f0635eb9bf52e
Parents: 2a84081
Author: Nahappan Somasundaram <ns...@hortonworks.com>
Authored: Fri Dec 2 08:23:43 2016 -0800
Committer: Nahappan Somasundaram <ns...@hortonworks.com>
Committed: Fri Dec 9 07:56:06 2016 -0800

----------------------------------------------------------------------
 ambari-agent/conf/unix/ambari-agent.ini         |   3 +
 .../ambari_agent/CustomServiceOrchestrator.py   | 120 +++++++++++++++++++
 ambari-agent/src/packages/tarball/all.xml       |  30 +++++
 .../ambari/server/agent/ExecutionCommand.java   |  28 +++++
 .../ambari/server/agent/HeartBeatHandler.java   |   2 +-
 .../AmbariManagementControllerImpl.java         |   5 +
 .../server/agent/TestHeartbeatHandler.java      |  28 +++--
 7 files changed, 207 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
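
In rough terms, the flow is: the server stamps each execution command with a credentialStoreEnabled string ("true"/"false") taken from the service definition, and when that flag is "true" the agent generates a JCEKS keystore for the hidden passwords before dispatching the command. A small self-contained sketch of the agent-side test (illustrative values only):

def should_generate_jceks(command):
  # The server serializes the flag as the string "true"/"false"; anything else counts as disabled.
  return command.get('credentialStoreEnabled') == "true"

command = {'serviceName': 'HIVE', 'credentialStoreEnabled': 'true'}  # illustrative payload
print should_generate_jceks(command)  # True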


http://git-wip-us.apache.org/repos/asf/ambari/blob/24ac5cda/ambari-agent/conf/unix/ambari-agent.ini
----------------------------------------------------------------------
diff --git a/ambari-agent/conf/unix/ambari-agent.ini b/ambari-agent/conf/unix/ambari-agent.ini
index c1d4c02..f2c8846 100644
--- a/ambari-agent/conf/unix/ambari-agent.ini
+++ b/ambari-agent/conf/unix/ambari-agent.ini
@@ -46,6 +46,9 @@ keysdir=/var/lib/ambari-agent/keys
 server_crt=ca.crt
 passphrase_env_var_name=AMBARI_PASSPHRASE
 ssl_verify_cert=0
+credential_lib_dir=/var/lib/ambari-agent/cred/lib
+credential_conf_dir=/var/lib/ambari-agent/cred/conf
+credential_shell_cmd=org.apache.hadoop.security.alias.CredentialShell
 
 [services]
 pidLookupPath=/var/run/

http://git-wip-us.apache.org/repos/asf/ambari/blob/24ac5cda/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py b/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py
index 11c8cbe..f9ed4cf 100644
--- a/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py
+++ b/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py
@@ -29,6 +29,8 @@ from FileCache import FileCache
 from AgentException import AgentException
 from PythonExecutor import PythonExecutor
 from PythonReflectiveExecutor import PythonReflectiveExecutor
+from resource_management.core.utils import PasswordString
+import subprocess
 import Constants
 import hostname
 
@@ -65,6 +67,11 @@ class CustomServiceOrchestrator():
   REFLECTIVELY_RUN_COMMANDS = FREQUENT_COMMANDS # -- commands which run a lot and often (this increases their speed)
   DONT_BACKUP_LOGS_FOR_COMMANDS = FREQUENT_COMMANDS
 
+  # Path where hadoop credential JARS will be available
+  DEFAULT_CREDENTIAL_SHELL_LIB_PATH = '/var/lib/ambari-agent/cred/lib'
+  DEFAULT_CREDENTIAL_CONF_DIR = '/var/lib/ambari-agent/cred/conf'
+  DEFAULT_CREDENTIAL_SHELL_CMD = 'org.apache.hadoop.security.alias.CredentialShell'
+
   def __init__(self, config, controller):
     self.config = config
     self.tmp_dir = config.get('agent', 'prefix')
@@ -78,6 +85,14 @@ class CustomServiceOrchestrator():
     # cache reset will be called on every agent registration
     controller.registration_listeners.append(self.file_cache.reset)
 
+    # Construct the hadoop credential lib JARs path
+    self.credential_shell_lib_path = os.path.join(config.get('security', 'credential_lib_dir',
+                                                             self.DEFAULT_CREDENTIAL_SHELL_LIB_PATH), '*')
+
+    self.credential_conf_dir = config.get('security', 'credential_conf_dir', self.DEFAULT_CREDENTIAL_CONF_DIR)
+
+    self.credential_shell_cmd = config.get('security', 'credential_shell_cmd', self.DEFAULT_CREDENTIAL_SHELL_CMD)
+
     # Clean up old status command files if any
     try:
       os.unlink(self.status_commands_stdout)
@@ -114,6 +129,102 @@ class CustomServiceOrchestrator():
     else:
       return PythonExecutor(self.tmp_dir, self.config)
 
+  def getProviderDirectory(self, service_name):
+    """
+    Gets the path to the service conf folder where the JCEKS file will be created.
+
+    :param service_name: Name of the service, for example, HIVE
+    :return: lower case path to the service conf folder
+    """
+
+    # The stack definition scripts of the service can move the
+    # JCEKS file around to where it wants, which is usually
+    # /etc/<service_name>/conf
+
+    conf_dir = os.path.join(self.credential_conf_dir, service_name.lower())
+    if not os.path.exists(conf_dir):
+      os.makedirs(conf_dir, 0644)
+
+    return conf_dir
+
+  def getAffectedConfigTypes(self, commandJson):
+    """
+    Gets the affected config types for the service in this command
+
+    :param commandJson:
+    :return:
+    """
+    return commandJson.get('configuration_attributes')
+
+  def getCredentialProviderPropertyName(self):
+    """
+    Gets the property name used by the hadoop credential provider
+    :return:
+    """
+    return 'hadoop.security.credential.provider.path'
+
+  def generateJceks(self, commandJson):
+    """
+    Generates the JCEKS file with passwords for the service specified in commandJson
+
+    :param commandJson: command JSON
+    :return: An exit value from the external process that generated the JCEKS file. None if
+    there are no passwords in the JSON.
+    """
+    cmd_result = None
+    roleCommand = None
+    if 'roleCommand' in commandJson:
+      roleCommand = commandJson['roleCommand']
+
+    logger.info('generateJceks: roleCommand={0}'.format(roleCommand))
+
+    # Password properties for a config type, if present,
+    # are under configuration_attributes:config_type:hidden:{prop1:attributes1, prop2:attributes2}
+    passwordProperties = {}
+    config_types = self.getAffectedConfigTypes(commandJson)
+    for config_type in config_types:
+      elem = config_types.get(config_type)
+      hidden = elem.get('hidden')
+      if hidden is not None:
+        passwordProperties[config_type] = hidden
+
+    # Set up the variables for the external command to generate a JCEKS file
+    java_home = commandJson['hostLevelParams']['java_home']
+    java_bin = '{java_home}/bin/java'.format(java_home=java_home)
+
+    cs_lib_path = self.credential_shell_lib_path
+    serviceName = commandJson['serviceName']
+
+    # Gather the password values and remove them from the configuration
+    configs = commandJson.get('configurations')
+    for key, value in passwordProperties.items():
+      config = configs.get(key)
+      if config is not None:
+        file_path = os.path.join(self.getProviderDirectory(serviceName), "{0}.jceks".format(key))
+        if os.path.exists(file_path):
+          os.remove(file_path)
+        provider_path = 'jceks://file{file_path}'.format(file_path=file_path)
+        logger.info('provider_path={0}'.format(provider_path))
+        for alias in value:
+          pwd = config.get(alias)
+          if pwd is not None:
+            # Remove the clear text password
+            config.pop(alias, None)
+            # Add JCEKS provider path instead
+            config[self.getCredentialProviderPropertyName()] = provider_path
+            logger.debug("config={0}".format(config))
+            protected_pwd = PasswordString(pwd)
+            # Generate the JCEKS file
+            cmd = (java_bin, '-cp', cs_lib_path, self.credential_shell_cmd, 'create',
+                   alias, '-value', protected_pwd, '-provider', provider_path)
+            logger.info(cmd)
+            cmd_result = subprocess.call(cmd)
+            logger.info('cmd_result = {0}'.format(cmd_result))
+            os.chmod(file_path, 0644) # group and others should have read access so that the service user can read
+
+    return cmd_result
+
+
   def runCommand(self, command, tmpoutfile, tmperrfile, forced_command_name=None,
                  override_output_files=True, retry=False):
     """
@@ -174,6 +285,15 @@ class CustomServiceOrchestrator():
         handle.on_background_command_started = self.map_task_to_process
         del command['__handle']
 
+      # If command contains credentialStoreEnabled, then
+      # generate the JCEKS file for the configurations.
+      credentialStoreEnabled = False
+      if 'credentialStoreEnabled' in command:
+        credentialStoreEnabled = (command['credentialStoreEnabled'] == "true")
+
+      if credentialStoreEnabled:
+        self.generateJceks(command)
+
       json_path = self.dump_command_to_json(command, retry)
       pre_hook_tuple = self.resolve_hook_script_path(hook_dir,
           self.PRE_HOOK_PREFIX, command_name, script_type)
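
To make the data shapes concrete: password properties arrive under configuration_attributes/<config_type>/hidden, and generateJceks() strips the clear-text value from configurations and adds a hadoop.security.credential.provider.path entry pointing at the per-config-type JCEKS file. An illustrative fragment (property names and values are made up, not taken from a real command):

command = {
  'serviceName': 'HIVE',
  'configuration_attributes': {
    'hive-site': {'hidden': {'javax.jdo.option.ConnectionPassword': 'HIVE_CLIENT'}}
  },
  'configurations': {
    'hive-site': {'javax.jdo.option.ConnectionPassword': 'secret'}
  }
}

# After generateJceks(command), the hive-site config would roughly look like:
#   {'hadoop.security.credential.provider.path':
#    'jceks://file/var/lib/ambari-agent/cred/conf/hive/hive-site.jceks'}
# and the clear-text 'secret' is only stored inside the JCEKS file.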

http://git-wip-us.apache.org/repos/asf/ambari/blob/24ac5cda/ambari-agent/src/packages/tarball/all.xml
----------------------------------------------------------------------
diff --git a/ambari-agent/src/packages/tarball/all.xml b/ambari-agent/src/packages/tarball/all.xml
index c481208..f8a54e3 100644
--- a/ambari-agent/src/packages/tarball/all.xml
+++ b/ambari-agent/src/packages/tarball/all.xml
@@ -190,4 +190,34 @@
       <outputDirectory>/var/lib/${project.artifactId}/data</outputDirectory>
     </file>
   </files>
+  <moduleSets>
+    <moduleSet>
+      <binaries>
+        <includeDependencies>false</includeDependencies>
+        <outputDirectory>/var/lib/${project.artifactId}/cred/lib</outputDirectory>
+        <unpack>false</unpack>
+        <directoryMode>755</directoryMode>
+        <fileMode>644</fileMode>
+        <dependencySets>
+          <dependencySet>
+            <outputDirectory>/var/lib/${project.artifactId}/cred/lib</outputDirectory>
+            <unpack>false</unpack>
+            <includes>
+              <include>commons-cli:commons-cli</include>
+              <include>commons-collections:commons-collections</include>
+              <include>commons-configuration:commons-configuration</include>
+              <include>commons-io:commons-io:jar:${commons.io.version}</include>
+              <include>commons-lang:commons-lang</include>
+              <include>commons-logging:commons-logging</include>
+              <include>com.google.guava:guava</include>
+              <include>org.slf4j:slf4j-api</include>
+              <include>org.apache.hadoop:hadoop-common</include>
+              <include>org.apache.hadoop:hadoop-auth</include>
+              <include>org.apache.htrace:htrace-core</include>
+            </includes>
+          </dependencySet>
+        </dependencySets>
+      </binaries>
+    </moduleSet>
+  </moduleSets>
 </assembly>

http://git-wip-us.apache.org/repos/asf/ambari/blob/24ac5cda/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
index ef1ee4f..e46167a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
@@ -112,6 +112,13 @@ public class ExecutionCommand extends AgentCommand {
   @SerializedName("availableServices")
   private Map<String, String> availableServices = new HashMap<>();
 
+  /**
+   * "true" or "false" indicating whether this
+   * service is enabled for credential store use.
+   */
+  @SerializedName("credentialStoreEnabled")
+  private String credentialStoreEnabled;
+
   public String getCommandId() {
     return commandId;
   }
@@ -295,6 +302,27 @@ public class ExecutionCommand extends AgentCommand {
 	this.serviceType = serviceType;
   }
 
+  /**
+   * Get a value indicating whether this service is enabled
+   * for credential store use.
+   *
+   * @return "true" or "false", any other value is
+   * considered as "false"
+   */
+  public String getCredentialStoreEnabled() {
+    return credentialStoreEnabled;
+  }
+
+  /**
+   * Set a value indicating whether this service is enabled
+   * for credential store use.
+   *
+   * @param credentialStoreEnabled
+   */
+  public void setCredentialStoreEnabled(String credentialStoreEnabled) {
+    this.credentialStoreEnabled = credentialStoreEnabled;
+  }
+
   public String getComponentName() {
     return componentName;
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/24ac5cda/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java
index 75bef30..a25b875 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java
@@ -329,7 +329,7 @@ public class HeartBeatHandler {
           case BACKGROUND_EXECUTION_COMMAND:
           case EXECUTION_COMMAND: {
             ExecutionCommand ec = (ExecutionCommand)ac;
-            LOG.info("HeartBeatHandler.sendCommands: sending ExecutionCommand for host {}, role {}, roleCommand {}, and command ID {}, taskId {}",
+            LOG.info("HeartBeatHandler.sendCommands: sending ExecutionCommand for host {}, role {}, roleCommand {}, and command ID {}, task ID {}",
                      ec.getHostname(), ec.getRole(), ec.getRoleCommand(), ec.getCommandId(), ec.getTaskId());
             Map<String, String> hlp = ec.getHostLevelParams();
             if (hlp != null) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/24ac5cda/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index c3cd82e..9bf046b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -2139,6 +2139,10 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     execCmd.setConfigurationAttributes(configurationAttributes);
     execCmd.setConfigurationTags(configTags);
 
+    // Get the value of credential store enabled from the DB
+    Service clusterService = cluster.getService(serviceName);
+    execCmd.setCredentialStoreEnabled(String.valueOf(clusterService.isCredentialStoreEnabled()));
+
     // Create a local copy for each command
     Map<String, String> commandParams = new TreeMap<String, String>();
     if (commandParamsInp != null) { // if not defined
@@ -2345,6 +2349,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
 
     execCmd.setAvailableServicesFromServiceInfoMap(ambariMetaInfo.getServices(stackId.getStackName(), stackId.getStackVersion()));
 
+
     if ((execCmd != null) && (execCmd.getConfigurationTags().containsKey("cluster-env"))) {
       LOG.debug("AmbariManagementControllerImpl.createHostAction: created ExecutionCommand for host {}, role {}, roleCommand {}, and command ID {}, with cluster-env tags {}",
         execCmd.getHostname(), execCmd.getRole(), execCmd.getRoleCommand(), execCmd.getCommandId(), execCmd.getConfigurationTags().get("cluster-env").get("tag"));

http://git-wip-us.apache.org/repos/asf/ambari/blob/24ac5cda/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
index a50a116..ac58f64 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
@@ -48,6 +48,7 @@ import java.io.File;
 import java.io.FileWriter;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.Date;
 import java.util.HashMap;
@@ -171,22 +172,31 @@ public class TestHeartbeatHandler {
     ActionManager am = actionManagerTestHelper.getMockActionManager();
     expect(am.getTasks(anyObject(List.class))).andReturn(new ArrayList<HostRoleCommand>());
     replay(am);
+
+    Cluster cluster = heartbeatTestHelper.getDummyCluster();
+    Service hdfs = cluster.addService(HDFS);
+    hdfs.addServiceComponent(DATANODE);
+    hdfs.addServiceComponent(NAMENODE);
+    hdfs.addServiceComponent(SECONDARY_NAMENODE);
+    Collection<Host> hosts = cluster.getHosts();
+    assertEquals(hosts.size(), 1);
+
     Clusters fsm = clusters;
-    fsm.addHost(DummyHostname1);
-    Host hostObject = clusters.getHost(DummyHostname1);
+    Host hostObject = hosts.iterator().next();
     hostObject.setIPv4("ipv4");
     hostObject.setIPv6("ipv6");
     hostObject.setOsType(DummyOsType);
 
+    String hostname = hostObject.getHostName();
     ActionQueue aq = new ActionQueue();
 
     HeartBeatHandler handler = new HeartBeatHandler(fsm, aq, am, injector);
     Register reg = new Register();
     HostInfo hi = new HostInfo();
-    hi.setHostName(DummyHostname1);
+    hi.setHostName(hostname);
     hi.setOS(DummyOs);
     hi.setOSRelease(DummyOSRelease);
-    reg.setHostname(DummyHostname1);
+    reg.setHostname(hostname);
     reg.setHardwareProfile(hi);
     reg.setAgentVersion(metaInfo.getServerVersion());
     handler.handleRegistration(reg);
@@ -195,19 +205,21 @@ public class TestHeartbeatHandler {
 
     ExecutionCommand execCmd = new ExecutionCommand();
     execCmd.setRequestAndStage(2, 34);
-    execCmd.setHostname(DummyHostname1);
-    aq.enqueue(DummyHostname1, new ExecutionCommand());
+    execCmd.setHostname(hostname);
+    execCmd.setClusterName(cluster.getClusterName());
+    execCmd.setServiceName(HDFS);
+    aq.enqueue(hostname, execCmd);
     HeartBeat hb = new HeartBeat();
     hb.setResponseId(0);
     HostStatus hs = new HostStatus(Status.HEALTHY, DummyHostStatus);
     List<Alert> al = new ArrayList<Alert>();
     al.add(new Alert());
     hb.setNodeStatus(hs);
-    hb.setHostname(DummyHostname1);
+    hb.setHostname(hostname);
 
     handler.handleHeartBeat(hb);
     assertEquals(HostState.HEALTHY, hostObject.getState());
-    assertEquals(0, aq.dequeueAll(DummyHostname1).size());
+    assertEquals(0, aq.dequeueAll(hostname).size());
   }
 
 


[34/51] [abbrv] ambari git commit: AMBARI-19137. HDP 3.0 TP - move ZK, HDFS, YARN/MR into new common-services version (alejandro)

Posted by sm...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/journalnode_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/journalnode_upgrade.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/journalnode_upgrade.py
new file mode 100644
index 0000000..7585107
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/journalnode_upgrade.py
@@ -0,0 +1,152 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import time
+
+from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions.default import default
+from resource_management.core.exceptions import Fail
+import utils
+from resource_management.libraries.functions.jmx import get_value_from_jmx
+import namenode_ha_state
+from namenode_ha_state import NAMENODE_STATE, NamenodeHAState
+from utils import get_dfsadmin_base_command
+
+
+def post_upgrade_check():
+  """
+  Ensure all journal nodes are up and quorum is established during Rolling Upgrade.
+  :return:
+  """
+  import params
+  Logger.info("Ensuring Journalnode quorum is established")
+
+  if params.security_enabled:
+    # We establish HDFS identity instead of JN Kerberos identity
+    # since this is an administrative HDFS call that the HDFS administrator user must perform.
+    Execute(params.hdfs_kinit_cmd, user=params.hdfs_user)
+
+  time.sleep(5)
+  hdfs_roll_edits()
+  time.sleep(5)
+
+  all_journal_node_hosts = default("/clusterHostInfo/journalnode_hosts", [])
+
+  if len(all_journal_node_hosts) < 3:
+    raise Fail("Need at least 3 Journalnodes to maintain a quorum")
+
+  try:
+    namenode_ha = namenode_ha_state.NamenodeHAState()
+  except ValueError, err:
+    raise Fail("Could not retrieve Namenode HA addresses. Error: " + str(err))
+
+  Logger.info(str(namenode_ha))
+  nn_address = namenode_ha.get_address(NAMENODE_STATE.ACTIVE)
+
+  nn_data = utils.get_jmx_data(nn_address, 'org.apache.hadoop.hdfs.server.namenode.FSNamesystem', 'JournalTransactionInfo',
+                         namenode_ha.is_encrypted(), params.security_enabled)
+  if not nn_data:
+    raise Fail("Could not retrieve JournalTransactionInfo from JMX")
+
+  try:
+    last_txn_id = int(nn_data['LastAppliedOrWrittenTxId'])
+    success = ensure_jns_have_new_txn(all_journal_node_hosts, last_txn_id)
+
+    if not success:
+      raise Fail("Could not ensure that all Journal nodes have a new log transaction id")
+  except KeyError:
+    raise Fail("JournalTransactionInfo does not have key LastAppliedOrWrittenTxId from JMX info")
+
+
+def hdfs_roll_edits():
+  """
+  HDFS_CLIENT needs to be a dependency of JOURNALNODE
+  Roll the logs so that Namenode will be able to connect to the Journalnode.
+  Must kinit before calling this command.
+  """
+  import params
+
+  # TODO, this will need to be doc'ed since existing clusters will need HDFS_CLIENT on all JOURNALNODE hosts
+  dfsadmin_base_command = get_dfsadmin_base_command('hdfs')
+  command = dfsadmin_base_command + ' -rollEdits'
+  Execute(command, user=params.hdfs_user, tries=1)
+
+
+def ensure_jns_have_new_txn(nodelist, last_txn_id):
+  """
+  :param nodelist: List of Journalnodes
+  :param last_txn_id: Integer of last transaction id
+  :return: Return true on success, false otherwise
+  """
+  import params
+
+  jn_uri = default("/configurations/hdfs-site/dfs.namenode.shared.edits.dir", None)
+
+  if jn_uri is None:
+    raise Fail("No JournalNode URI found at hdfs-site/dfs.namenode.shared.edits.dir")
+
+  nodes = []
+  for node in nodelist:
+    if node in jn_uri:
+      nodes.append(node)
+
+  num_of_jns = len(nodes)
+  actual_txn_ids = {}
+  jns_updated = 0
+
+  if params.journalnode_address is None:
+    raise Fail("Could not retrieve JournalNode address")
+
+  if params.journalnode_port is None:
+    raise Fail("Could not retrieve JournalNode port")
+
+  time_out_secs = 3 * 60
+  step_time_secs = 10
+  iterations = int(time_out_secs/step_time_secs)
+
+  protocol = "https" if params.https_only else "http"
+
+  Logger.info("Checking if all JournalNodes are updated.")
+  for i in range(iterations):
+    Logger.info('Try %d out of %d' % (i+1, iterations))
+    for node in nodes:
+      # if all JNS are updated break
+      if jns_updated == num_of_jns:
+        Logger.info("All journal nodes are updated")
+        return True
+
+      # JN already meets condition, skip it
+      if node in actual_txn_ids and actual_txn_ids[node] and actual_txn_ids[node] >= last_txn_id:
+        continue
+
+      url = '%s://%s:%s' % (protocol, node, params.journalnode_port)
+      data = utils.get_jmx_data(url, 'Journal-', 'LastWrittenTxId', params.https_only, params.security_enabled)
+      if data:
+        actual_txn_ids[node] = int(data)
+        if actual_txn_ids[node] >= last_txn_id:
+          Logger.info("JournalNode %s has a higher transaction id: %s" % (node, str(data)))
+          jns_updated += 1
+        else:
+          Logger.info("JournalNode %s is still on transaction id: %s" % (node, str(data)))
+
+    Logger.info("Sleeping for %d secs" % step_time_secs)
+    time.sleep(step_time_secs)
+
+  return jns_updated == num_of_jns
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/namenode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/namenode.py
new file mode 100644
index 0000000..86f68e5
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/namenode.py
@@ -0,0 +1,424 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+import os
+import time
+import json
+import tempfile
+from datetime import datetime
+import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
+
+from resource_management.libraries.script.script import Script
+from resource_management.core.resources.system import Execute, File
+from resource_management.core import shell
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import Direction
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+
+from resource_management.core.exceptions import Fail
+from resource_management.core.shell import as_user
+from resource_management.core.logger import Logger
+
+
+from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons import OSConst
+
+
+import namenode_upgrade
+from hdfs_namenode import namenode, wait_for_safemode_off
+from hdfs import hdfs
+import hdfs_rebalance
+from utils import initiate_safe_zkfc_failover, get_hdfs_binary, get_dfsadmin_base_command
+
+
+
+# hashlib is supplied as of Python 2.5 as the replacement interface for md5
+# and other secure hashes.  In 2.6, md5 is deprecated.  Import hashlib if
+# available, avoiding a deprecation warning under 2.6.  Import md5 otherwise,
+# preserving 2.4 compatibility.
+try:
+  import hashlib
+  _md5 = hashlib.md5
+except ImportError:
+  import md5
+  _md5 = md5.new
+
+class NameNode(Script):
+
+  def get_component_name(self):
+    return "hadoop-hdfs-namenode"
+
+  def get_hdfs_binary(self):
+    """
+    Get the name or path to the hdfs binary depending on the component name.
+    """
+    component_name = self.get_component_name()
+    return get_hdfs_binary(component_name)
+
+  def install(self, env):
+    import params
+    env.set_params(params)
+    self.install_packages(env)
+    #TODO we need this for HA because of manual steps
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hdfs("namenode")
+    hdfs_binary = self.get_hdfs_binary()
+    namenode(action="configure", hdfs_binary=hdfs_binary, env=env)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    hdfs_binary = self.get_hdfs_binary()
+    namenode(action="start", hdfs_binary=hdfs_binary, upgrade_type=upgrade_type,
+      upgrade_suspended=params.upgrade_suspended, env=env)
+
+    # after starting NN in an upgrade, touch the marker file
+    if upgrade_type is not None:
+      # place a file on the system indicating that we've submitted the command that
+      # instructs the NN that it is now part of an upgrade
+      namenode_upgrade.create_upgrade_marker()
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    hdfs_binary = self.get_hdfs_binary()
+    if upgrade_type == "rolling" and params.dfs_ha_enabled:
+      if params.dfs_ha_automatic_failover_enabled:
+        initiate_safe_zkfc_failover()
+      else:
+        raise Fail("Rolling Upgrade - dfs.ha.automatic-failover.enabled must be enabled to perform a rolling restart")
+    namenode(action="stop", hdfs_binary=hdfs_binary, upgrade_type=upgrade_type, env=env)
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    namenode(action="status", env=env)
+
+  def decommission(self, env):
+    import params
+    env.set_params(params)
+    hdfs_binary = self.get_hdfs_binary()
+    namenode(action="decommission", hdfs_binary=hdfs_binary)
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class NameNodeDefault(NameNode):
+
+  def restore_snapshot(self, env):
+    """
+    Restore the snapshot during a Downgrade.
+    """
+    print "TODO AMBARI-12698"
+    pass
+
+  def prepare_express_upgrade(self, env):
+    """
+    During an Express Upgrade.
+    If in HA, on the Active NameNode only, examine the directory dfs.namenode.name.dir and
+    make sure that there is no "/previous" directory.
+
+    Create a list of all the DataNodes in the cluster.
+    hdfs dfsadmin -report > dfs-old-report-1.log
+
+    hdfs dfsadmin -safemode enter
+    hdfs dfsadmin -saveNamespace
+
+    Copy the checkpoint files located in ${dfs.namenode.name.dir}/current into a backup directory.
+
+    Finalize any prior HDFS upgrade,
+    hdfs dfsadmin -finalizeUpgrade
+
+    Prepare for a NameNode rolling upgrade in order to not lose any data.
+    hdfs dfsadmin -rollingUpgrade prepare
+    """
+    import params
+    Logger.info("Preparing the NameNodes for a NonRolling (aka Express) Upgrade.")
+
+    if params.security_enabled:
+      kinit_command = format("{params.kinit_path_local} -kt {params.hdfs_user_keytab} {params.hdfs_principal_name}")
+      Execute(kinit_command, user=params.hdfs_user, logoutput=True)
+
+    hdfs_binary = self.get_hdfs_binary()
+    namenode_upgrade.prepare_upgrade_check_for_previous_dir()
+    namenode_upgrade.prepare_upgrade_enter_safe_mode(hdfs_binary)
+    namenode_upgrade.prepare_upgrade_save_namespace(hdfs_binary)
+    namenode_upgrade.prepare_upgrade_backup_namenode_dir()
+    namenode_upgrade.prepare_upgrade_finalize_previous_upgrades(hdfs_binary)
+
+    # Call -rollingUpgrade prepare
+    namenode_upgrade.prepare_rolling_upgrade(hdfs_binary)
+
+  def prepare_rolling_upgrade(self, env):
+    hdfs_binary = self.get_hdfs_binary()
+    namenode_upgrade.prepare_rolling_upgrade(hdfs_binary)
+
+  def wait_for_safemode_off(self, env):
+    wait_for_safemode_off(self.get_hdfs_binary(), 30, True)
+
+  def finalize_non_rolling_upgrade(self, env):
+    hdfs_binary = self.get_hdfs_binary()
+    namenode_upgrade.finalize_upgrade("nonrolling", hdfs_binary)
+
+  def finalize_rolling_upgrade(self, env):
+    hdfs_binary = self.get_hdfs_binary()
+    namenode_upgrade.finalize_upgrade("rolling", hdfs_binary)
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      # When downgrading an Express Upgrade, the first thing we do is to revert the symlinks.
+      # Therefore, we cannot call this code in that scenario.
+      call_if = [("rolling", "upgrade"), ("rolling", "downgrade"), ("nonrolling", "upgrade")]
+      for e in call_if:
+        if (upgrade_type, params.upgrade_direction) == e:
+          conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-hdfs-namenode", params.version)
+
+  def post_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Stack Upgrade post-restart")
+    import params
+    env.set_params(params)
+
+    hdfs_binary = self.get_hdfs_binary()
+    dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
+    dfsadmin_cmd = dfsadmin_base_command + " -report -live"
+    Execute(dfsadmin_cmd,
+            user=params.hdfs_user,
+            tries=60,
+            try_sleep=10
+    )
+
+  def security_status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    props_value_check = {"hadoop.security.authentication": "kerberos",
+                         "hadoop.security.authorization": "true"}
+    props_empty_check = ["hadoop.security.auth_to_local"]
+    props_read_check = None
+    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
+                                                props_read_check)
+    props_value_check = None
+    props_empty_check = ['dfs.namenode.kerberos.internal.spnego.principal',
+                         'dfs.namenode.keytab.file',
+                         'dfs.namenode.kerberos.principal']
+    props_read_check = ['dfs.namenode.keytab.file']
+    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
+                                                props_read_check)
+
+    hdfs_expectations = {}
+    hdfs_expectations.update(core_site_expectations)
+    hdfs_expectations.update(hdfs_site_expectations)
+
+    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
+                                                 {'core-site.xml': FILE_TYPE_XML,
+                                                  'hdfs-site.xml': FILE_TYPE_XML})
+    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
+        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
+      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
+      if not result_issues:  # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ( 'hdfs-site' not in security_params
+               or 'dfs.namenode.keytab.file' not in security_params['hdfs-site']
+               or 'dfs.namenode.kerberos.principal' not in security_params['hdfs-site']):
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal are not set property."})
+            return
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.hdfs_user,
+                                security_params['hdfs-site']['dfs.namenode.keytab.file'],
+                                security_params['hdfs-site']['dfs.namenode.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+  def rebalancehdfs(self, env):
+    import params
+    env.set_params(params)
+
+    name_node_parameters = json.loads( params.name_node_params )
+    threshold = name_node_parameters['threshold']
+    _print("Starting balancer with threshold = %s\n" % threshold)
+
+    rebalance_env = {'PATH': params.hadoop_bin_dir}
+
+    if params.security_enabled:
+      # Create the kerberos credentials cache (ccache) file and set it in the environment to use
+      # when executing HDFS rebalance command. Use the md5 hash of the combination of the principal and keytab file
+      # to generate a (relatively) unique cache filename so that we can use it as needed.
+      # TODO: params.tmp_dir=/var/lib/ambari-agent/tmp. However hdfs user doesn't have access to this path.
+      # TODO: Hence using /tmp
+      ccache_file_name = "hdfs_rebalance_cc_" + _md5(format("{hdfs_principal_name}|{hdfs_user_keytab}")).hexdigest()
+      ccache_file_path = os.path.join(tempfile.gettempdir(), ccache_file_name)
+      rebalance_env['KRB5CCNAME'] = ccache_file_path
+
+      # If there are no tickets in the cache or they are expired, perform a kinit, else use what
+      # is in the cache
+      klist_cmd = format("{klist_path_local} -s {ccache_file_path}")
+      kinit_cmd = format("{kinit_path_local} -c {ccache_file_path} -kt {hdfs_user_keytab} {hdfs_principal_name}")
+      if shell.call(klist_cmd, user=params.hdfs_user)[0] != 0:
+        Execute(kinit_cmd, user=params.hdfs_user)
+
+    def calculateCompletePercent(first, current):
+      # avoid division by zero
+      try:
+        division_result = current.bytesLeftToMove/first.bytesLeftToMove
+      except ZeroDivisionError:
+        Logger.warning("Division by zero. Bytes Left To Move = {0}. Return 1.0".format(first.bytesLeftToMove))
+        return 1.0
+      return 1.0 - division_result
+
+
+    def startRebalancingProcess(threshold, rebalance_env):
+      rebalanceCommand = format('hdfs --config {hadoop_conf_dir} balancer -threshold {threshold}')
+      return as_user(rebalanceCommand, params.hdfs_user, env=rebalance_env)
+
+    command = startRebalancingProcess(threshold, rebalance_env)
+
+    basedir = os.path.join(env.config.basedir, 'scripts')
+    if(threshold == 'DEBUG'): #FIXME TODO remove this on PROD
+      basedir = os.path.join(env.config.basedir, 'scripts', 'balancer-emulator')
+      command = ['ambari-python-wrap','hdfs-command.py']
+
+    _print("Executing command %s\n" % command)
+
+    parser = hdfs_rebalance.HdfsParser()
+
+    def handle_new_line(line, is_stderr):
+      if is_stderr:
+        return
+
+      _print('[balancer] %s' % (line))
+      pl = parser.parseLine(line)
+      if pl:
+        res = pl.toJson()
+        res['completePercent'] = calculateCompletePercent(parser.initialLine, pl)
+
+        self.put_structured_out(res)
+      elif parser.state == 'PROCESS_FINISED' :
+        _print('[balancer] %s' % ('Process is finished' ))
+        self.put_structured_out({'completePercent' : 1})
+        return
+
+    Execute(command,
+            on_new_line = handle_new_line,
+            logoutput = False,
+    )
+
+    if params.security_enabled:
+      # Delete the kerberos credentials cache (ccache) file
+      File(ccache_file_path,
+           action = "delete",
+      )
+      
+  def get_log_folder(self):
+    import params
+    return params.hdfs_log_dir
+  
+  def get_user(self):
+    import params
+    return params.hdfs_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.namenode_pid_file]
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class NameNodeWindows(NameNode):
+  def install(self, env):
+    import install_params
+    self.install_packages(env)
+    #TODO we need this for HA because of manual steps
+    self.configure(env)
+
+  def rebalancehdfs(self, env):
+    from ambari_commons.os_windows import UserHelper, run_os_command_impersonated
+    import params
+    env.set_params(params)
+
+    hdfs_username, hdfs_domain = UserHelper.parse_user_name(params.hdfs_user, ".")
+
+    name_node_parameters = json.loads( params.name_node_params )
+    threshold = name_node_parameters['threshold']
+    _print("Starting balancer with threshold = %s\n" % threshold)
+
+    def calculateCompletePercent(first, current):
+      return 1.0 - current.bytesLeftToMove/first.bytesLeftToMove
+
+    def startRebalancingProcess(threshold):
+      rebalanceCommand = 'hdfs balancer -threshold %s' % threshold
+      return ['cmd', '/C', rebalanceCommand]
+
+    command = startRebalancingProcess(threshold)
+    basedir = os.path.join(env.config.basedir, 'scripts')
+
+    _print("Executing command %s\n" % command)
+
+    parser = hdfs_rebalance.HdfsParser()
+    returncode, stdout, err = run_os_command_impersonated(' '.join(command), hdfs_username, Script.get_password(params.hdfs_user), hdfs_domain)
+
+    for line in stdout.split('\n'):
+      _print('[balancer] %s %s' % (str(datetime.now()), line ))
+      pl = parser.parseLine(line)
+      if pl:
+        res = pl.toJson()
+        res['completePercent'] = calculateCompletePercent(parser.initialLine, pl)
+
+        self.put_structured_out(res)
+      elif parser.state == 'PROCESS_FINISED' :
+        _print('[balancer] %s %s' % (str(datetime.now()), 'Process is finished' ))
+        self.put_structured_out({'completePercent' : 1})
+        break
+
+    if returncode != None and returncode != 0:
+      raise Fail('Hdfs rebalance process exited with error. See the log output')
+
+def _print(line):
+  sys.stdout.write(line)
+  sys.stdout.flush()
+
+if __name__ == "__main__":
+  NameNode().execute()

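For reference, a minimal sketch (not part of the patch) of the progress arithmetic that calculateCompletePercent implements in rebalancehdfs above; the helper name and sample byte counts are made up for illustration:

def complete_percent(first_bytes_left, current_bytes_left):
  # progress is 1.0 minus the ratio of bytes still left to move versus the
  # bytes left to move in the balancer's first report; guard division by zero
  if first_bytes_left == 0:
    return 1.0
  return 1.0 - float(current_bytes_left) / float(first_bytes_left)

print(complete_percent(400, 100))  # 0.75, three quarters of the work is done
print(complete_percent(0, 0))      # 1.0, nothing to move
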
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/namenode_ha_state.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/namenode_ha_state.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/namenode_ha_state.py
new file mode 100644
index 0000000..259af2e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/namenode_ha_state.py
@@ -0,0 +1,219 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.core import shell
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.jmx import get_value_from_jmx
+
+
+class NAMENODE_STATE:
+  ACTIVE = "active"
+  STANDBY = "standby"
+  UNKNOWN = "unknown"
+
+
+class NamenodeHAState:
+  """
+  Represents the current state of the Namenode Hosts in High Availability Mode
+  """
+
+  def __init__(self):
+    """
+    Initializes all fields by querying the Namenode state.
+    Raises a ValueError if unable to construct the object.
+    """
+    import params
+
+    self.name_service = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
+    if self.name_service is None:
+      self.name_service = default('/configurations/hdfs-site/dfs.nameservices', None)
+
+    if not self.name_service:
+      raise ValueError("Could not retrieve property dfs.nameservices or dfs.internal.nameservices")
+
+    nn_unique_ids_key = "dfs.ha.namenodes." + str(self.name_service)
+    # List of the nn unique ids
+    self.nn_unique_ids = default("/configurations/hdfs-site/" + nn_unique_ids_key, None)
+    if not self.nn_unique_ids:
+      raise ValueError("Could not retrieve property " + nn_unique_ids_key)
+
+    self.nn_unique_ids = self.nn_unique_ids.split(",")
+    self.nn_unique_ids = [x.strip() for x in self.nn_unique_ids]
+
+    policy = default("/configurations/hdfs-site/dfs.http.policy", "HTTP_ONLY")
+    self.encrypted = policy.upper() == "HTTPS_ONLY"
+
+    jmx_uri_fragment = ("https" if self.encrypted else "http") + "://{0}/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem"
+    namenode_http_fragment = "dfs.namenode.http-address.{0}.{1}"
+    namenode_https_fragment = "dfs.namenode.https-address.{0}.{1}"
+
+    # Dictionary where the key is the Namenode State (e.g., ACTIVE), and the value is a set of hostnames
+    self.namenode_state_to_hostnames = {}
+
+    # Dictionary from nn unique id name to a tuple of (http address, https address)
+    self.nn_unique_id_to_addresses = {}
+    for nn_unique_id in self.nn_unique_ids:
+      http_key = namenode_http_fragment.format(self.name_service, nn_unique_id)
+      https_key = namenode_https_fragment.format(self.name_service, nn_unique_id)
+
+      http_value = default("/configurations/hdfs-site/" + http_key, None)
+      https_value = default("/configurations/hdfs-site/" + https_key, None)
+      actual_value = https_value if self.encrypted else http_value
+      hostname = actual_value.split(":")[0].strip() if actual_value and ":" in actual_value else None
+
+      self.nn_unique_id_to_addresses[nn_unique_id] = (http_value, https_value)
+      try:
+        if not hostname:
+          raise Exception("Could not retrieve hostname from address " + actual_value)
+
+        jmx_uri = jmx_uri_fragment.format(actual_value)
+        state = get_value_from_jmx(jmx_uri, "tag.HAState", params.security_enabled, params.hdfs_user, params.is_https_enabled)
+
+        # If JMX parsing failed
+        if not state:
+          run_user = default("/configurations/hadoop-env/hdfs_user", "hdfs")
+          check_service_cmd = "hdfs haadmin -ns {dfs_ha_nameservices} -getServiceState {0}".format(nn_unique_id)
+          code, out = shell.call(check_service_cmd, logoutput=True, user=run_user)
+          if code == 0 and out:
+            if NAMENODE_STATE.STANDBY in out:
+              state = NAMENODE_STATE.STANDBY
+            elif NAMENODE_STATE.ACTIVE in out:
+              state = NAMENODE_STATE.ACTIVE
+
+        if not state:
+          raise Exception("Could not retrieve Namenode state from URL " + jmx_uri)
+
+        state = state.lower()
+
+        if state not in [NAMENODE_STATE.ACTIVE, NAMENODE_STATE.STANDBY]:
+          state = NAMENODE_STATE.UNKNOWN
+
+        if state in self.namenode_state_to_hostnames:
+          self.namenode_state_to_hostnames[state].add(hostname)
+        else:
+          hostnames = set([hostname, ])
+          self.namenode_state_to_hostnames[state] = hostnames
+      except:
+        Logger.error("Could not get namenode state for " + nn_unique_id)
+
+  def __str__(self):
+    return "Namenode HA State: {\n" + \
+           ("IDs: %s\n"       % ", ".join(self.nn_unique_ids)) + \
+           ("Addresses: %s\n" % str(self.nn_unique_id_to_addresses)) + \
+           ("States: %s\n"    % str(self.namenode_state_to_hostnames)) + \
+           ("Encrypted: %s\n" % str(self.encrypted)) + \
+           ("Healthy: %s\n"   % str(self.is_healthy())) + \
+           "}"
+
+  def is_encrypted(self):
+    """
+    :return: Returns a bool indicating if HTTPS is enabled
+    """
+    return self.encrypted
+
+  def get_nn_unique_ids(self):
+    """
+    :return Returns a list of the nn unique ids
+    """
+    return self.nn_unique_ids
+
+  def get_nn_unique_id_to_addresses(self):
+    """
+    :return Returns a dictionary where the key is the nn unique id, and the value is a tuple of (http address, https address)
+    Each address is of the form, hostname:port
+    """
+    return self.nn_unique_id_to_addresses
+
+  def get_address_for_nn_id(self, id):
+    """
+    :param id: Namenode ID
+    :return: Returns the appropriate address (HTTP if no encryption, HTTPS otherwise) for the given namenode id.
+    """
+    if id in self.nn_unique_id_to_addresses:
+      addresses = self.nn_unique_id_to_addresses[id]
+      if addresses and len(addresses) == 2:
+        return addresses[1] if self.encrypted else addresses[0]
+    return None
+
+  def get_address_for_host(self, hostname):
+    """
+    :param hostname: Host name
+    :return: Returns the appropriate address (HTTP if no encryption, HTTPS otherwise) for the given host.
+    """
+    for id, addresses in self.nn_unique_id_to_addresses.iteritems():
+      if addresses and len(addresses) == 2:
+        if ":" in addresses[0]:
+          nn_hostname = addresses[0].split(":")[0].strip()
+          if nn_hostname == hostname:
+            # Found the host
+            return addresses[1] if self.encrypted else addresses[0]
+    return None
+
+  def get_namenode_state_to_hostnames(self):
+    """
+    :return Return a dictionary where the key is a member of NAMENODE_STATE, and the value is a set of hostnames.
+    """
+    return self.namenode_state_to_hostnames
+
+  def get_address(self, namenode_state):
+    """
+    @param namenode_state: Member of NAMENODE_STATE
+    :return Get the address that corresponds to the first host with the given state
+    """
+    hosts = self.namenode_state_to_hostnames[namenode_state] if namenode_state in self.namenode_state_to_hostnames else []
+    if hosts and len(hosts) > 0:
+      hostname = list(hosts)[0]
+      return self.get_address_for_host(hostname)
+    return None
+
+  def is_active(self, host_name):
+    """
+    :param host_name: Host name
+    :return: Return True if this is the active NameNode, otherwise, False.
+    """
+    return self._is_in_state(host_name, NAMENODE_STATE.ACTIVE)
+
+  def is_standby(self, host_name):
+    """
+    :param host_name: Host name
+    :return: Return True if this is the standby NameNode, otherwise, False.
+    """
+    return self._is_in_state(host_name, NAMENODE_STATE.STANDBY)
+
+  def _is_in_state(self, host_name, state):
+    """
+    :param host_name: Host name
+    :param state: State to check
+    :return: Return True if this NameNode is in the specified state, otherwise, False.
+    """
+    mapping = self.get_namenode_state_to_hostnames()
+    if state in mapping:
+      hosts_in_state = mapping[state]
+      if hosts_in_state is not None and len(hosts_in_state) == 1 and next(iter(hosts_in_state)).lower() == host_name.lower():
+        return True
+    return False
+
+  def is_healthy(self):
+    """
+    :return: Returns a bool indicating if exactly one ACTIVE and one STANDBY host exist.
+    """
+    active_hosts = self.namenode_state_to_hostnames[NAMENODE_STATE.ACTIVE] if NAMENODE_STATE.ACTIVE in self.namenode_state_to_hostnames else []
+    standby_hosts = self.namenode_state_to_hostnames[NAMENODE_STATE.STANDBY] if NAMENODE_STATE.STANDBY in self.namenode_state_to_hostnames else []
+    return len(active_hosts) == 1 and len(standby_hosts) == 1

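For reference, a minimal sketch (not part of the patch) of how the tag.HAState value used by NamenodeHAState can be read from an unsecured NameNode's JMX endpoint; the function name, hostname and port are placeholders, and a Kerberized cluster would need the authenticated path that get_value_from_jmx provides:

import json
import urllib2

def get_ha_state(namenode_http_address):
  url = "http://%s/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem" % namenode_http_address
  data = json.loads(urllib2.urlopen(url, timeout=10).read())
  beans = data.get("beans", [])
  # the FSNamesystem bean carries "tag.HAState" as "active" or "standby"
  return beans[0].get("tag.HAState") if beans else None

print(get_ha_state("nn1.example.com:50070"))  # e.g. "active"
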
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/namenode_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/namenode_upgrade.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/namenode_upgrade.py
new file mode 100644
index 0000000..f683dcc
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/namenode_upgrade.py
@@ -0,0 +1,322 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import re
+import os
+
+from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Execute
+from resource_management.core.resources.system import File
+from resource_management.core import shell
+from resource_management.core.shell import as_user
+from resource_management.core.exceptions import Fail
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions import get_unique_id_and_date
+from resource_management.libraries.functions import Direction, SafeMode
+from utils import get_dfsadmin_base_command
+
+from namenode_ha_state import NamenodeHAState
+
+
+safemode_to_instruction = {SafeMode.ON: "enter",
+                           SafeMode.OFF: "leave"}
+
+NAMENODE_UPGRADE_IN_PROGRESS_MARKER_FILE = "namenode-upgrade-in-progress"
+
+def prepare_upgrade_check_for_previous_dir():
+  """
+  During a NonRolling (aka Express Upgrade), preparing the NameNode requires backing up some data.
+  Check that there is no "previous" folder inside the NameNode Name Dir.
+  """
+  import params
+
+  if params.dfs_ha_enabled:
+    namenode_ha = NamenodeHAState()
+    if namenode_ha.is_active(params.hostname):
+      Logger.info("NameNode High Availability is enabled and this is the Active NameNode.")
+
+      problematic_previous_namenode_dirs = set()
+      nn_name_dirs = params.dfs_name_dir.split(',')
+      for nn_dir in nn_name_dirs:
+        if os.path.isdir(nn_dir):
+          # Check for a previous folder, which is not allowed.
+          previous_dir = os.path.join(nn_dir, "previous")
+          if os.path.isdir(previous_dir):
+            problematic_previous_namenode_dirs.add(previous_dir)
+
+      if len(problematic_previous_namenode_dirs) > 0:
+        message = 'WARNING. The following NameNode Name Dir(s) have a "previous" folder from an older version.\n' \
+                  'Please back it up first, and then delete it, OR Finalize (E.g., "hdfs dfsadmin -finalizeUpgrade").\n' \
+                  'NameNode Name Dir(s): {0}\n' \
+                  '***** Then, retry this step. *****'.format(", ".join(problematic_previous_namenode_dirs))
+        Logger.error(message)
+        raise Fail(message)
+
+def prepare_upgrade_enter_safe_mode(hdfs_binary):
+  """
+  During a NonRolling (aka Express Upgrade), preparing the NameNode requires first entering Safemode.
+  :param hdfs_binary: name/path of the HDFS binary to use
+  """
+  import params
+
+  dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
+  safe_mode_enter_cmd = dfsadmin_base_command + " -safemode enter"
+  try:
+    # Safe to call if already in Safe Mode
+    desired_state = SafeMode.ON
+    safemode_transition_successful, original_state = reach_safemode_state(params.hdfs_user, desired_state, params.dfs_ha_enabled, hdfs_binary)
+    Logger.info("Transition successful: {0}, original state: {1}".format(str(safemode_transition_successful), str(original_state)))
+    if not safemode_transition_successful:
+      raise Fail("Could not transition to safemode state %s. Please check logs to make sure namenode is up." % str(desired_state))
+  except Exception, e:
+    message = "Could not enter safemode. Error: {0}. As the HDFS user, call this command: {1}".format(str(e), safe_mode_enter_cmd)
+    Logger.error(message)
+    raise Fail(message)
+
+def prepare_upgrade_save_namespace(hdfs_binary):
+  """
+  During a NonRolling (aka Express Upgrade), preparing the NameNode requires saving the namespace.
+  :param hdfs_binary: name/path of the HDFS binary to use
+  """
+  import params
+
+  dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
+  save_namespace_cmd = dfsadmin_base_command + " -saveNamespace"
+  try:
+    Logger.info("Checkpoint the current namespace.")
+    as_user(save_namespace_cmd, params.hdfs_user, env={'PATH': params.hadoop_bin_dir})
+  except Exception, e:
+    message = format("Could not save the NameSpace. As the HDFS user, call this command: {save_namespace_cmd}")
+    Logger.error(message)
+    raise Fail(message)
+
+def prepare_upgrade_backup_namenode_dir():
+  """
+  During a NonRolling (aka Express Upgrade), preparing the NameNode requires backing up the NameNode Name Dirs.
+  """
+  import params
+
+  i = 0
+  failed_paths = []
+  nn_name_dirs = params.dfs_name_dir.split(',')
+  backup_destination_root_dir = "{0}/{1}".format(params.namenode_backup_dir, params.stack_version_unformatted)
+  if len(nn_name_dirs) > 0:
+    Logger.info("Backup the NameNode name directory's CURRENT folder.")
+  for nn_dir in nn_name_dirs:
+    i += 1
+    namenode_current_image = os.path.join(nn_dir, "current")
+    unique = get_unique_id_and_date() + "_" + str(i)
+    # Note that /tmp may not be writeable.
+    backup_current_folder = "{0}/namenode_{1}/".format(backup_destination_root_dir, unique)
+
+    if os.path.isdir(namenode_current_image) and not os.path.isdir(backup_current_folder):
+      try:
+        os.makedirs(backup_current_folder)
+        Execute(('cp', '-ar', namenode_current_image, backup_current_folder),
+                sudo=True
+        )
+      except Exception, e:
+        failed_paths.append(namenode_current_image)
+  if len(failed_paths) > 0:
+    Logger.error("Could not backup the NameNode Name Dir(s) to {0}, make sure that the destination path is "
+                 "writeable and copy the directories on your own. Directories: {1}".format(backup_destination_root_dir,
+                                                                                           ", ".join(failed_paths)))
+
+def prepare_upgrade_finalize_previous_upgrades(hdfs_binary):
+  """
+  During a NonRolling (aka Express Upgrade), preparing the NameNode requires Finalizing any upgrades that are in progress.
+  :param hdfs_binary: name/path of the HDFS binary to use
+  """
+  import params
+
+  dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
+  finalize_command = dfsadmin_base_command + " -rollingUpgrade finalize"
+  try:
+    Logger.info("Attempt to Finalize if there are any in-progress upgrades. "
+                "This will return 255 if no upgrades are in progress.")
+    code, out = shell.checked_call(finalize_command, logoutput=True, user=params.hdfs_user)
+    if out:
+      expected_substring = "there is no rolling upgrade in progress"
+      if expected_substring not in out.lower():
+        Logger.warning('Finalize command output did not contain the expected substring: %s' % expected_substring)
+    else:
+      Logger.warning("Finalize command did not return any output.")
+  except Exception, e:
+    Logger.warning("Ensure no upgrades are in progress.")
+
+def reach_safemode_state(user, safemode_state, in_ha, hdfs_binary):
+  """
+  Enter or leave safemode for the Namenode.
+  :param user: user to perform action as
+  :param safemode_state: Desired state of ON or OFF
+  :param in_ha: bool indicating if Namenode High Availability is enabled
+  :param hdfs_binary: name/path of the HDFS binary to use
+  :return: Returns a tuple of (transition success, original state). If no change is needed, the indicator of
+  success will be True
+  """
+  Logger.info("Prepare to transition into safemode state %s" % safemode_state)
+  import params
+  original_state = SafeMode.UNKNOWN
+
+  dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
+  safemode_base_command = dfsadmin_base_command + " -safemode "
+  safemode_check_cmd = safemode_base_command + " get"
+
+  grep_pattern = format("Safe mode is {safemode_state}")
+  safemode_check_with_grep = format("{safemode_check_cmd} | grep '{grep_pattern}'")
+
+  code, out = shell.call(safemode_check_cmd, user=user, logoutput=True)
+  Logger.info("Command: %s\nCode: %d." % (safemode_check_cmd, code))
+  if code == 0 and out is not None:
+    Logger.info(out)
+    re_pattern = r"Safe mode is (\S*)"
+    Logger.info("Pattern to search: {0}".format(re_pattern))
+    m = re.search(re_pattern, out, re.IGNORECASE)
+    if m and len(m.groups()) >= 1:
+      original_state = m.group(1).upper()
+
+      if original_state == safemode_state:
+        return (True, original_state)
+      else:
+        # Make a transition
+        command = safemode_base_command + safemode_to_instruction[safemode_state]
+        Execute(command,
+                user=user,
+                logoutput=True,
+                path=[params.hadoop_bin_dir])
+
+        code, out = shell.call(safemode_check_with_grep, user=user)
+        Logger.info("Command: %s\nCode: %d. Out: %s" % (safemode_check_with_grep, code, out))
+        if code == 0:
+          return (True, original_state)
+  return (False, original_state)
+
+
+def prepare_rolling_upgrade(hdfs_binary):
+  """
+  This can be called during either Rolling Upgrade or Express Upgrade (aka nonrolling)
+
+  Rolling Upgrade for HDFS Namenode requires the following.
+  0. Namenode must be up
+  1. If HA: leave safemode if the safemode status is not OFF
+  2. Execute a rolling upgrade "prepare"
+  3. Execute a rolling upgrade "query"
+  :param hdfs_binary: name/path of the HDFS binary to use
+  """
+  import params
+
+  if not params.upgrade_direction or params.upgrade_direction not in [Direction.UPGRADE, Direction.DOWNGRADE]:
+    raise Fail("Could not retrieve upgrade direction: %s" % str(params.upgrade_direction))
+  Logger.info(format("Performing a(n) {params.upgrade_direction} of HDFS"))
+
+  if params.security_enabled:
+    kinit_command = format("{params.kinit_path_local} -kt {params.hdfs_user_keytab} {params.hdfs_principal_name}") 
+    Execute(kinit_command, user=params.hdfs_user, logoutput=True)
+
+  if params.upgrade_direction == Direction.UPGRADE:
+    if params.dfs_ha_enabled:
+      Logger.info('High Availability is enabled, must leave safemode before calling "-rollingUpgrade prepare"')
+      desired_state = SafeMode.OFF
+      safemode_transition_successful, original_state = reach_safemode_state(params.hdfs_user, desired_state, True, hdfs_binary)
+      if not safemode_transition_successful:
+        raise Fail("Could not transition to safemode state %s. Please check logs to make sure namenode is up." % str(desired_state))
+
+    dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
+    prepare = dfsadmin_base_command + " -rollingUpgrade prepare"
+    query = dfsadmin_base_command + " -rollingUpgrade query"
+    Execute(prepare,
+            user=params.hdfs_user,
+            logoutput=True)
+    Execute(query,
+            user=params.hdfs_user,
+            logoutput=True)
+
+def finalize_upgrade(upgrade_type, hdfs_binary):
+  """
+  Finalize the Namenode upgrade, at which point it cannot be downgraded.
+  :param upgrade_type rolling or nonrolling
+  :param hdfs_binary: name/path of the HDFS binary to use
+  """
+  Logger.info("Executing Rolling Upgrade finalize")
+  import params
+
+  if params.security_enabled:
+    kinit_command = format("{params.kinit_path_local} -kt {params.hdfs_user_keytab} {params.hdfs_principal_name}") 
+    Execute(kinit_command, user=params.hdfs_user, logoutput=True)
+
+  dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
+  finalize_cmd = dfsadmin_base_command + " -rollingUpgrade finalize"
+  query_cmd = dfsadmin_base_command + " -rollingUpgrade query"
+
+  Execute(query_cmd,
+        user=params.hdfs_user,
+        logoutput=True)
+  Execute(finalize_cmd,
+          user=params.hdfs_user,
+          logoutput=True)
+  Execute(query_cmd,
+          user=params.hdfs_user,
+          logoutput=True)
+
+  # upgrade is finalized; remove the upgrade marker
+  delete_upgrade_marker()
+
+
+def get_upgrade_in_progress_marker():
+  """
+  Gets the full path of the file which indicates that NameNode has begun its stack upgrade.
+  :return:
+  """
+  from resource_management.libraries.script.script import Script
+  return os.path.join(Script.get_tmp_dir(), NAMENODE_UPGRADE_IN_PROGRESS_MARKER_FILE)
+
+
+def create_upgrade_marker():
+  """
+  Creates the marker file indicating that NameNode has begun participating in a stack upgrade.
+  If the file already exists, nothing will be done. This will silently log exceptions on failure.
+  :return:
+  """
+  # create the marker file which indicates that the NameNode has begun a stack upgrade
+  try:
+    namenode_upgrade_in_progress_marker = get_upgrade_in_progress_marker()
+    if not os.path.isfile(namenode_upgrade_in_progress_marker):
+      File(namenode_upgrade_in_progress_marker)
+  except:
+    Logger.warning("Unable to create NameNode upgrade marker file {0}".format(namenode_upgrade_in_progress_marker))
+
+
+def delete_upgrade_marker():
+  """
+  Removes the marker file indicating that NameNode has begun participating in a stack upgrade.
+  If the file does not exist, then nothing will be done.
+  Failure to remove this file could cause problems with restarts in the future. That's why
+  checking to see if there is a suspended upgrade is also advised. This function will raise
+  an exception if the file can't be removed.
+  :return:
+  """
+  # remove the marker file which indicates that the NameNode has begun a stack upgrade, if it exists
+  try:
+    namenode_upgrade_in_progress_marker = get_upgrade_in_progress_marker()
+    if os.path.isfile(namenode_upgrade_in_progress_marker):
+      File(namenode_upgrade_in_progress_marker, action='delete')
+  except:
+    error_message = "Unable to remove NameNode upgrade marker file {0}".format(namenode_upgrade_in_progress_marker)
+    Logger.error(error_message)
+    raise Fail(error_message)
+

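For reference, a minimal sketch (not part of the patch) of the pattern reach_safemode_state uses to extract the current state from "hdfs dfsadmin -safemode get" output; the helper name and sample strings are made up for illustration:

import re

def parse_safemode_output(out):
  m = re.search(r"Safe mode is (\S*)", out, re.IGNORECASE)
  return m.group(1).upper() if m else None

print(parse_safemode_output("Safe mode is OFF"))                 # OFF
print(parse_safemode_output("Safe mode is ON in nn1/10.0.0.1"))  # ON
print(parse_safemode_output("unexpected output"))                # None
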
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/nfsgateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/nfsgateway.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/nfsgateway.py
new file mode 100644
index 0000000..7ba1f96
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/nfsgateway.py
@@ -0,0 +1,151 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+from hdfs_nfsgateway import nfsgateway
+from hdfs import hdfs
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+
+
+class NFSGateway(Script):
+
+  def get_component_name(self):
+    return "hadoop-hdfs-nfs3"
+
+  def install(self, env):
+    import params
+
+    env.set_params(params)
+
+    self.install_packages(env)
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.stack_version_formatted and check_stack_feature(StackFeature.NFS, params.stack_version_formatted):
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-hdfs-nfs3", params.version)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    self.configure(env)
+    nfsgateway(action="start")
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    nfsgateway(action="stop")
+
+  def configure(self, env):
+    import params
+
+    env.set_params(params)
+    hdfs()
+    nfsgateway(action="configure")
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+
+    check_process_status(status_params.nfsgateway_pid_file)
+
+  def security_status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    props_value_check = {"hadoop.security.authentication": "kerberos",
+                         "hadoop.security.authorization": "true"}
+    props_empty_check = ["hadoop.security.auth_to_local"]
+    props_read_check = None
+    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
+                                                props_read_check)
+    props_value_check = None
+    props_empty_check = ['nfs.keytab.file',
+                         'nfs.kerberos.principal']
+    props_read_check = ['nfs.keytab.file']
+    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
+                                                props_read_check)
+
+    hdfs_expectations = {}
+    hdfs_expectations.update(core_site_expectations)
+    hdfs_expectations.update(hdfs_site_expectations)
+
+    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
+                                                 {'core-site.xml': FILE_TYPE_XML,
+                                                  'hdfs-site.xml': FILE_TYPE_XML})
+    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
+        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
+      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
+      if not result_issues:  # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ('hdfs-site' not in security_params or
+                'nfs.keytab.file' not in security_params['hdfs-site'] or
+                'nfs.kerberos.principal' not in security_params['hdfs-site']):
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal are not set property."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.hdfs_user,
+                                security_params['hdfs-site']['nfs.keytab.file'],
+                                security_params['hdfs-site'][
+                                  'nfs.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+      
+  def get_log_folder(self):
+    import params
+    return params.hdfs_log_dir
+  
+  def get_user(self):
+    import params
+    return params.hdfs_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.nfsgateway_pid_file]
+
+if __name__ == "__main__":
+  NFSGateway().execute()

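For reference, a plain-Python sketch (not part of the patch, and not the resource_management API) of the kind of check that build_expectations and validate_security_config_properties perform in security_status above: required properties must be present, non-empty, and in some cases carry fixed values. The helper name and property values are made up for illustration:

def validate_site(site_props, value_checks=None, non_empty_keys=None):
  issues = []
  for key, expected in (value_checks or {}).items():
    if site_props.get(key) != expected:
      issues.append("%s should be '%s'" % (key, expected))
  for key in (non_empty_keys or []):
    if not site_props.get(key):
      issues.append("%s must be set" % key)
  return issues

hdfs_site = {"nfs.keytab.file": "/etc/security/keytabs/nfs.service.keytab",
             "nfs.kerberos.principal": ""}
print(validate_site(hdfs_site, non_empty_keys=["nfs.keytab.file", "nfs.kerberos.principal"]))
# ['nfs.kerberos.principal must be set']
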
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/params.py
new file mode 100644
index 0000000..25231f9
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/params.py
@@ -0,0 +1,28 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from ambari_commons import OSCheck
+from resource_management.libraries.functions.default import default
+
+if OSCheck.is_windows_family():
+  from params_windows import *
+else:
+  from params_linux import *
+
+nfsgateway_heapsize = config['configurations']['hadoop-env']['nfsgateway_heapsize']
+retryAble = default("/commandParams/command_retry_enabled", False)

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/params_linux.py
new file mode 100644
index 0000000..55544e0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/params_linux.py
@@ -0,0 +1,527 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import status_params
+import utils
+import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
+import os
+import re
+
+from ambari_commons.os_check import OSCheck
+
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.stack_features import get_stack_feature_version
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.expect import expect
+from resource_management.libraries.functions import get_klist_path
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions.format_jvm_option import format_jvm_option
+from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
+from resource_management.libraries.functions.hdfs_utils import is_https_enabled_in_hdfs
+from resource_management.libraries.functions import is_empty
+
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+stack_name = status_params.stack_name
+stack_root = Script.get_stack_root()
+upgrade_direction = default("/commandParams/upgrade_direction", None)
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
+agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
+
+# there is a stack upgrade which has not yet been finalized; it's currently suspended
+upgrade_suspended = default("roleParams/upgrade_suspended", False)
+
+# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
+version = default("/commandParams/version", None)
+
+# The desired role is only available during a Non-Rolling Upgrade in HA.
+# The server calculates which of the two NameNodes will be the active, and the other the standby since they
+# are started using different commands.
+desired_namenode_role = default("/commandParams/desired_namenode_role", None)
+
+# get the correct version to use for checking stack features
+version_for_stack_feature_checks = get_stack_feature_version(config)
+
+stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
+stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+hdfs_user = status_params.hdfs_user
+root_user = "root"
+hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
+namenode_pid_file = status_params.namenode_pid_file
+zkfc_pid_file = status_params.zkfc_pid_file
+datanode_pid_file = status_params.datanode_pid_file
+
+# Some datanode settings
+dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
+dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None)
+dfs_dn_https_addr = default('/configurations/hdfs-site/dfs.datanode.https.address', None)
+dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
+dfs_dn_ipc_address = config['configurations']['hdfs-site']['dfs.datanode.ipc.address']
+secure_dn_ports_are_in_use = False
+
+hdfs_tmp_dir = config['configurations']['hadoop-env']['hdfs_tmp_dir']
+namenode_backup_dir = default("/configurations/hadoop-env/namenode_backup_dir", "/tmp/upgrades")
+
+# hadoop default parameters
+mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
+hadoop_bin = stack_select.get_hadoop_dir("sbin")
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_home = stack_select.get_hadoop_dir("home")
+hadoop_secure_dn_user = hdfs_user
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure")
+hadoop_lib_home = stack_select.get_hadoop_dir("lib")
+
+# hadoop parameters for stacks that support rolling_upgrade
+if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
+  mapreduce_libs_path = format("{stack_root}/current/hadoop-mapreduce-client/*")
+
+  if not security_enabled:
+    hadoop_secure_dn_user = '""'
+  else:
+    dfs_dn_port = utils.get_port(dfs_dn_addr)
+    dfs_dn_http_port = utils.get_port(dfs_dn_http_addr)
+    dfs_dn_https_port = utils.get_port(dfs_dn_https_addr)
+    # We try to avoid inability to start datanode as a plain user due to usage of root-owned ports
+    if dfs_http_policy == "HTTPS_ONLY":
+      secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_https_port)
+    elif dfs_http_policy == "HTTP_AND_HTTPS":
+      secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_http_port) or utils.is_secure_port(dfs_dn_https_port)
+    else:   # params.dfs_http_policy == "HTTP_ONLY" or not defined:
+      secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_http_port)
+    if secure_dn_ports_are_in_use:
+      hadoop_secure_dn_user = hdfs_user
+    else:
+      hadoop_secure_dn_user = '""'
+
+ambari_libs_dir = "/var/lib/ambari-agent/lib"
+limits_conf_dir = "/etc/security/limits.d"
+
+hdfs_user_nofile_limit = default("/configurations/hadoop-env/hdfs_user_nofile_limit", "128000")
+hdfs_user_nproc_limit = default("/configurations/hadoop-env/hdfs_user_nproc_limit", "65536")
+
+create_lib_snappy_symlinks = check_stack_feature(StackFeature.SNAPPY, stack_version_formatted)
+jsvc_path = "/usr/lib/bigtop-utils"
+
+execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir
+ulimit_cmd = "ulimit -c unlimited ; "
+
+snappy_so = "libsnappy.so"
+so_target_dir_x86 = format("{hadoop_lib_home}/native/Linux-i386-32")
+so_target_dir_x64 = format("{hadoop_lib_home}/native/Linux-amd64-64")
+so_target_x86 = format("{so_target_dir_x86}/{snappy_so}")
+so_target_x64 = format("{so_target_dir_x64}/{snappy_so}")
+so_src_dir_x86 = format("{hadoop_home}/lib")
+so_src_dir_x64 = format("{hadoop_home}/lib64")
+so_src_x86 = format("{so_src_dir_x86}/{snappy_so}")
+so_src_x64 = format("{so_src_dir_x64}/{snappy_so}")
+
+#security params
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+falcon_user = config['configurations']['falcon-env']['falcon_user']
+
+#exclude file
+hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
+exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
+update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
+command_phase = default("/commandParams/phase","")
+
+klist_path_local = get_klist_path(default('/configurations/kerberos-env/executable_search_paths', None))
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+#hosts
+hostname = config["hostname"]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+nm_host = default("/clusterHostInfo/nm_host", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", [])
+zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", [])
+falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
+
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+has_namenodes = not len(namenode_host) == 0
+has_jobtracker = not len(jtnode_host) == 0
+has_resourcemanager = not len(rm_host) == 0
+has_histroryserver = not len(hs_host) == 0
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_slaves = not len(slave_hosts) == 0
+has_oozie_server = not len(oozie_servers)  == 0
+has_hcat_server_host = not len(hcat_server_hosts)  == 0
+has_hive_server_host = not len(hive_server_host)  == 0
+has_journalnode_hosts = not len(journalnode_hosts)  == 0
+has_zkfc_hosts = not len(zkfc_hosts)  == 0
+has_falcon_host = not len(falcon_host)  == 0
+
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+
+#users and groups
+yarn_user = config['configurations']['yarn-env']['yarn_user']
+hbase_user = config['configurations']['hbase-env']['hbase_user']
+oozie_user = config['configurations']['oozie-env']['oozie_user']
+webhcat_user = config['configurations']['hive-env']['webhcat_user']
+hive_user = config['configurations']['hive-env']['hive_user']
+smoke_user =  config['configurations']['cluster-env']['smokeuser']
+smokeuser_principal =  config['configurations']['cluster-env']['smokeuser_principal_name']
+mapred_user = config['configurations']['mapred-env']['mapred_user']
+hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
+
+user_group = config['configurations']['cluster-env']['user_group']
+root_group = "root"
+proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
+
+#hadoop params
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
+nfs_file_dump_dir = config['configurations']['hdfs-site']['nfs.file.dump.dir']
+
+dfs_domain_socket_path = config['configurations']['hdfs-site']['dfs.domain.socket.path']
+dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)
+
+jn_edits_dir = config['configurations']['hdfs-site']['dfs.journalnode.edits.dir']
+
+dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
+
+hdfs_log_dir = format("{hdfs_log_dir_prefix}/{hdfs_user}")
+namenode_dirs_created_stub_dir = hdfs_log_dir
+namenode_dirs_stub_filename = "namenode_dirs_created"
+
+smoke_hdfs_user_dir = format("/user/{smoke_user}")
+smoke_hdfs_user_mode = 0770
+
+hdfs_namenode_format_disabled = default("/configurations/cluster-env/hdfs_namenode_format_disabled", False)
+hdfs_namenode_formatted_mark_suffix = "/namenode-formatted/"
+hdfs_namenode_bootstrapped_mark_suffix = "/namenode-bootstrapped/"
+namenode_formatted_old_mark_dirs = ["/var/run/hadoop/hdfs/namenode-formatted", 
+  format("{hadoop_pid_dir_prefix}/hdfs/namenode/formatted"),
+  "/var/lib/hdfs/namenode/formatted"]
+dfs_name_dirs = dfs_name_dir.split(",")
+namenode_formatted_mark_dirs = []
+namenode_bootstrapped_mark_dirs = []
+for dn_dir in dfs_name_dirs:
+  tmp_format_mark_dir = format("{dn_dir}{hdfs_namenode_formatted_mark_suffix}")
+  tmp_bootstrap_mark_dir = format("{dn_dir}{hdfs_namenode_bootstrapped_mark_suffix}")
+  namenode_formatted_mark_dirs.append(tmp_format_mark_dir)
+  namenode_bootstrapped_mark_dirs.append(tmp_bootstrap_mark_dir)
+
+# Use the namenode RPC address if configured; otherwise, fall back to the default file system
+namenode_address = None
+if 'dfs.namenode.rpc-address' in config['configurations']['hdfs-site']:
+  namenode_rpcaddress = config['configurations']['hdfs-site']['dfs.namenode.rpc-address']
+  namenode_address = format("hdfs://{namenode_rpcaddress}")
+else:
+  namenode_address = config['configurations']['core-site']['fs.defaultFS']
+
+fs_checkpoint_dirs = default("/configurations/hdfs-site/dfs.namenode.checkpoint.dir", "").split(',')
+
+dfs_data_dirs = config['configurations']['hdfs-site']['dfs.datanode.data.dir']
+
+data_dir_mount_file = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist"
+
+# HDFS High Availability properties
+dfs_ha_enabled = False
+dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
+if dfs_ha_nameservices is None:
+  dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None)
+dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+dfs_ha_automatic_failover_enabled = default("/configurations/hdfs-site/dfs.ha.automatic-failover.enabled", False)
+
+# hostname of the active HDFS HA Namenode (only used when HA is enabled)
+dfs_ha_namenode_active = default("/configurations/hadoop-env/dfs_ha_initial_namenode_active", None)
+# hostname of the standby HDFS HA Namenode (only used when HA is enabled)
+dfs_ha_namenode_standby = default("/configurations/hadoop-env/dfs_ha_initial_namenode_standby", None)
+
+# Values for the current Host
+namenode_id = None
+namenode_rpc = None
+
+dfs_ha_namemodes_ids_list = []
+other_namenode_id = None
+
+if dfs_ha_namenode_ids:
+  dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
+  dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
+  if dfs_ha_namenode_ids_array_len > 1:
+    dfs_ha_enabled = True
+if dfs_ha_enabled:
+  for nn_id in dfs_ha_namemodes_ids_list:
+    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
+    if hostname in nn_host:
+      namenode_id = nn_id
+      namenode_rpc = nn_host
+  # With HA enabled namenode_address is recomputed
+  namenode_address = format('hdfs://{dfs_ha_nameservices}')
+
+  # Calculate the namenode id of the other namenode. This is needed during RU to initiate an HA failover using ZKFC.
+  if namenode_id is not None and len(dfs_ha_namemodes_ids_list) == 2:
+    other_namenode_id = list(set(dfs_ha_namemodes_ids_list) - set([namenode_id]))[0]
+
+
+if dfs_http_policy is not None and dfs_http_policy.upper() == "HTTPS_ONLY":
+  https_only = True
+  journalnode_address = default('/configurations/hdfs-site/dfs.journalnode.https-address', None)
+else:
+  https_only = False
+  journalnode_address = default('/configurations/hdfs-site/dfs.journalnode.http-address', None)
+
+if journalnode_address:
+  journalnode_port = journalnode_address.split(":")[1]
+  
+  
+if security_enabled:
+  dn_principal_name = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
+  dn_keytab = config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
+  dn_principal_name = dn_principal_name.replace('_HOST',hostname.lower())
+  
+  dn_kinit_cmd = format("{kinit_path_local} -kt {dn_keytab} {dn_principal_name};")
+  
+  nn_principal_name = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
+  nn_keytab = config['configurations']['hdfs-site']['dfs.namenode.keytab.file']
+  nn_principal_name = nn_principal_name.replace('_HOST',hostname.lower())
+  
+  nn_kinit_cmd = format("{kinit_path_local} -kt {nn_keytab} {nn_principal_name};")
+
+  jn_principal_name = default("/configurations/hdfs-site/dfs.journalnode.kerberos.principal", None)
+  if jn_principal_name:
+    jn_principal_name = jn_principal_name.replace('_HOST', hostname.lower())
+  jn_keytab = default("/configurations/hdfs-site/dfs.journalnode.keytab.file", None)
+  hdfs_kinit_cmd = format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name};")
+else:
+  dn_kinit_cmd = ""
+  nn_kinit_cmd = ""
+  hdfs_kinit_cmd = ""
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+dfs_type = default("/commandParams/dfs_type", "")
+
+import functools
+# Create a partial function that pre-binds the common arguments for every HdfsResource call;
+# code that creates/deletes/copies HDFS directories or files then calls params.HdfsResource with these defaults.
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
+  dfs_type = dfs_type
+)
+
+
+# The logic for LZO also exists in OOZIE's params.py
+io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
+lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
+lzo_packages = get_lzo_packages(stack_version_unformatted)
+  
+name_node_params = default("/commandParams/namenode", None)
+
+java_home = config['hostLevelParams']['java_home']
+java_version = expect("/hostLevelParams/java_version", int)
+
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
+namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
+
+jtnode_opt_newsize = "200m"
+jtnode_opt_maxnewsize = "200m"
+jtnode_heapsize =  "1024m"
+ttnode_heapsize = "1024m"
+
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+
+# ranger host
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+has_ranger_admin = not len(ranger_admin_hosts) == 0
+xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+
+#ranger hdfs properties
+policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
+if 'admin-properties' in config['configurations'] and 'policymgr_external_url' in config['configurations']['admin-properties'] and policymgr_mgr_url.endswith('/'):
+  policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
+xa_audit_db_name = default('/configurations/admin-properties/audit_db_name', 'ranger_audits')
+xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
+xa_db_host = config['configurations']['admin-properties']['db_host']
+repo_name = str(config['clusterName']) + '_hadoop'
+
+hadoop_security_authentication = config['configurations']['core-site']['hadoop.security.authentication']
+hadoop_security_authorization = config['configurations']['core-site']['hadoop.security.authorization']
+fs_default_name = config['configurations']['core-site']['fs.defaultFS']
+hadoop_security_auth_to_local = config['configurations']['core-site']['hadoop.security.auth_to_local']
+hadoop_rpc_protection = config['configurations']['ranger-hdfs-plugin-properties']['hadoop.rpc.protection']
+common_name_for_certificate = config['configurations']['ranger-hdfs-plugin-properties']['common.name.for.certificate']
+
+repo_config_username = config['configurations']['ranger-hdfs-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
+
+if security_enabled:
+  sn_principal_name = default("/configurations/hdfs-site/dfs.secondary.namenode.kerberos.principal", "nn/_HOST@EXAMPLE.COM")
+  sn_principal_name = sn_principal_name.replace('_HOST',hostname.lower())
+
+ranger_env = config['configurations']['ranger-env']
+ranger_plugin_properties = config['configurations']['ranger-hdfs-plugin-properties']
+policy_user = config['configurations']['ranger-hdfs-plugin-properties']['policy_user']
+
+# Used by the curl command in the Ranger plugin to fetch the DB connector jar
+jdk_location = config['hostLevelParams']['jdk_location']
+java_share_dir = '/usr/share/java'
+
+is_https_enabled = is_https_enabled_in_hdfs(config['configurations']['hdfs-site']['dfs.http.policy'],
+                                            config['configurations']['hdfs-site']['dfs.https.enable'])
+
+if has_ranger_admin:
+  enable_ranger_hdfs = (config['configurations']['ranger-hdfs-plugin-properties']['ranger-hdfs-plugin-enabled'].lower() == 'yes')
+  xa_audit_db_password = ''
+  if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db:
+    xa_audit_db_password = unicode(config['configurations']['admin-properties']['audit_db_password'])
+  repo_config_password = unicode(config['configurations']['ranger-hdfs-plugin-properties']['REPOSITORY_CONFIG_PASSWORD'])
+  xa_audit_db_flavor = (config['configurations']['admin-properties']['DB_FLAVOR']).lower()
+  previous_jdbc_jar_name = None
+
+  if stack_supports_ranger_audit_db:
+
+    if xa_audit_db_flavor == 'mysql':
+      jdbc_jar_name = default("/hostLevelParams/custom_mysql_jdbc_name", None)
+      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
+      audit_jdbc_url = format('jdbc:mysql://{xa_db_host}/{xa_audit_db_name}')
+      jdbc_driver = "com.mysql.jdbc.Driver"
+    elif xa_audit_db_flavor == 'oracle':
+      jdbc_jar_name = default("/hostLevelParams/custom_oracle_jdbc_name", None)
+      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
+      colon_count = xa_db_host.count(':')
+      if colon_count == 2 or colon_count == 0:
+        audit_jdbc_url = format('jdbc:oracle:thin:@{xa_db_host}')
+      else:
+        audit_jdbc_url = format('jdbc:oracle:thin:@//{xa_db_host}')
+      jdbc_driver = "oracle.jdbc.OracleDriver"
+    elif xa_audit_db_flavor == 'postgres':
+      jdbc_jar_name = default("/hostLevelParams/custom_postgres_jdbc_name", None)
+      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
+      audit_jdbc_url = format('jdbc:postgresql://{xa_db_host}/{xa_audit_db_name}')
+      jdbc_driver = "org.postgresql.Driver"
+    elif xa_audit_db_flavor == 'mssql':
+      jdbc_jar_name = default("/hostLevelParams/custom_mssql_jdbc_name", None)
+      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
+      audit_jdbc_url = format('jdbc:sqlserver://{xa_db_host};databaseName={xa_audit_db_name}')
+      jdbc_driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
+    elif xa_audit_db_flavor == 'sqla':
+      jdbc_jar_name = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
+      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
+      audit_jdbc_url = format('jdbc:sqlanywhere:database={xa_audit_db_name};host={xa_db_host}')
+      jdbc_driver = "sap.jdbc4.sqlanywhere.IDriver"
+
+  downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+  driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+  driver_curl_target = format("{hadoop_lib_home}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+  previous_jdbc_jar = format("{hadoop_lib_home}/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+
+  sql_connector_jar = ''
+
+  hdfs_ranger_plugin_config = {
+    'username': repo_config_username,
+    'password': repo_config_password,
+    'hadoop.security.authentication': hadoop_security_authentication,
+    'hadoop.security.authorization': hadoop_security_authorization,
+    'fs.default.name': fs_default_name,
+    'hadoop.security.auth_to_local': hadoop_security_auth_to_local,
+    'hadoop.rpc.protection': hadoop_rpc_protection,
+    'commonNameForCertificate': common_name_for_certificate,
+    'dfs.datanode.kerberos.principal': dn_principal_name if security_enabled else '',
+    'dfs.namenode.kerberos.principal': nn_principal_name if security_enabled else '',
+    'dfs.secondary.namenode.kerberos.principal': sn_principal_name if security_enabled else ''
+  }
+
+  hdfs_ranger_plugin_repo = {
+    'isActive': 'true',
+    'config': json.dumps(hdfs_ranger_plugin_config),
+    'description': 'hdfs repo',
+    'name': repo_name,
+    'repositoryType': 'hdfs',
+    'assetType': '1'
+  }
+  if stack_supports_ranger_kerberos and security_enabled:
+    hdfs_ranger_plugin_config['policy.download.auth.users'] = hdfs_user
+    hdfs_ranger_plugin_config['tag.download.auth.users'] = hdfs_user
+
+  if stack_supports_ranger_kerberos:
+    hdfs_ranger_plugin_config['ambari.service.check.user'] = policy_user
+
+    hdfs_ranger_plugin_repo = {
+      'isEnabled': 'true',
+      'configs': hdfs_ranger_plugin_config,
+      'description': 'hdfs repo',
+      'name': repo_name,
+      'type': 'hdfs'
+    }
+
+  xa_audit_db_is_enabled = False
+  ranger_audit_solr_urls = config['configurations']['ranger-admin-site']['ranger.audit.solr.urls']
+  if xml_configurations_supported and stack_supports_ranger_audit_db:
+    xa_audit_db_is_enabled = config['configurations']['ranger-hdfs-audit']['xasecure.audit.destination.db']
+  xa_audit_hdfs_is_enabled = config['configurations']['ranger-hdfs-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else None
+  ssl_keystore_password = unicode(config['configurations']['ranger-hdfs-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']) if xml_configurations_supported else None
+  ssl_truststore_password = unicode(config['configurations']['ranger-hdfs-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']) if xml_configurations_supported else None
+  credential_file = format('/etc/ranger/{repo_name}/cred.jceks') if xml_configurations_supported else None
+
+  #For SQLA explicitly disable audit to DB for Ranger
+  if xa_audit_db_flavor == 'sqla':
+    xa_audit_db_is_enabled = False
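
Note on the params.py above: the functools.partial block pre-binds the cluster-wide arguments
(user, keytab, kinit path, hdfs-site, default FS, immutable paths) so that call sites only pass
what varies per invocation. The sketch below shows the same pattern in isolation; the resource
function and its argument names are illustrative stand-ins, not the real resource_management API.

    import functools

    def hdfs_resource(path, action, user, keytab, default_fs):
        # A real implementation would shell out to `hdfs dfs ...` or use WebHDFS;
        # this stub only shows how the pre-bound arguments flow through.
        print("%s %s as %s (fs=%s, keytab=%s)" % (action, path, user, default_fs, keytab))

    # Bind the cluster-wide parameters once...
    HdfsResource = functools.partial(
        hdfs_resource,
        user="hdfs",
        keytab="/etc/security/keytabs/hdfs.headless.keytab",
        default_fs="hdfs://mycluster",
    )

    # ...so call sites only supply what changes per call.
    HdfsResource("/user/ambari-qa", action="create_on_execute")
    HdfsResource("/tmp/id1234", action="delete_on_execute")

Call sites elsewhere in these scripts then read as HdfsResource(path, action=...) without
repeating the security plumbing on every call.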

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/params_windows.py
new file mode 100644
index 0000000..70d95a6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/params_windows.py
@@ -0,0 +1,79 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+#Used in subsequent imports from params
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.format import format
+from install_params import exclude_packages
+from status_params import *
+
+config = Script.get_config()
+hadoop_conf_dir = None
+hbase_conf_dir = None
+hadoop_home = None
+try:
+  hadoop_conf_dir = os.environ["HADOOP_CONF_DIR"]
+  hbase_conf_dir = os.environ["HBASE_CONF_DIR"]
+  hadoop_home = os.environ["HADOOP_HOME"]
+except:
+  pass
+#directories & files
+dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
+fs_checkpoint_dir = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.dir']
+dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']
+#decommission
+hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
+exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
+# HDFS High Availability properties
+dfs_ha_enabled = False
+dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.internal.nameservices", None)
+dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+
+namenode_id = None
+namenode_rpc = None
+hostname = config["hostname"]
+if dfs_ha_namenode_ids:
+  dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
+  dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
+  if dfs_ha_namenode_ids_array_len > 1:
+    dfs_ha_enabled = True
+if dfs_ha_enabled:
+  for nn_id in dfs_ha_namemodes_ids_list:
+    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
+    if hostname in nn_host:
+      namenode_id = nn_id
+      namenode_rpc = nn_host
+
+hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
+hdfs_user = hadoop_user
+
+grep_exe = "findstr"
+
+name_node_params = default("/commandParams/namenode", None)
+
+service_map = {
+  "datanode" : datanode_win_service_name,
+  "journalnode" : journalnode_win_service_name,
+  "namenode" : namenode_win_service_name,
+  "secondarynamenode" : snamenode_win_service_name,
+  "zkfc_slave": zkfc_win_service_name
+}
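
The Windows params above resolve HADOOP_CONF_DIR, HBASE_CONF_DIR and HADOOP_HOME from the
environment and fall back to None when they are not set. The bare except achieves that, but it
also swallows unrelated errors; a narrower equivalent (a sketch only, not the shipped code)
would be:

    import os

    # Explicit defaults instead of a bare except:
    hadoop_conf_dir = os.environ.get("HADOOP_CONF_DIR")
    hbase_conf_dir = os.environ.get("HBASE_CONF_DIR")
    hadoop_home = os.environ.get("HADOOP_HOME")

    # Or, if try/except is preferred, catch only the expected error:
    try:
        hadoop_home = os.environ["HADOOP_HOME"]
    except KeyError:
        hadoop_home = None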


[24/51] [abbrv] ambari git commit: AMBARI-19137. HDP 3.0 TP - move ZK, HDFS, YARN/MR into new common-services version (alejandro)

Posted by sm...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/widgets.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/widgets.json
deleted file mode 100644
index 4a645b0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/widgets.json
+++ /dev/null
@@ -1,649 +0,0 @@
-{
-  "layouts": [
-    {
-      "layout_name": "default_hdfs_dashboard",
-      "display_name": "Standard HDFS Dashboard",
-      "section_name": "HDFS_SUMMARY",
-      "widgetLayoutInfo": [
-        {
-          "widget_name": "NameNode GC count",
-          "description": "Count of total garbage collections and count of major type garbage collections of the JVM.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "jvm.JvmMetrics.GcCount._rate",
-              "metric_path": "metrics/jvm/gcCount._rate",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "jvm.JvmMetrics.GcCountConcurrentMarkSweep._rate",
-              "metric_path": "metrics/jvm/GcCountConcurrentMarkSweep._rate",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "GC total count",
-              "value": "${jvm.JvmMetrics.GcCount._rate}"
-            },
-            {
-              "name": "GC count of type major collection",
-              "value": "${jvm.JvmMetrics.GcCountConcurrentMarkSweep._rate}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "NameNode GC time",
-          "description": "Total time taken by major type garbage collections in milliseconds.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep._rate",
-              "metric_path": "metrics/jvm/GcTimeMillisConcurrentMarkSweep._rate",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "GC time in major collection",
-              "value": "${jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep._rate}"
-            }
-          ],
-          "properties": {
-            "display_unit": "ms",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "NN Connection Load",
-          "description": "Number of open RPC connections being managed by NameNode.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "rpc.rpc.client.NumOpenConnections",
-              "metric_path": "metrics/rpc/client/NumOpenConnections",
-              "category": "",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "rpc.rpc.datanode.NumOpenConnections",
-              "metric_path": "metrics/rpc/datanode/NumOpenConnections",
-              "category": "",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "Open Client Connections",
-              "value": "${rpc.rpc.client.NumOpenConnections}"
-            },
-            {
-              "name": "Open Datanode Connections",
-              "value": "${rpc.rpc.datanode.NumOpenConnections}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "NameNode Heap",
-          "description": "Heap memory committed and Heap memory used with respect to time.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "jvm.JvmMetrics.MemHeapCommittedM",
-              "metric_path": "metrics/jvm/memHeapCommittedM",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "jvm.JvmMetrics.MemHeapUsedM",
-              "metric_path": "metrics/jvm/memHeapUsedM",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "JVM heap committed",
-              "value": "${jvm.JvmMetrics.MemHeapCommittedM}"
-            },
-            {
-              "name": "JVM heap used",
-              "value": "${jvm.JvmMetrics.MemHeapUsedM}"
-            }
-          ],
-          "properties": {
-            "display_unit": "MB",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "NameNode Host Load",
-          "description": "Percentage of CPU and Memory resources being consumed on NameNode host.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "cpu_system",
-              "metric_path": "metrics/cpu/cpu_system",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "cpu_user",
-              "metric_path": "metrics/cpu/cpu_user",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "cpu_nice",
-              "metric_path": "metrics/cpu/cpu_nice",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "cpu_idle",
-              "metric_path": "metrics/cpu/cpu_idle",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "cpu_wio",
-              "metric_path": "metrics/cpu/cpu_wio",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "mem_total",
-              "metric_path": "metrics/memory/mem_total",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "mem_free",
-              "metric_path": "metrics/memory/mem_free",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "CPU utilization",
-              "value": "${((cpu_system + cpu_user + cpu_nice)/(cpu_system + cpu_user + cpu_nice + cpu_idle + cpu_wio)) * 100}"
-            },
-            {
-              "name": "Memory utilization",
-              "value": "${((mem_total - mem_free)/mem_total) * 100}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1",
-            "display_unit": "%"
-          }
-        },
-        {
-          "widget_name": "NameNode RPC",
-          "description": "Compares the average time spent for RPC request in a queue and RPC request being processed.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "rpc.rpc.client.RpcQueueTimeAvgTime",
-              "metric_path": "metrics/rpc/client/RpcQueueTime_avg_time",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "rpc.rpc.client.RpcProcessingTimeAvgTime",
-              "metric_path": "metrics/rpc/client/RpcProcessingTime_avg_time",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "rpc.rpc.datanode.RpcQueueTimeAvgTime",
-              "metric_path": "metrics/rpc/datanode/RpcQueueTime_avg_time",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "rpc.rpc.datanode.RpcProcessingTimeAvgTime",
-              "metric_path": "metrics/rpc/datanode/RpcProcessingTime_avg_time",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "Client RPC Queue Wait time",
-              "value": "${rpc.rpc.client.RpcQueueTimeAvgTime}"
-            },
-            {
-              "name": "Client RPC Processing time",
-              "value": "${rpc.rpc.client.RpcProcessingTimeAvgTime}"
-            },
-            {
-              "name": "Datanode RPC Queue Wait time",
-              "value": "${rpc.rpc.datanode.RpcQueueTimeAvgTime}"
-            },
-            {
-              "name": "Datanode RPC Processing time",
-              "value": "${rpc.rpc.datanode.RpcProcessingTimeAvgTime}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1",
-            "display_unit": "ms"
-          }
-        },
-        {
-          "widget_name": "NameNode Operations",
-          "description": "Rate per second of number of file operation over time.",
-          "widget_type": "GRAPH",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "dfs.namenode.TotalFileOps._rate",
-              "metric_path": "metrics/dfs/namenode/TotalFileOps._rate",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "NameNode File Operations",
-              "value": "${dfs.namenode.TotalFileOps._rate}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Failed disk volumes",
-          "description": "Number of Failed disk volumes across all DataNodes. Its indicative of HDFS bad health.",
-          "widget_type": "NUMBER",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.NumFailedVolumes._sum",
-              "metric_path": "metrics/dfs/datanode/NumFailedVolumes",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "Failed disk volumes",
-              "value": "${FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.NumFailedVolumes._sum}"
-            }
-          ],
-          "properties": {
-            "display_unit": ""
-          }
-        },
-        {
-          "widget_name": "Blocks With Corrupted Replicas",
-          "description": "Number represents data blocks with at least one corrupted replica (but not all of them). Its indicative of HDFS bad health.",
-          "widget_type": "NUMBER",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks",
-              "metric_path": "metrics/dfs/FSNamesystem/CorruptBlocks",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "Blocks With Corrupted Replicas",
-              "value": "${Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks}"
-            }
-          ],
-          "properties": {
-            "warning_threshold": "0",
-            "error_threshold": "50"
-          }
-        },
-        {
-          "widget_name": "Under Replicated Blocks",
-          "description": "Number represents file blocks that does not meet the replication factor criteria. Its indicative of HDFS bad health.",
-          "widget_type": "NUMBER",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks",
-              "metric_path": "metrics/dfs/FSNamesystem/UnderReplicatedBlocks",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "Under Replicated Blocks",
-              "value": "${Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks}"
-            }
-          ],
-          "properties": {
-            "warning_threshold": "0",
-            "error_threshold": "50"
-          }
-        },
-        {
-          "widget_name": "HDFS Space Utilization",
-          "description": "Percentage of available space used in the DFS.",
-          "widget_type": "GAUGE",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining",
-              "metric_path": "metrics/FSDatasetState/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl/Remaining",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            },
-            {
-              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity",
-              "metric_path": "metrics/dfs/datanode/Capacity",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "HDFS Space Utilization",
-              "value": "${(FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity - FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining)/FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity}"
-            }
-          ],
-          "properties": {
-            "warning_threshold": "0.75",
-            "error_threshold": "0.9"
-          }
-        }
-      ]
-    },
-    {
-      "layout_name": "default_hdfs_heatmap",
-      "section_name": "HDFS_HEATMAPS",
-      "display_name": "HDFS Heatmaps",
-      "widgetLayoutInfo": [
-        {
-          "widget_name": "HDFS Bytes Read",
-          "default_section_name": "HDFS_HEATMAPS",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "dfs.datanode.BytesRead._rate",
-              "metric_path": "metrics/dfs/datanode/bytes_read._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "HDFS Bytes Read",
-              "value": "${dfs.datanode.BytesRead._rate}"
-            }
-          ],
-          "properties": {
-            "display_unit": "MB",
-            "max_limit": "1024"
-          }
-        },
-        {
-          "widget_name": "HDFS Bytes Written",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "dfs.datanode.BytesWritten._rate",
-              "metric_path": "metrics/dfs/datanode/bytes_written._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "HDFS Bytes Written",
-              "value": "${dfs.datanode.BytesWritten._rate}"
-            }
-          ],
-          "properties": {
-            "display_unit": "MB",
-            "max_limit": "1024"
-          }
-        },
-        {
-          "widget_name": "DataNode Garbage Collection Time",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "Hadoop:service=DataNode,name=JvmMetrics.GcTimeMillis",
-              "metric_path": "metrics/jvm/gcTimeMillis",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "DataNode Garbage Collection Time",
-              "value": "${Hadoop:service=DataNode,name=JvmMetrics.GcTimeMillis}"
-            }
-          ],
-          "properties": {
-            "display_unit": "ms",
-            "max_limit": "10000"
-          }
-        },
-        {
-          "widget_name": "DataNode JVM Heap Memory Used",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "Hadoop:service=DataNode,name=JvmMetrics.MemHeapUsedM",
-              "metric_path": "metrics/jvm/memHeapUsedM",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "DataNode JVM Heap Memory Used",
-              "value": "${Hadoop:service=DataNode,name=JvmMetrics.MemHeapUsedM}"
-            }
-          ],
-          "properties": {
-            "display_unit": "MB",
-            "max_limit": "512"
-          }
-        },
-        {
-          "widget_name": "DataNode JVM Heap Memory Committed",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "Hadoop:service=DataNode,name=JvmMetrics.MemHeapCommittedM",
-              "metric_path": "metrics/jvm/memHeapCommittedM",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "DataNode JVM Heap Memory Committed",
-              "value": "${Hadoop:service=DataNode,name=JvmMetrics.MemHeapCommittedM}"
-            }
-          ],
-          "properties": {
-            "display_unit": "MB",
-            "max_limit": "512"
-          }
-        },
-        {
-          "widget_name": "DataNode Process Disk I/O Utilization",
-          "default_section_name": "HDFS_HEATMAPS",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "dfs.datanode.BytesRead._rate",
-              "metric_path": "metrics/dfs/datanode/bytes_read._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            },
-            {
-              "name": "dfs.datanode.BytesWritten._rate",
-              "metric_path": "metrics/dfs/datanode/bytes_written._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            },
-            {
-              "name": "dfs.datanode.TotalReadTime._rate",
-              "metric_path": "metrics/dfs/datanode/TotalReadTime._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            },
-            {
-              "name": "dfs.datanode.TotalWriteTime._rate",
-              "metric_path": "metrics/dfs/datanode/TotalWriteTime._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "DataNode Process Disk I/O Utilization",
-              "value": "${((dfs.datanode.BytesRead._rate/dfs.datanode.TotalReadTime._rate)+(dfs.datanode.BytesWritten._rate/dfs.datanode.TotalWriteTime._rate))*50}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "DataNode Process Network I/O Utilization",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "dfs.datanode.RemoteBytesRead._rate",
-              "metric_path": "metrics/dfs/datanode/RemoteBytesRead._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            },
-            {
-              "name": "dfs.datanode.ReadsFromRemoteClient._rate",
-              "metric_path": "metrics/dfs/datanode/reads_from_remote_client._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            },
-            {
-              "name": "dfs.datanode.RemoteBytesWritten._rate",
-              "metric_path": "metrics/dfs/datanode/RemoteBytesWritten._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            },
-            {
-              "name": "dfs.datanode.WritesFromRemoteClient._rate",
-              "metric_path": "metrics/dfs/datanode/writes_from_remote_client._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "DataNode Process Network I/O Utilization",
-              "value": "${((dfs.datanode.RemoteBytesRead._rate/dfs.datanode.ReadsFromRemoteClient._rate)+(dfs.datanode.RemoteBytesWritten._rate/dfs.datanode.WritesFromRemoteClient._rate))*50}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "HDFS Space Utilization",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining",
-              "metric_path": "metrics/FSDatasetState/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl/Remaining",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            },
-            {
-              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity",
-              "metric_path": "metrics/dfs/datanode/Capacity",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "HDFS Space Utilization",
-              "value": "${((FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity - FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining)/FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        }
-      ]
-    }
-  ]
-}
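
The dashboard removed above derived its values from raw metrics, e.g. CPU utilization as
(cpu_system + cpu_user + cpu_nice) / (cpu_system + cpu_user + cpu_nice + cpu_idle + cpu_wio) * 100
and HDFS space utilization as (Capacity - Remaining) / Capacity. A worked example with made-up
sample values (for illustration only):

    # CPU utilization, as in the "NameNode Host Load" widget
    cpu_system, cpu_user, cpu_nice, cpu_idle, cpu_wio = 5.0, 20.0, 1.0, 70.0, 4.0
    busy = cpu_system + cpu_user + cpu_nice
    cpu_utilization = busy / (busy + cpu_idle + cpu_wio) * 100   # -> 26.0 (%)

    # Space utilization, as in the "HDFS Space Utilization" gauge
    capacity = 10.0 * 1024 ** 4      # 10 TiB of raw DataNode capacity (sample value)
    remaining = 2.5 * 1024 ** 4      # 2.5 TiB still free (sample value)
    space_utilization = (capacity - remaining) / capacity        # -> 0.75

    print(cpu_utilization, space_utilization)

The gauge treated 0.75 as its warning threshold and 0.9 as its error threshold, so the sample
cluster above would already be in the warning band.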

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/kerberos.json
deleted file mode 100644
index 9000e95..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/kerberos.json
+++ /dev/null
@@ -1,246 +0,0 @@
-{
-  "services": [
-    {
-      "name": "HDFS",
-      "identities": [
-        {
-          "name": "/spnego",
-          "principal": {
-            "configuration": "hdfs-site/dfs.web.authentication.kerberos.principal"
-          },
-          "keytab": {
-            "configuration": "hdfs-site/dfs.web.authentication.kerberos.keytab"
-          }
-        },
-        {
-          "name": "/smokeuser"
-        }
-      ],
-      "auth_to_local_properties" : [
-        "core-site/hadoop.security.auth_to_local"
-      ],
-      "configurations": [
-        {
-          "core-site": {
-            "hadoop.security.authentication": "kerberos",
-            "hadoop.security.authorization": "true",
-            "hadoop.proxyuser.HTTP.groups": "${hadoop-env/proxyuser_group}"
-          }
-        },
-        {
-          "ranger-hdfs-audit": {
-            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
-            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
-            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
-            "xasecure.audit.jaas.Client.option.storeKey": "false",
-            "xasecure.audit.jaas.Client.option.serviceName": "solr",
-            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
-          }
-        }
-      ],
-      "components": [
-        {
-          "name":  "HDFS_CLIENT",
-          "identities": [
-            {
-              "name": "/HDFS/NAMENODE/hdfs"
-            }
-          ]
-        },
-        {
-          "name": "NAMENODE",
-          "identities": [
-            {
-              "name": "hdfs",
-              "principal": {
-                "value": "${hadoop-env/hdfs_user}-${cluster_name|toLower()}@${realm}",
-                "type" : "user" ,
-                "configuration": "hadoop-env/hdfs_principal_name",
-                "local_username" : "${hadoop-env/hdfs_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/hdfs.headless.keytab",
-                "owner": {
-                  "name": "${hadoop-env/hdfs_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "hadoop-env/hdfs_user_keytab"
-              }
-            },
-            {
-              "name": "namenode_nn",
-              "principal": {
-                "value": "nn/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "hdfs-site/dfs.namenode.kerberos.principal",
-                "local_username" : "${hadoop-env/hdfs_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/nn.service.keytab",
-                "owner": {
-                  "name": "${hadoop-env/hdfs_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "hdfs-site/dfs.namenode.keytab.file"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "hdfs-site/dfs.namenode.kerberos.internal.spnego.principal"
-              }
-            },
-            {
-              "name": "/HDFS/NAMENODE/namenode_nn",
-              "principal": {
-                "configuration": "ranger-hdfs-audit/xasecure.audit.jaas.Client.option.principal"                
-              },
-              "keytab": {
-                "configuration": "ranger-hdfs-audit/xasecure.audit.jaas.Client.option.keyTab"
-              }
-            }
-          ],
-          "configurations": [
-            {
-              "hdfs-site": {
-                "dfs.block.access.token.enable": "true"
-              }
-            }
-          ]
-        },
-        {
-          "name": "DATANODE",
-          "identities": [
-            {
-              "name": "datanode_dn",
-              "principal": {
-                "value": "dn/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "hdfs-site/dfs.datanode.kerberos.principal",
-                "local_username" : "${hadoop-env/hdfs_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/dn.service.keytab",
-                "owner": {
-                  "name": "${hadoop-env/hdfs_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "hdfs-site/dfs.datanode.keytab.file"
-              }
-            }
-          ],
-          "configurations" : [
-            {
-              "hdfs-site" : {
-                "dfs.datanode.address" : "0.0.0.0:1019",
-                "dfs.datanode.http.address": "0.0.0.0:1022"
-              }
-            }
-          ]
-        },
-        {
-          "name": "SECONDARY_NAMENODE",
-          "identities": [
-            {
-              "name": "secondary_namenode_nn",
-              "principal": {
-                "value": "nn/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "hdfs-site/dfs.secondary.namenode.kerberos.principal",
-                "local_username" : "${hadoop-env/hdfs_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/nn.service.keytab",
-                "owner": {
-                  "name": "${hadoop-env/hdfs_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "hdfs-site/dfs.secondary.namenode.keytab.file"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "hdfs-site/dfs.secondary.namenode.kerberos.internal.spnego.principal"
-              }
-            }
-          ]
-        },
-        {
-          "name": "NFS_GATEWAY",
-          "identities": [
-            {
-              "name": "nfsgateway",
-              "principal": {
-                "value": "nfs/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "hdfs-site/nfs.kerberos.principal",
-                "local_username" : "${hadoop-env/hdfs_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/nfs.service.keytab",
-                "owner": {
-                  "name": "${hadoop-env/hdfs_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "hdfs-site/nfs.keytab.file"
-              }
-            }
-          ]
-        },
-        {
-          "name": "JOURNALNODE",
-          "identities": [
-            {
-              "name": "journalnode_jn",
-              "principal": {
-                "value": "jn/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "hdfs-site/dfs.journalnode.kerberos.principal",
-                "local_username" : "${hadoop-env/hdfs_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/jn.service.keytab",
-                "owner": {
-                  "name": "${hadoop-env/hdfs_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "hdfs-site/dfs.journalnode.keytab.file"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "hdfs-site/dfs.journalnode.kerberos.internal.spnego.principal"
-              }
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}
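
A note on the ${...} references used throughout the kerberos descriptor above: tokens of the form ${config-type/property} refer to a cluster configuration value, while bare tokens such as ${realm} and ${keytab_dir} come from the Kerberos environment settings. The snippet below is a minimal, hypothetical Python sketch of that substitution with made-up values; it is not Ambari's actual variable-replacement code.

    import re

    # Illustrative inputs only; real values come from the cluster configs
    # and the Kerberos environment.
    configs = {"hadoop-env": {"hdfs_user": "hdfs"},
               "cluster-env": {"user_group": "hadoop"}}
    env = {"realm": "EXAMPLE.COM", "keytab_dir": "/etc/security/keytabs"}

    def resolve(template):
        """Replace ${type/prop} from configs and ${name} from env."""
        def repl(match):
            token = match.group(1)
            if "/" in token:
                config_type, prop = token.split("/", 1)
                return configs.get(config_type, {}).get(prop, match.group(0))
            return env.get(token, match.group(0))
        return re.sub(r"\$\{([^}]+)\}", repl, template)

    print(resolve("dn/_HOST@${realm}"))               # dn/_HOST@EXAMPLE.COM
    print(resolve("${keytab_dir}/dn.service.keytab")) # /etc/security/keytabs/dn.service.keytab
    print(resolve("${hadoop-env/hdfs_user}"))         # hdfs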

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/metainfo.xml
index 30c49c7..ef2027f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/metainfo.xml
@@ -21,39 +21,8 @@
     <service>
       <name>HDFS</name>
       <displayName>HDFS</displayName>
-      <comment>Apache Hadoop Distributed File System</comment>
-      <version>2.7.1.3.0</version>
-      <extends>common-services/HDFS/2.1.0.2.0</extends>
-
-      <components>
-        <!-- NFS Gateway was added in HDP 2.3. -->
-        <component>
-          <name>NFS_GATEWAY</name>
-          <displayName>NFSGateway</displayName>
-          <cardinality>0+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <category>SLAVE</category>
-          <commandScript>
-            <script>scripts/nfsgateway.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-        </component>
-
-        <component>
-          <name>ZKFC</name>
-          <versionAdvertised>true</versionAdvertised>
-        </component>
-      </components>
+      <version>3.0.0.3.0</version>
+      <extends>common-services/HDFS/3.0.0</extends>
 
       <osSpecifics>
         <osSpecific>
@@ -171,20 +140,6 @@
         </osSpecific>
       </osSpecifics>
 
-      <quickLinksConfigurations>
-        <quickLinksConfiguration>
-          <fileName>quicklinks.json</fileName>
-          <default>true</default>
-        </quickLinksConfiguration>
-      </quickLinksConfigurations>
-
-      <themes>
-        <theme>
-          <fileName>theme.json</fileName>
-          <default>true</default>
-        </theme>
-      </themes>
-
     </service>
   </services>
 </metainfo>
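
The metainfo.xml change above drops the inline component list and instead extends common-services/HDFS/3.0.0, so the HDP 3.0 definition mostly carries its own version string. Purely as a toy illustration of that inheritance idea (not Ambari's actual metainfo merge rules), a child descriptor can be viewed as the parent definition with the child's fields layered on top:

    # Hypothetical, simplified descriptors; a real metainfo.xml carries much more.
    parent = {"name": "HDFS",
              "comment": "Apache Hadoop Distributed File System",
              "components": ["NAMENODE", "DATANODE", "SECONDARY_NAMENODE",
                             "JOURNALNODE", "ZKFC", "NFS_GATEWAY", "HDFS_CLIENT"]}
    child = {"name": "HDFS", "version": "3.0.0.3.0",
             "extends": "common-services/HDFS/3.0.0"}

    # Child values win; anything the child omits is taken from the parent.
    effective = {**parent, **{k: v for k, v in child.items() if k != "extends"}}
    print(effective["version"])     # 3.0.0.3.0
    print(effective["components"])  # inherited from the common-services parent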

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/quicklinks/quicklinks.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/quicklinks/quicklinks.json
deleted file mode 100644
index 5318ba0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/quicklinks/quicklinks.json
+++ /dev/null
@@ -1,80 +0,0 @@
-{
-  "name": "default",
-  "description": "default quick links configuration",
-  "configuration": {
-    "protocol":
-    {
-      "type":"https",
-      "checks":[
-        {
-          "property":"dfs.http.policy",
-          "desired":"HTTPS_ONLY",
-          "site":"hdfs-site"
-        }
-      ]
-    },
-
-    "links": [
-      {
-        "name": "namenode_ui",
-        "label": "NameNode UI",
-        "component_name": "NAMENODE",
-        "url":"%@://%@:%@",
-        "requires_user_name": "false",
-        "port":{
-          "http_property": "dfs.namenode.http-address",
-          "http_default_port": "50070",
-          "https_property": "dfs.namenode.https-address",
-          "https_default_port": "50470",
-          "regex": "\\w*:(\\d+)",
-          "site": "hdfs-site"
-        }
-      },
-      {
-        "name": "namenode_logs",
-        "label": "NameNode Logs",
-        "component_name": "NAMENODE",
-        "url":"%@://%@:%@/logs",
-        "requires_user_name": "false",
-        "port":{
-          "http_property": "dfs.namenode.http-address",
-          "http_default_port": "50070",
-          "https_property": "dfs.namenode.https-address",
-          "https_default_port": "50470",
-          "regex": "\\w*:(\\d+)",
-          "site": "hdfs-site"
-        }
-      },
-      {
-        "name": "namenode_jmx",
-        "label": "NameNode JMX",
-        "component_name": "NAMENODE",
-        "url":"%@://%@:%@/jmx",
-        "requires_user_name": "false",
-        "port":{
-          "http_property": "dfs.namenode.http-address",
-          "http_default_port": "50070",
-          "https_property": "dfs.namenode.https-address",
-          "https_default_port": "50470",
-          "regex": "\\w*:(\\d+)",
-          "site": "hdfs-site"
-        }
-      },
-      {
-        "name": "Thread Stacks",
-        "label": "Thread Stacks",
-        "component_name": "NAMENODE",
-        "url":"%@://%@:%@/stacks",
-        "requires_user_name": "false",
-        "port":{
-          "http_property": "dfs.namenode.http-address",
-          "http_default_port": "50070",
-          "https_property": "dfs.namenode.https-address",
-          "https_default_port": "50470",
-          "regex": "\\w*:(\\d+)",
-          "site": "hdfs-site"
-        }
-      }
-    ]
-  }
-}
\ No newline at end of file
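
Each link in the removed quicklinks.json combines a protocol check (dfs.http.policy) with a port definition whose regex pulls the port out of an address property. A rough Python sketch of how one such entry could be turned into a URL, using assumed host and hdfs-site values rather than Ambari's quick-links code, would be:

    import re

    # Assumed hdfs-site values for the example.
    hdfs_site = {"dfs.http.policy": "HTTPS_ONLY",
                 "dfs.namenode.https-address": "nn.example.com:50470"}

    link = {"url": "%@://%@:%@",
            "port": {"http_property": "dfs.namenode.http-address",
                     "http_default_port": "50070",
                     "https_property": "dfs.namenode.https-address",
                     "https_default_port": "50470",
                     "regex": r"\w*:(\d+)"}}

    protocol = "https" if hdfs_site.get("dfs.http.policy") == "HTTPS_ONLY" else "http"
    port_cfg = link["port"]
    address = hdfs_site.get(port_cfg[protocol + "_property"], "")
    match = re.search(port_cfg["regex"], address)
    port = match.group(1) if match else port_cfg[protocol + "_default_port"]

    host = "nn.example.com"  # assumed; normally the NAMENODE host goes here
    print(link["url"].replace("%@", "{}").format(protocol, host, port))
    # https://nn.example.com:50470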

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/themes/theme.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/themes/theme.json
deleted file mode 100644
index 6f2b797..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/themes/theme.json
+++ /dev/null
@@ -1,179 +0,0 @@
-{
-  "name": "default",
-  "description": "Default theme for HDFS service",
-  "configuration": {
-    "layouts": [
-      {
-        "name": "default",
-        "tabs": [
-          {
-            "name": "settings",
-            "display-name": "Settings",
-            "layout": {
-              "tab-columns": "2",
-              "tab-rows": "1",
-              "sections": [
-                {
-                  "name": "section-namenode",
-                  "display-name": "NameNode",
-                  "row-index": "0",
-                  "column-index": "0",
-                  "row-span": "1",
-                  "column-span": "1",
-                  "section-columns": "1",
-                  "section-rows": "1",
-                  "subsections": [
-                    {
-                      "name": "subsection-namenode-col1",
-                      "row-index": "0",
-                      "column-index": "0",
-                      "row-span": "1",
-                      "column-span": "1"
-                    }
-                  ]
-                },
-                {
-                  "name": "section-datanode",
-                  "display-name": "DataNode",
-                  "row-index": "0",
-                  "column-index": "1",
-                  "row-span": "1",
-                  "column-span": "1",
-                  "section-columns": "1",
-                  "section-rows": "1",
-                  "subsections": [
-                    {
-                      "name": "subsection-datanode-col1",
-                      "row-index": "0",
-                      "column-index": "0",
-                      "row-span": "1",
-                      "column-span": "1"
-                    }
-                  ]
-                }
-              ]
-            }
-          }
-        ]
-      }
-    ],
-    "placement": {
-      "configuration-layout": "default",
-      "configs": [
-        {
-          "config": "hdfs-site/dfs.namenode.name.dir",
-          "subsection-name": "subsection-namenode-col1"
-        },
-        {
-          "config": "hadoop-env/namenode_heapsize",
-          "subsection-name": "subsection-namenode-col1"
-        },
-        {
-          "config": "hdfs-site/dfs.namenode.handler.count",
-          "subsection-name": "subsection-namenode-col1"
-        },
-        {
-          "config": "hdfs-site/dfs.namenode.safemode.threshold-pct",
-          "subsection-name": "subsection-namenode-col1"
-        },
-        {
-          "config": "hdfs-site/dfs.datanode.data.dir",
-          "subsection-name": "subsection-datanode-col1"
-        },
-        {
-          "config": "hdfs-site/dfs.datanode.failed.volumes.tolerated",
-          "subsection-name": "subsection-datanode-col1"
-        },
-        {
-          "config": "hadoop-env/dtnode_heapsize",
-          "subsection-name": "subsection-datanode-col1"
-        },
-        {
-          "config": "hdfs-site/dfs.datanode.max.transfer.threads",
-          "subsection-name": "subsection-datanode-col1"
-        }
-      ]
-    },
-    "widgets": [
-      {
-        "config": "hdfs-site/dfs.namenode.name.dir",
-        "widget": {
-          "type": "directories"
-        }
-      },
-      {
-        "config": "hdfs-site/dfs.namenode.safemode.threshold-pct",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "percent"
-            }
-          ]
-        }
-      },
-      {
-        "config": "hdfs-site/dfs.namenode.handler.count",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "int"
-            }
-          ]
-        }
-      },
-      {
-        "config": "hadoop-env/namenode_heapsize",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "GB"
-            }
-          ]
-        }
-      },
-      {
-        "config": "hdfs-site/dfs.datanode.failed.volumes.tolerated",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "int"
-            }
-          ]
-        }
-      },
-      {
-        "config": "hdfs-site/dfs.datanode.data.dir",
-        "widget": {
-          "type": "directories"
-        }
-      },
-      {
-        "config": "hadoop-env/dtnode_heapsize",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "GB"
-            }
-          ]
-        }
-      },
-      {
-        "config": "hdfs-site/dfs.datanode.max.transfer.threads",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "int"
-            }
-          ]
-        }
-      }
-    ]
-  }
-}
-
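
The deleted theme.json is mostly a mapping from individual configs to layout subsections plus a widget type per config. A small, purely illustrative sketch of inverting the placement block into a per-subsection view (not the Ambari web UI logic):

    import json
    from collections import defaultdict

    # Trimmed excerpt of the removed theme's placement block.
    theme = json.loads("""{
      "placement": {"configs": [
        {"config": "hdfs-site/dfs.namenode.name.dir", "subsection-name": "subsection-namenode-col1"},
        {"config": "hadoop-env/namenode_heapsize",    "subsection-name": "subsection-namenode-col1"},
        {"config": "hdfs-site/dfs.datanode.data.dir", "subsection-name": "subsection-datanode-col1"}
      ]}
    }""")

    by_subsection = defaultdict(list)
    for entry in theme["placement"]["configs"]:
        by_subsection[entry["subsection-name"]].append(entry["config"])

    for name, configs in sorted(by_subsection.items()):
        print(name, "->", configs)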

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/YARN_widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/YARN_widgets.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/YARN_widgets.json
deleted file mode 100644
index 782f21d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/YARN_widgets.json
+++ /dev/null
@@ -1,670 +0,0 @@
-{
-  "layouts": [
-    {
-      "layout_name": "default_yarn_dashboard",
-      "display_name": "Standard YARN Dashboard",
-      "section_name": "YARN_SUMMARY",
-      "widgetLayoutInfo": [
-        {
-          "widget_name": "Memory Utilization",
-          "description": "Percentage of total memory allocated to containers running in the cluster.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AllocatedMB",
-              "metric_path": "metrics/yarn/Queue/root/AllocatedMB",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AvailableMB",
-              "metric_path": "metrics/yarn/Queue/root/AvailableMB",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            }
-          ],
-          "values": [
-            {
-              "name": "Memory Utilization",
-              "value": "${(yarn.QueueMetrics.Queue=root.AllocatedMB / (yarn.QueueMetrics.Queue=root.AllocatedMB + yarn.QueueMetrics.Queue=root.AvailableMB)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "CPU Utilization",
-          "description": "Percentage of total virtual cores allocated to containers running in the cluster.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AllocatedVCores",
-              "metric_path": "metrics/yarn/Queue/root/AllocatedVCores",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AvailableVCores",
-              "metric_path": "metrics/yarn/Queue/root/AvailableVCores",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            }
-          ],
-          "values": [
-            {
-              "name": "Total Allocatable CPU Utilized across NodeManager",
-              "value": "${(yarn.QueueMetrics.Queue=root.AllocatedVCores / (yarn.QueueMetrics.Queue=root.AllocatedVCores + yarn.QueueMetrics.Queue=root.AvailableVCores)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Bad Local Disks",
-          "description": "Number of unhealthy local disks across all NodeManagers.",
-          "widget_type": "NUMBER",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.BadLocalDirs",
-              "metric_path": "metrics/yarn/BadLocalDirs",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.BadLogDirs",
-              "metric_path": "metrics/yarn/BadLogDirs",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Number of unhealthy local disks for NodeManager",
-              "value": "${yarn.NodeManagerMetrics.BadLocalDirs + yarn.NodeManagerMetrics.BadLogDirs}"
-            }
-          ],
-          "properties": {
-            "display_unit": ""
-          }
-        },
-        {
-          "widget_name": "Container Failures",
-          "description": "Percentage of all containers failing in the cluster.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersFailed._rate",
-              "metric_path": "metrics/yarn/ContainersFailed._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersCompleted._rate",
-              "metric_path": "metrics/yarn/ContainersCompleted._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersLaunched._rate",
-              "metric_path": "metrics/yarn/ContainersLaunched._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersIniting._sum",
-              "metric_path": "metrics/yarn/ContainersIniting._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersKilled._rate",
-              "metric_path": "metrics/yarn/ContainersKilled._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersRunning._sum",
-              "metric_path": "metrics/yarn/ContainersRunning._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Container Failures",
-              "value": "${(yarn.NodeManagerMetrics.ContainersFailed._rate/(yarn.NodeManagerMetrics.ContainersFailed._rate + yarn.NodeManagerMetrics.ContainersCompleted._rate + yarn.NodeManagerMetrics.ContainersLaunched._rate + yarn.NodeManagerMetrics.ContainersIniting._sum + yarn.NodeManagerMetrics.ContainersKilled._rate + yarn.NodeManagerMetrics.ContainersRunning._sum)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "App Failures",
-          "description": "Percentage of all launched applications failing in the cluster.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsFailed._rate",
-              "metric_path": "metrics/yarn/Queue/root/AppsFailed._rate",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsKilled._rate",
-              "metric_path": "metrics/yarn/Queue/root/AppsKilled._rate",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsPending",
-              "metric_path": "metrics/yarn/Queue/root/AppsPending",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsRunning",
-              "metric_path": "metrics/yarn/Queue/root/AppsRunning",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsSubmitted._rate",
-              "metric_path": "metrics/yarn/Queue/root/AppsSubmitted._rate",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsCompleted._rate",
-              "metric_path": "metrics/yarn/Queue/root/AppsCompleted._rate",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            }
-          ],
-          "values": [
-            {
-              "name": "App Failures",
-              "value": "${(yarn.QueueMetrics.Queue=root.AppsFailed._rate/(yarn.QueueMetrics.Queue=root.AppsFailed._rate + yarn.QueueMetrics.Queue=root.AppsKilled._rate + yarn.QueueMetrics.Queue=root.AppsPending + yarn.QueueMetrics.Queue=root.AppsRunning + yarn.QueueMetrics.Queue=root.AppsSubmitted._rate + yarn.QueueMetrics.Queue=root.AppsCompleted._rate)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Pending Apps",
-          "description": "Count of applications waiting for cluster resources to become available.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsPending",
-              "metric_path": "metrics/yarn/Queue/root/AppsPending",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            }
-          ],
-          "values": [
-            {
-              "name": "Pending Apps",
-              "value": "${yarn.QueueMetrics.Queue=root.AppsPending}"
-            }
-          ],
-          "properties": {
-            "display_unit": "Apps",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Cluster Memory",
-          "description": "Percentage of memory used across all NodeManager hosts.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "mem_total._sum",
-              "metric_path": "metrics/memory/mem_total._avg",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "mem_free._sum",
-              "metric_path": "metrics/memory/mem_free._avg",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Memory utilization",
-              "value": "${((mem_total._sum - mem_free._sum)/mem_total._sum) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Cluster Disk",
-          "description": "Sum of disk throughput for all NodeManager hosts.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "read_bps._sum",
-              "metric_path": "metrics/disk/read_bps._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "write_bps._sum",
-              "metric_path": "metrics/disk/write_bps._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Read throughput",
-              "value": "${read_bps._sum/1048576}"
-            },
-            {
-              "name": "Write throughput",
-              "value": "${write_bps._sum/1048576}"
-            }
-          ],
-          "properties": {
-            "display_unit": "Mbps",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Cluster Network",
-          "description": "Average of Network utilized across all NodeManager hosts.",
-          "default_section_name": "YARN_SUMMARY",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "pkts_in._avg",
-              "metric_path": "metrics/network/pkts_in._avg",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "pkts_out._avg",
-              "metric_path": "metrics/network/pkts_out._avg",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Packets In",
-              "value": "${pkts_in._avg}"
-            },
-            {
-              "name": "Packets Out",
-              "value": "${pkts_out._avg}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Cluster CPU",
-          "description": "Percentage of CPU utilized across all NodeManager hosts.",
-          "default_section_name": "YARN_SUMMARY",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "cpu_system._sum",
-              "metric_path": "metrics/cpu/cpu_system._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "cpu_user._sum",
-              "metric_path": "metrics/cpu/cpu_user._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "cpu_nice._sum",
-              "metric_path": "metrics/cpu/cpu_nice._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "cpu_idle._sum",
-              "metric_path": "metrics/cpu/cpu_idle._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "cpu_wio._sum",
-              "metric_path": "metrics/cpu/cpu_wio._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "CPU utilization",
-              "value": "${((cpu_system._sum + cpu_user._sum + cpu_nice._sum)/(cpu_system._sum + cpu_user._sum + cpu_nice._sum + cpu_idle._sum + cpu_wio._sum)) * 100}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1",
-            "display_unit": "%"
-          }
-        }
-      ]
-    },
-    {
-      "layout_name": "default_yarn_heatmap",
-      "display_name": "YARN Heatmaps",
-      "section_name": "YARN_HEATMAPS",
-      "widgetLayoutInfo": [
-        {
-          "widget_name": "YARN local disk space utilization per NodeManager",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc",
-              "metric_path": "metrics/yarn/GoodLocalDirsDiskUtilizationPerc",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc",
-              "metric_path": "metrics/yarn/GoodLogDirsDiskUtilizationPerc",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "YARN local disk space utilization per NodeManager",
-              "value": "${(yarn.NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc + yarn.NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc)/2}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Total Allocatable RAM Utilized per NodeManager",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.AllocatedGB",
-              "metric_path": "metrics/yarn/AllocatedGB",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.AvailableGB",
-              "metric_path": "metrics/yarn/AvailableGB",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Total Allocatable RAM Utilized per NodeManager",
-              "value": "${(yarn.NodeManagerMetrics.AllocatedGB/(yarn.NodeManagerMetrics.AvailableGB + yarn.NodeManagerMetrics.AllocatedGB)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Total Allocatable CPU Utilized per NodeManager",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.AllocatedVCores",
-              "metric_path": "metrics/yarn/AllocatedVCores",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.AvailableVCores",
-              "metric_path": "metrics/yarn/AvailableVCores",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Total Allocatable CPU Utilized per NodeManager",
-              "value": "${(yarn.NodeManagerMetrics.AllocatedVCores/(yarn.NodeManagerMetrics.AllocatedVCores + yarn.NodeManagerMetrics.AvailableVCores)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Container Failures",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersFailed._rate",
-              "metric_path": "metrics/yarn/ContainersFailed._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersCompleted._rate",
-              "metric_path": "metrics/yarn/ContainersCompleted._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersLaunched._rate",
-              "metric_path": "metrics/yarn/ContainersLaunched._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersIniting",
-              "metric_path": "metrics/yarn/ContainersIniting",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersKilled._rate",
-              "metric_path": "metrics/yarn/ContainersKilled._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersRunning",
-              "metric_path": "metrics/yarn/ContainersRunning",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Container Failures",
-              "value": "${(yarn.NodeManagerMetrics.ContainersFailed._rate/(yarn.NodeManagerMetrics.ContainersFailed._rate + yarn.NodeManagerMetrics.ContainersCompleted._rate + yarn.NodeManagerMetrics.ContainersLaunched._rate + yarn.NodeManagerMetrics.ContainersIniting + yarn.NodeManagerMetrics.ContainersKilled._rate + yarn.NodeManagerMetrics.ContainersRunning)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "NodeManager GC Time",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis",
-              "metric_path": "metrics/jvm/gcTimeMillis",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "NodeManager Garbage Collection Time",
-              "value": "${Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis}"
-            }
-          ],
-          "properties": {
-            "display_unit": "ms",
-            "max_limit": "10000"
-          }
-        },
-        {
-          "widget_name": "NodeManager JVM Heap Memory Used",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM",
-              "metric_path": "metrics/jvm/memHeapUsedM",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "NodeManager JVM Heap Memory Used",
-              "value": "${Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM}"
-            }
-          ],
-          "properties": {
-            "display_unit": "MB",
-            "max_limit": "512"
-          }
-        },
-        {
-          "widget_name": "Allocated Containers",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.AllocatedContainers",
-              "metric_path": "metrics/yarn/AllocatedContainers",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Allocated Containers",
-              "value": "${yarn.NodeManagerMetrics.AllocatedContainers}"
-            }
-          ],
-          "properties": {
-            "display_unit": "",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "NodeManager RAM Utilized",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.AllocatedGB",
-              "metric_path": "metrics/yarn/AllocatedGB",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "NodeManager RAM Utilized",
-              "value": "${yarn.NodeManagerMetrics.AllocatedGB}"
-            }
-          ],
-          "properties": {
-            "display_unit": "",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "NodeManager CPU Utilized",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.AllocatedVCores",
-              "metric_path": "metrics/yarn/AllocatedVCores",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "NodeManager CPU Utilized",
-              "value": "${yarn.NodeManagerMetrics.AllocatedVCores}"
-            }
-          ],
-          "properties": {
-            "display_unit": "",
-            "max_limit": "100"
-          }
-        }
-      ]
-    }
-  ]
-}
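
Each widget above pairs a list of metric names with a value template such as ${(A / (A + B)) * 100}, where the metric names themselves contain dots and '=' signs. One simple way to evaluate such a template, shown purely as a sketch and not as how the Ambari Metrics/widget code actually does it, is to substitute the metric names with sampled numbers (longest name first) and then evaluate the leftover arithmetic:

    # Illustrative metric samples; real numbers come from the metrics collector.
    metrics = {
        "yarn.QueueMetrics.Queue=root.AllocatedMB": 6144.0,
        "yarn.QueueMetrics.Queue=root.AvailableMB": 2048.0,
    }
    template = ("${(yarn.QueueMetrics.Queue=root.AllocatedMB / "
                "(yarn.QueueMetrics.Queue=root.AllocatedMB + "
                "yarn.QueueMetrics.Queue=root.AvailableMB)) * 100}")

    expr = template[2:-1]  # strip the surrounding ${ ... }
    # Replace longer names first so one metric name cannot clobber a prefix of another.
    for name in sorted(metrics, key=len, reverse=True):
        expr = expr.replace(name, repr(metrics[name]))

    print(eval(expr))  # 75.0 -- eval() is only acceptable in a toy sketch like this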

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml
index a70fad3..deb4ef7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml
@@ -1,25 +1,23 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
 <!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
 -->
-<configuration supports_adding_forbidden="true">
+<!-- Put site-specific property overrides in this file. -->
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
   <!-- These configs were inherited from HDP 2.2 -->
   <!-- mapred-env.sh -->
   <property>
@@ -27,21 +25,21 @@
     <display-name>mapred-env template</display-name>
     <description>This is the jinja template for mapred-env.sh file</description>
     <value>
-# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
+      # export JAVA_HOME=/home/y/libexec/jdk1.6.0/
 
-export HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}
+      export HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}
 
-export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA
+      export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA
 
-#export HADOOP_JOB_HISTORYSERVER_OPTS=
-#export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.
-#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.
-#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.
-#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default
-#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.
-export HADOOP_OPTS="-Dhdp.version=$HDP_VERSION $HADOOP_OPTS"
-export HADOOP_OPTS="-Djava.io.tmpdir={{hadoop_java_io_tmpdir}} $HADOOP_OPTS"
-export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}"
+      #export HADOOP_JOB_HISTORYSERVER_OPTS=
+      #export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.
+      #export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.
+      #export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.
+      #export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default
+      #export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.
+      export HADOOP_OPTS="-Dhdp.version=$HDP_VERSION $HADOOP_OPTS"
+      export HADOOP_OPTS="-Djava.io.tmpdir={{hadoop_java_io_tmpdir}} $HADOOP_OPTS"
+      export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}"
     </value>
     <value-attributes>
       <type>content</type>
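
The mapred-env content above is a Jinja template keyed on variables such as {{jobhistory_heapsize}} and {{hadoop_java_io_tmpdir}}. A minimal rendering sketch with made-up parameter values, using a bare-bones regex substitution instead of the real Jinja engine the stack scripts rely on:

    import re

    template = (
        'export HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}\n'
        'export HADOOP_OPTS="-Djava.io.tmpdir={{hadoop_java_io_tmpdir}} $HADOOP_OPTS"\n')

    # Assumed values for the template variables.
    params = {"jobhistory_heapsize": "900", "hadoop_java_io_tmpdir": "/tmp/hadoop"}

    rendered = re.sub(r"\{\{\s*(\w+)\s*\}\}",
                      lambda m: params.get(m.group(1), m.group(0)),
                      template)
    print(rendered)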

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml
index cef2b14..46f1c32 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml
@@ -18,6 +18,16 @@
 -->
 <!-- Put site-specific property overrides in this file. -->
 <configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
+  <property>
+    <name>mapreduce.application.classpath</name>
+    <value>$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:$PWD/mr-framework/hadoop/share/hadoop/tools/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure</value>
+    <description>
+      CLASSPATH for MR applications. A comma-separated list of CLASSPATH
+      entries.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
   <!-- These configs were inherited from HDP 2.2 -->
   <property>
     <name>mapreduce.admin.user.env</name>
@@ -30,15 +40,6 @@
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
-    <name>mapreduce.application.classpath</name>
-    <value>$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:$PWD/mr-framework/hadoop/share/hadoop/tools/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure</value>
-    <description>
-      CLASSPATH for MR applications. A comma-separated list of CLASSPATH
-      entries.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
     <name>mapreduce.application.framework.path</name>
     <value>/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework</value>
     <description/>
@@ -74,61 +75,4 @@
     <description/>
     <on-ambari-upgrade add="true"/>
   </property>
-  <property>
-    <name>mapreduce.reduce.shuffle.fetch.retry.enabled</name>
-    <value>1</value>
-    <description/>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.reduce.shuffle.fetch.retry.interval-ms</name>
-    <value>1000</value>
-    <description/>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.reduce.shuffle.fetch.retry.timeout-ms</name>
-    <value>30000</value>
-    <description/>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.job.emit-timeline-data</name>
-    <value>false</value>
-    <description/>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.bind-host</name>
-    <value>0.0.0.0</value>
-    <description/>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <!-- These configs were inherited from HDP 2.3 -->
-  <property>
-    <name>mapreduce.jobhistory.recovery.enable</name>
-    <value>true</value>
-    <description>Enable the history server to store server state and recover
-      server state upon startup.  If enabled then
-      mapreduce.jobhistory.recovery.store.class must be specified.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.recovery.store.class</name>
-    <value>org.apache.hadoop.mapreduce.v2.hs.HistoryServerLeveldbStateStoreService</value>
-    <description>The HistoryServerStateStoreService class to store history server
-      state for recovery.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.recovery.store.leveldb.path</name>
-    <value>/hadoop/mapreduce/jhs</value>
-    <description>The URI where history server state will be stored if HistoryServerLeveldbSystemStateStoreService
-      is configured as the recovery storage class.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
 </configuration>
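
The mapreduce.application.classpath value moved above embeds ${hdp.version} several times; at install time that placeholder becomes the concrete HDP build string. Purely as an illustration with an assumed version number (this is not Ambari's property expansion), the placeholder can be expanded and the colon-separated entries listed:

    classpath = ("$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:"
                 "/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:"
                 "/etc/hadoop/conf/secure")

    hdp_version = "3.0.0.0-1234"  # assumed build string, for the example only
    expanded = classpath.replace("${hdp.version}", hdp_version)

    for entry in expanded.split(":"):
        print(entry)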

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/capacity-scheduler.xml
deleted file mode 100644
index 4768e46..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/capacity-scheduler.xml
+++ /dev/null
@@ -1,71 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <!-- These configs were inherited from HDP 2.2 -->
-  <property>
-    <name>yarn.scheduler.capacity.resource-calculator</name>
-    <description>
-      The ResourceCalculator implementation to be used to compare Resources in the scheduler.
-      The default i.e. org.apache.hadoop.yarn.util.resource.DefaultResourseCalculator only uses
-      Memory while DominantResourceCalculator uses Dominant-resource to compare multi-dimensional
-      resources such as Memory, CPU etc. A Java ResourceCalculator class name is expected.
-    </description>
-    <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
-    <display-name>CPU Scheduling</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>org.apache.hadoop.yarn.util.resource.DominantResourceCalculator</value>
-          <label>Enabled</label>
-        </entry>
-        <entry>
-          <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
-          <label>Disabled</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.accessible-node-labels</name>
-    <value>*</value>
-    <description/>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <!-- In HDP 2.3, yarn.scheduler.capacity.root.default-node-label-expression was deleted -->
-
-  <!-- These configs were inherited from HDP 2.5 -->
-  <property>
-    <name>capacity-scheduler</name>
-    <description>Enter key=value (one per line) for all properties of capacity-scheduler.xml</description>
-    <depends-on>
-      <property>
-        <type>hive-interactive-env</type>
-        <name>enable_hive_interactive</name>
-      </property>
-      <property>
-        <type>hive-interactive-env</type>
-        <name>llap_queue_capacity</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
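
The removed capacity-scheduler.xml exposes yarn.scheduler.capacity.resource-calculator through a two-entry value-list labelled "CPU Scheduling". A tiny sketch of that label-to-class mapping, for illustration only:

    # Mapping taken from the value-list entries in the file above.
    resource_calculators = {
        "Enabled":  "org.apache.hadoop.yarn.util.resource.DominantResourceCalculator",
        "Disabled": "org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator",
    }

    def calculator_for(cpu_scheduling_label):
        """Return the ResourceCalculator class behind the 'CPU Scheduling' toggle."""
        return resource_calculators[cpu_scheduling_label]

    print(calculator_for("Enabled"))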


[39/51] [abbrv] ambari git commit: AMBARI-19137. HDP 3.0 TP - move ZK, HDFS, YARN/MR into new common-services version (alejandro)

Posted by sm...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/ranger-hdfs-audit.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/ranger-hdfs-audit.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/ranger-hdfs-audit.xml
new file mode 100644
index 0000000..fd41817
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/ranger-hdfs-audit.xml
@@ -0,0 +1,217 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <!-- These configs were inherited from HDP 2.3 -->
+  <property>
+    <name>xasecure.audit.is.enabled</name>
+    <value>true</value>
+    <description>Is Audit enabled?</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db</name>
+    <value>false</value>
+    <display-name>Audit to DB</display-name>
+    <description>Is Audit to DB enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.db</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.jdbc.url</name>
+    <value>{{audit_jdbc_url}}</value>
+    <description>Audit DB JDBC URL</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.user</name>
+    <value>{{xa_audit_db_user}}</value>
+    <description>Audit DB JDBC User</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.password</name>
+    <value>crypted</value>
+    <property-type>PASSWORD</property-type>
+    <description>Audit DB JDBC Password</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.jdbc.driver</name>
+    <value>{{jdbc_driver}}</value>
+    <description>Audit DB JDBC Driver</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.credential.provider.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>Credential file store</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.batch.filespool.dir</name>
+    <value>/var/log/hadoop/hdfs/audit/db/spool</value>
+    <description>/var/log/hadoop/hdfs/audit/db/spool</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs</name>
+    <value>true</value>
+    <display-name>Audit to HDFS</display-name>
+    <description>Is Audit to HDFS enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.hdfs</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs.dir</name>
+    <value>hdfs://NAMENODE_HOSTNAME:8020/ranger/audit</value>
+    <description>HDFS folder to write audit to, make sure the service user has required permissions</description>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.hdfs.dir</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
+    <value>/var/log/hadoop/hdfs/audit/hdfs/spool</value>
+    <description>/var/log/hadoop/hdfs/audit/hdfs/spool</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr</name>
+    <value>false</value>
+    <display-name>Audit to SOLR</display-name>
+    <description>Is Solr audit enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.solr</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.urls</name>
+    <value/>
+    <description>Solr URL</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.urls</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.zookeepers</name>
+    <value>NONE</value>
+    <description>Solr Zookeeper string</description>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.zookeepers</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
+    <value>/var/log/hadoop/hdfs/audit/solr/spool</value>
+    <description>/var/log/hadoop/hdfs/audit/solr/spool</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.provider.summary.enabled</name>
+    <value>false</value>
+    <display-name>Audit provider summary enabled</display-name>
+    <description>Enable Summary audit?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- These configs are deleted in HDP 2.5. -->
+  <property>
+    <name>xasecure.audit.destination.db</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.jdbc.url</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.user</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.password</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.jdbc.driver</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.credential.provider.file</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.batch.filespool.dir</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+</configuration>

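Note on the ranger-hdfs-audit defaults above: values such as {{jdbc_driver}} and jceks://file{{credential_file}} are {{...}} placeholders that get filled in from the HDFS service's parameters before the file is written to each host. As a rough illustration only (the parameter names and values below are hypothetical, not taken from this patch), the substitution amounts to:

    import re

    # Hypothetical parameter values; at deploy time these come from the
    # service's params, not from this patch.
    params = {
        "jdbc_driver": "org.apache.hive.jdbc.HiveDriver",
        "credential_file": "/etc/ranger/hdfs/cred.jceks",
    }

    def render(template, params):
        # Replace each {{name}} token with its value; unknown tokens are left as-is.
        return re.sub(r"\{\{(\w+)\}\}",
                      lambda m: str(params.get(m.group(1), m.group(0))),
                      template)

    print(render("jceks://file{{credential_file}}", params))
    # -> jceks://file/etc/ranger/hdfs/cred.jceks
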
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/ranger-hdfs-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/ranger-hdfs-plugin-properties.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/ranger-hdfs-plugin-properties.xml
new file mode 100644
index 0000000..b31742c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/ranger-hdfs-plugin-properties.xml
@@ -0,0 +1,98 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <!-- These configs were inherited from HDP 2.2 -->
+  <property>
+    <name>policy_user</name>
+    <value>ambari-qa</value>
+    <display-name>Policy user for HDFS</display-name>
+    <description>This user must be a system user and must also be present in the Ranger
+      Admin portal</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hadoop.rpc.protection</name>
+    <value/>
+    <description>Used for repository creation on ranger admin
+    </description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>common.name.for.certificate</name>
+    <value/>
+    <description>Common name for the certificate; this value should match what is specified in the repository within Ranger Admin</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger-hdfs-plugin-enabled</name>
+    <value>No</value>
+    <display-name>Enable Ranger for HDFS</display-name>
+    <description>Enable the Ranger HDFS plugin</description>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>ranger-hdfs-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_USERNAME</name>
+    <value>hadoop</value>
+    <display-name>Ranger repository config user</display-name>
+    <description>Used for repository creation on ranger admin
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_PASSWORD</name>
+    <value>hadoop</value>
+    <display-name>Ranger repository config password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Used for repository creation on ranger admin
+    </description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- These configs were inherited from HDP 2.5 -->
+  <property>
+    <name>hadoop.rpc.protection</name>
+    <value>authentication</value>
+    <description>Used for repository creation on ranger admin</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false" />
+  </property>
+</configuration>

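The depends-on blocks in the file above tell Ambari to re-evaluate a property whenever the referenced property changes; for example, ranger-hdfs-plugin-enabled in this config type follows ranger-env/ranger-hdfs-plugin-enabled. A minimal sketch of that propagation (the function and dictionary layout are illustrative, not Ambari's actual stack-advisor API):

    def recommend_ranger_plugin(configurations):
        # Mirror ranger-env/ranger-hdfs-plugin-enabled into ranger-hdfs-plugin-properties.
        ranger_env = configurations.get("ranger-env", {})
        enabled = ranger_env.get("ranger-hdfs-plugin-enabled", "No")
        target = configurations.setdefault("ranger-hdfs-plugin-properties", {})
        target["ranger-hdfs-plugin-enabled"] = enabled
        return configurations

    configs = {"ranger-env": {"ranger-hdfs-plugin-enabled": "Yes"}}
    print(recommend_ranger_plugin(configs)["ranger-hdfs-plugin-properties"])
    # -> {'ranger-hdfs-plugin-enabled': 'Yes'}
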
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/ranger-hdfs-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/ranger-hdfs-policymgr-ssl.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/ranger-hdfs-policymgr-ssl.xml
new file mode 100644
index 0000000..de3fcd6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/ranger-hdfs-policymgr-ssl.xml
@@ -0,0 +1,67 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <!-- These configs were inherited from HDP 2.3 -->
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore</name>
+    <value>{{stack_root}}/current/hadoop-client/conf/ranger-plugin-keystore.jks</value>
+    <description>Java keystore file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.password</name>
+    <value>myKeyFilePassword</value>
+    <property-type>PASSWORD</property-type>
+    <description>Password for the keystore</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore</name>
+    <value>{{stack_root}}/current/hadoop-client/conf/ranger-plugin-truststore.jks</value>
+    <description>java truststore file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.password</name>
+    <value>changeit</value>
+    <property-type>PASSWORD</property-type>
+    <description>java truststore password</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>java keystore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>java truststore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

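Every property above marked with <property-type>PASSWORD</property-type> ships with a placeholder default and is expected to be overridden (and, at runtime, sourced from the JCEKS credential file referenced by the *.credential.file properties). A quick way to list such properties from any of these configuration files, as a throwaway sketch rather than code from this patch:

    import xml.etree.ElementTree as ET

    def password_properties(path):
        # Yield (name, default value) for every PASSWORD-typed property
        # in an Ambari configuration XML file.
        root = ET.parse(path).getroot()
        for prop in root.findall("property"):
            if prop.findtext("property-type") == "PASSWORD":
                yield prop.findtext("name"), prop.findtext("value")

    for name, value in password_properties("ranger-hdfs-policymgr-ssl.xml"):
        print(name, "=", value)
    # xasecure.policymgr.clientssl.keystore.password = myKeyFilePassword
    # xasecure.policymgr.clientssl.truststore.password = changeit
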
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/ranger-hdfs-security.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/ranger-hdfs-security.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/ranger-hdfs-security.xml
new file mode 100644
index 0000000..1b0a821
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/ranger-hdfs-security.xml
@@ -0,0 +1,65 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <!-- These configs were inherited from HDP 2.3 -->
+  <property>
+    <name>ranger.plugin.hdfs.service.name</name>
+    <value>{{repo_name}}</value>
+    <description>Name of the Ranger service containing HDFS policies</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.hdfs.policy.source.impl</name>
+    <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
+    <description>Class to retrieve policies from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.hdfs.policy.rest.url</name>
+    <value>{{policymgr_mgr_url}}</value>
+    <description>URL to Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.hdfs.policy.rest.ssl.config.file</name>
+    <value>/etc/hadoop/conf/ranger-policymgr-ssl.xml</value>
+    <description>Path to the file containing SSL details to contact Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.hdfs.policy.pollIntervalMs</name>
+    <value>30000</value>
+    <description>How often to poll for policy changes, in milliseconds</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.hdfs.policy.cache.dir</name>
+    <value>/etc/ranger/{{repo_name}}/policycache</value>
+    <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.add-hadoop-authorization</name>
+    <value>true</value>
+    <description>Enable/disable the default Hadoop authorization (based on the rwxrwxrwx permissions of the resource) if Ranger authorization fails.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

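ranger.plugin.hdfs.policy.pollIntervalMs and ranger.plugin.hdfs.policy.cache.dir above describe the plugin's behaviour: poll Ranger Admin on an interval and keep the last successfully downloaded policies on local disk so authorization keeps working if Ranger Admin is unreachable. Schematically (fetch_policies and the cache layout are placeholders, not the RangerAdminRESTClient implementation):

    import json, os, time

    POLL_INTERVAL_MS = 30000                          # ranger.plugin.hdfs.policy.pollIntervalMs
    CACHE_DIR = "/tmp/ranger/hdfs_repo/policycache"   # stands in for /etc/ranger/{{repo_name}}/policycache

    def fetch_policies():
        # Placeholder for the REST call made against {{policymgr_mgr_url}}.
        return {"serviceName": "hdfs_repo", "policies": []}

    def poll(iterations=3):
        os.makedirs(CACHE_DIR, exist_ok=True)
        cache_file = os.path.join(CACHE_DIR, "hdfs_policies.json")
        for _ in range(iterations):          # a real plugin loops until shutdown
            try:
                policies = fetch_policies()
                with open(cache_file, "w") as f:
                    json.dump(policies, f)   # refresh the on-disk cache
            except Exception:
                pass                         # keep enforcing whatever is already cached
            time.sleep(POLL_INTERVAL_MS / 1000.0)

    poll()
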
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/ssl-client.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/ssl-client.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/ssl-client.xml
new file mode 100644
index 0000000..6ec064a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/ssl-client.xml
@@ -0,0 +1,70 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration>
+  <property>
+    <name>ssl.client.truststore.location</name>
+    <value>/etc/security/clientKeys/all.jks</value>
+    <description>Location of the trust store file.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ssl.client.truststore.type</name>
+    <value>jks</value>
+    <description>Optional. Default value is "jks".</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ssl.client.truststore.password</name>
+    <value>bigdata</value>
+    <property-type>PASSWORD</property-type>
+    <description>Password to open the trust store file.</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ssl.client.truststore.reload.interval</name>
+    <value>10000</value>
+    <description>Truststore reload interval, in milliseconds.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ssl.client.keystore.type</name>
+    <value>jks</value>
+    <description>Optional. Default value is "jks".</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ssl.client.keystore.location</name>
+    <value>/etc/security/clientKeys/keystore.jks</value>
+    <description>Location of the keystore file.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ssl.client.keystore.password</name>
+    <value>bigdata</value>
+    <property-type>PASSWORD</property-type>
+    <description>Password to open the keystore file.</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/ssl-server.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/ssl-server.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/ssl-server.xml
new file mode 100644
index 0000000..5d2745f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/ssl-server.xml
@@ -0,0 +1,80 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration>
+  <property>
+    <name>ssl.server.truststore.location</name>
+    <value>/etc/security/serverKeys/all.jks</value>
+    <description>Location of the trust store file.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ssl.server.truststore.type</name>
+    <value>jks</value>
+    <description>Optional. Default value is "jks".</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ssl.server.truststore.password</name>
+    <value>bigdata</value>
+    <property-type>PASSWORD</property-type>
+    <description>Password to open the trust store file.</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ssl.server.truststore.reload.interval</name>
+    <value>10000</value>
+    <description>Truststore reload interval, in milliseconds.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ssl.server.keystore.type</name>
+    <value>jks</value>
+    <description>Optional. Default value is "jks".</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ssl.server.keystore.location</name>
+    <value>/etc/security/serverKeys/keystore.jks</value>
+    <description>Location of the keystore file.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ssl.server.keystore.password</name>
+    <value>bigdata</value>
+    <property-type>PASSWORD</property-type>
+    <description>Password to open the keystore file.</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ssl.server.keystore.keypassword</name>
+    <value>bigdata</value>
+    <property-type>PASSWORD</property-type>
+    <description>Password for private key in keystore file.</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>

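ssl-client.xml and ssl-server.xml above are plain Hadoop configuration files: flat name/value pairs naming the trust store and key store each side should use, plus the JKS passwords (which the PASSWORD property type keeps out of plain-text API responses). Rendering such a file from a dictionary takes only a few lines; the helper below is illustrative and not part of the patch:

    import xml.etree.ElementTree as ET

    def write_hadoop_config(path, properties):
        # Serialize {name: value} pairs into Hadoop's <configuration><property>... layout.
        root = ET.Element("configuration")
        for name, value in sorted(properties.items()):
            prop = ET.SubElement(root, "property")
            ET.SubElement(prop, "name").text = name
            ET.SubElement(prop, "value").text = str(value)
        ET.ElementTree(root).write(path, xml_declaration=True, encoding="utf-8")

    write_hadoop_config("ssl-client.xml", {
        "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
        "ssl.client.truststore.type": "jks",
        "ssl.client.truststore.reload.interval": 10000,
    })
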
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/kerberos.json b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/kerberos.json
new file mode 100644
index 0000000..1dd801b
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/kerberos.json
@@ -0,0 +1,246 @@
+{
+  "services": [
+    {
+      "name": "HDFS",
+      "identities": [
+        {
+          "name": "/spnego",
+          "principal": {
+            "configuration": "hdfs-site/dfs.web.authentication.kerberos.principal"
+          },
+          "keytab": {
+            "configuration": "hdfs-site/dfs.web.authentication.kerberos.keytab"
+          }
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "auth_to_local_properties" : [
+        "core-site/hadoop.security.auth_to_local"
+      ],
+      "configurations": [
+        {
+          "core-site": {
+            "hadoop.security.authentication": "kerberos",
+            "hadoop.security.authorization": "true",
+            "hadoop.proxyuser.HTTP.groups": "${hadoop-env/proxyuser_group}"
+          }
+        },
+        {
+          "ranger-hdfs-audit": {
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name":  "HDFS_CLIENT",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            }
+          ]
+        },
+        {
+          "name": "NAMENODE",
+          "identities": [
+            {
+              "name": "hdfs",
+              "principal": {
+                "value": "${hadoop-env/hdfs_user}-${cluster_name|toLower()}@${realm}",
+                "type" : "user" ,
+                "configuration": "hadoop-env/hdfs_principal_name",
+                "local_username" : "${hadoop-env/hdfs_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hdfs.headless.keytab",
+                "owner": {
+                  "name": "${hadoop-env/hdfs_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hadoop-env/hdfs_user_keytab"
+              }
+            },
+            {
+              "name": "namenode_nn",
+              "principal": {
+                "value": "nn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hdfs-site/dfs.namenode.kerberos.principal",
+                "local_username" : "${hadoop-env/hdfs_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nn.service.keytab",
+                "owner": {
+                  "name": "${hadoop-env/hdfs_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hdfs-site/dfs.namenode.keytab.file"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hdfs-site/dfs.namenode.kerberos.internal.spnego.principal"
+              }
+            },
+            {
+              "name": "/HDFS/NAMENODE/namenode_nn",
+              "principal": {
+                "configuration": "ranger-hdfs-audit/xasecure.audit.jaas.Client.option.principal"
+              },
+              "keytab": {
+                "configuration": "ranger-hdfs-audit/xasecure.audit.jaas.Client.option.keyTab"
+              }
+            }
+          ],
+          "configurations": [
+            {
+              "hdfs-site": {
+                "dfs.block.access.token.enable": "true"
+              }
+            }
+          ]
+        },
+        {
+          "name": "DATANODE",
+          "identities": [
+            {
+              "name": "datanode_dn",
+              "principal": {
+                "value": "dn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hdfs-site/dfs.datanode.kerberos.principal",
+                "local_username" : "${hadoop-env/hdfs_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/dn.service.keytab",
+                "owner": {
+                  "name": "${hadoop-env/hdfs_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hdfs-site/dfs.datanode.keytab.file"
+              }
+            }
+          ],
+          "configurations" : [
+            {
+              "hdfs-site" : {
+                "dfs.datanode.address" : "0.0.0.0:1019",
+                "dfs.datanode.http.address": "0.0.0.0:1022"
+              }
+            }
+          ]
+        },
+        {
+          "name": "SECONDARY_NAMENODE",
+          "identities": [
+            {
+              "name": "secondary_namenode_nn",
+              "principal": {
+                "value": "nn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hdfs-site/dfs.secondary.namenode.kerberos.principal",
+                "local_username" : "${hadoop-env/hdfs_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nn.service.keytab",
+                "owner": {
+                  "name": "${hadoop-env/hdfs_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hdfs-site/dfs.secondary.namenode.keytab.file"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hdfs-site/dfs.secondary.namenode.kerberos.internal.spnego.principal"
+              }
+            }
+          ]
+        },
+        {
+          "name": "NFS_GATEWAY",
+          "identities": [
+            {
+              "name": "nfsgateway",
+              "principal": {
+                "value": "nfs/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hdfs-site/nfs.kerberos.principal",
+                "local_username" : "${hadoop-env/hdfs_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nfs.service.keytab",
+                "owner": {
+                  "name": "${hadoop-env/hdfs_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hdfs-site/nfs.keytab.file"
+              }
+            }
+          ]
+        },
+        {
+          "name": "JOURNALNODE",
+          "identities": [
+            {
+              "name": "journalnode_jn",
+              "principal": {
+                "value": "jn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hdfs-site/dfs.journalnode.kerberos.principal",
+                "local_username" : "${hadoop-env/hdfs_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/jn.service.keytab",
+                "owner": {
+                  "name": "${hadoop-env/hdfs_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hdfs-site/dfs.journalnode.keytab.file"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hdfs-site/dfs.journalnode.kerberos.internal.spnego.principal"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}

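Principal and keytab values in the kerberos.json above are templates: ${hadoop-env/hdfs_user} pulls a value from another configuration type, ${realm} and ${cluster_name} come from the Kerberos environment, the |toLower() suffix is a transformation, and _HOST is expanded per host at runtime. A rough resolver showing how such a template collapses to a concrete principal (the variable handling here is simplified and illustrative, not Ambari's implementation):

    import re

    def resolve(template, variables):
        # variables maps "hadoop-env/hdfs_user", "realm", "cluster_name", ... to strings.
        def repl(match):
            expr = match.group(1)
            name, _, modifier = expr.partition("|")
            value = variables.get(name, match.group(0))
            if modifier.strip() == "toLower()":
                value = value.lower()
            return value
        return re.sub(r"\$\{([^}]+)\}", repl, template)

    variables = {"hadoop-env/hdfs_user": "hdfs",
                 "cluster_name": "ProdCluster",     # hypothetical cluster name
                 "realm": "EXAMPLE.COM"}            # hypothetical realm
    print(resolve("${hadoop-env/hdfs_user}-${cluster_name|toLower()}@${realm}", variables))
    # -> hdfs-prodcluster@EXAMPLE.COM
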
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/metainfo.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/metainfo.xml
new file mode 100644
index 0000000..01ab22f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/metainfo.xml
@@ -0,0 +1,405 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <displayName>HDFS</displayName>
+      <comment>Apache Hadoop Distributed File System</comment>
+      <version>3.0.0</version>
+
+      <components>
+        <component>
+          <name>NAMENODE</name>
+          <displayName>NameNode</displayName>
+          <category>MASTER</category>
+          <cardinality>1-2</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <reassignAllowed>true</reassignAllowed>
+          <dependencies>
+            <dependency>
+              <name>HDFS/ZKFC</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>false</enabled>
+              </auto-deploy>
+              <conditions>
+                <condition xsi:type="propertyExists">
+                  <configType>hdfs-site</configType>
+                  <property>dfs.nameservices</property>
+                </condition>
+              </conditions>
+            </dependency>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>false</enabled>
+              </auto-deploy>
+              <conditions>
+                <condition xsi:type="propertyExists">
+                  <configType>hdfs-site</configType>
+                  <property>dfs.nameservices</property>
+                </condition>
+              </conditions>
+            </dependency>
+            <dependency>
+              <name>HDFS/JOURNALNODE</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>false</enabled>
+              </auto-deploy>
+              <conditions>
+                <condition xsi:type="propertyExists">
+                  <configType>hdfs-site</configType>
+                  <property>dfs.nameservices</property>
+                </condition>
+              </conditions>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/namenode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1800</timeout>
+          </commandScript>
+          <logs>
+            <log>
+              <logId>hdfs_namenode</logId>
+              <primary>true</primary>
+            </log>
+            <log>
+              <logId>hdfs_audit</logId>
+            </log>
+          </logs>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/namenode.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+            <customCommand>
+              <name>REBALANCEHDFS</name>
+              <background>true</background>
+              <commandScript>
+                <script>scripts/namenode.py</script>
+                <scriptType>PYTHON</scriptType>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+        </component>
+
+        <component>
+          <name>DATANODE</name>
+          <displayName>DataNode</displayName>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <decommissionAllowed>true</decommissionAllowed>
+          <commandScript>
+            <script>scripts/datanode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <bulkCommands>
+            <displayName>DataNodes</displayName>
+            <!-- Used by decommission and recommission -->
+            <masterComponent>NAMENODE</masterComponent>
+          </bulkCommands>
+          <logs>
+            <log>
+              <logId>hdfs_datanode</logId>
+              <primary>true</primary>
+            </log>
+          </logs>
+        </component>
+
+        <component>
+          <name>SECONDARY_NAMENODE</name>
+          <displayName>SNameNode</displayName>
+          <!-- TODO:  cardinality is conditional on HA usage -->
+          <cardinality>1</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <reassignAllowed>true</reassignAllowed>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/snamenode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <logs>
+            <log>
+              <logId>hdfs_secondarynamenode</logId>
+              <primary>true</primary>
+            </log>
+          </logs>
+        </component>
+
+        <component>
+          <name>HDFS_CLIENT</name>
+          <displayName>HDFS Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/hdfs_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>hdfs-site.xml</fileName>
+              <dictionaryName>hdfs-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>xml</type>
+              <fileName>core-site.xml</fileName>
+              <dictionaryName>core-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>log4j.properties</fileName>
+              <dictionaryName>hdfs-log4j,yarn-log4j</dictionaryName>
+            </configFile>                          
+            <configFile>
+              <type>env</type>
+              <fileName>hadoop-env.sh</fileName>
+              <dictionaryName>hadoop-env</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+
+        <component>
+          <name>JOURNALNODE</name>
+          <displayName>JournalNode</displayName>
+          <category>SLAVE</category>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/journalnode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <logs>
+            <log>
+              <logId>hdfs_journalnode</logId>
+              <primary>true</primary>
+            </log>
+          </logs>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+        </component>
+
+        <component>
+          <name>ZKFC</name>
+          <displayName>ZKFailoverController</displayName>
+          <category>SLAVE</category>
+          <!-- TODO: cardinality is conditional on HA topology -->
+          <cardinality>0+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/zkfc_slave.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <logs>
+            <log>
+              <logId>hdfs_zkfc</logId>
+              <primary>true</primary>
+            </log>
+          </logs>
+        </component>
+
+        <component>
+          <name>NFS_GATEWAY</name>
+          <displayName>NFSGateway</displayName>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <category>SLAVE</category>
+          <commandScript>
+            <script>scripts/nfsgateway.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hadoop</name>
+            </package>
+            <package>
+              <name>hadoop-lzo</name>
+              <skipUpgrade>true</skipUpgrade>
+              <condition>should_install_lzo</condition>
+            </package>
+          </packages>
+        </osSpecific>
+        
+        <osSpecific>
+          <osFamily>amazon2015,redhat6,redhat7,suse11</osFamily>
+          <packages>
+            <package>
+              <name>hadoop-client</name>
+            </package>
+            <package>
+              <name>snappy</name>
+            </package>
+            <package>
+              <name>snappy-devel</name>
+            </package>
+            <package>
+              <name>lzo</name>
+              <skipUpgrade>true</skipUpgrade>
+              <condition>should_install_lzo</condition>
+            </package>
+            <package>
+              <name>hadoop-lzo-native</name>
+              <skipUpgrade>true</skipUpgrade>
+              <condition>should_install_lzo</condition>
+            </package>
+            <package>
+              <name>hadoop-libhdfs</name>
+            </package>
+          </packages>
+        </osSpecific>
+        
+        <osSpecific>
+          <osFamily>suse12</osFamily>
+          <packages>
+            <package>
+              <name>hadoop-client</name>
+            </package>
+            <package>
+              <name>snappy</name>
+            </package>
+            <package>
+              <name>snappy-devel</name>
+            </package>
+            <package>
+              <name>liblzo2-2</name>
+              <skipUpgrade>true</skipUpgrade>
+              <condition>should_install_lzo</condition>
+            </package>
+            <package>
+              <name>hadoop-lzo-native</name>
+              <skipUpgrade>true</skipUpgrade>
+              <condition>should_install_lzo</condition>
+            </package>
+            <package>
+              <name>hadoop-libhdfs</name>
+            </package>
+          </packages>
+        </osSpecific>
+
+        <osSpecific>
+          <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
+          <packages>
+            <package>
+              <name>hadoop-client</name>
+            </package>
+            <package>
+              <name>libsnappy1</name>
+            </package>
+            <package>
+              <name>libsnappy-dev</name>
+            </package>
+            <package>
+              <name>liblzo2-2</name>
+              <skipUpgrade>true</skipUpgrade>
+              <condition>should_install_lzo</condition>
+            </package>
+            <package>
+              <name>hadoop-hdfs</name>
+            </package>
+            <package>
+              <name>libhdfs0</name>
+            </package>
+            <package>
+              <name>libhdfs0-dev</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+      
+      <requiredServices>
+        <service>ZOOKEEPER</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>core-site</config-type>
+        <config-type>hdfs-site</config-type>
+        <config-type>hadoop-env</config-type>
+        <config-type>hadoop-policy</config-type>
+        <config-type>hdfs-log4j</config-type>
+        <config-type>ranger-hdfs-plugin-properties</config-type>
+        <config-type>ssl-client</config-type>
+        <config-type>ssl-server</config-type>
+        <config-type>ranger-hdfs-audit</config-type>
+        <config-type>ranger-hdfs-policymgr-ssl</config-type>
+        <config-type>ranger-hdfs-security</config-type>
+        <config-type>ams-ssl-client</config-type>
+        <config-type>hadoop-metrics2.properties</config-type>
+      </configuration-dependencies>
+      <restartRequiredAfterRackChange>true</restartRequiredAfterRackChange>
+
+      <quickLinksConfigurations>
+        <quickLinksConfiguration>
+          <fileName>quicklinks.json</fileName>
+          <default>true</default>
+        </quickLinksConfiguration>
+      </quickLinksConfigurations>
+
+      <themes>
+        <theme>
+          <fileName>theme.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+    </service>
+  </services>
+</metainfo>

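metainfo.xml is what the Ambari server reads to learn the service's shape: each <component> declares a display name, a category (MASTER/SLAVE/CLIENT), a cardinality, the Python command script the agent runs for it, and optional host-level dependencies. A quick inventory pass over the file, as a throwaway sketch rather than code from this patch:

    import xml.etree.ElementTree as ET

    root = ET.parse("metainfo.xml").getroot()
    for component in root.iter("component"):
        name = component.findtext("name")
        category = component.findtext("category")
        cardinality = component.findtext("cardinality")
        script = component.findtext("commandScript/script")
        print(f"{name:20} {category:7} {cardinality:4} {script}")
    # NAMENODE             MASTER  1-2  scripts/namenode.py
    # DATANODE             SLAVE   1+   scripts/datanode.py
    # ...
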

[32/51] [abbrv] ambari git commit: AMBARI-19137. HDP 3.0 TP - move ZK, HDFS, YARN/MR into new common-services version (alejandro)

Posted by sm...@apache.org.
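
The widgets.json added below defines the default HDFS dashboard and heatmap layouts. Each widget lists the AMS metrics it needs and a "value" expression over those metric names in ${...}; for example, the "NameNode Host Load" widget derives CPU utilization from the cpu_* metrics. Worked out with made-up sample numbers (the values are hypothetical; the formula is the one from the widget definition):

    # Hypothetical metric samples; real values come from Ambari Metrics.
    metrics = {"cpu_system": 4.0, "cpu_user": 21.0, "cpu_nice": 0.0,
               "cpu_idle": 70.0, "cpu_wio": 5.0}

    busy = metrics["cpu_system"] + metrics["cpu_user"] + metrics["cpu_nice"]
    total = busy + metrics["cpu_idle"] + metrics["cpu_wio"]
    cpu_utilization = busy / total * 100    # the "NameNode Host Load" widget expression
    print(round(cpu_utilization, 1))        # -> 25.0
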
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/widgets.json b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/widgets.json
new file mode 100644
index 0000000..4a645b0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/widgets.json
@@ -0,0 +1,649 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_hdfs_dashboard",
+      "display_name": "Standard HDFS Dashboard",
+      "section_name": "HDFS_SUMMARY",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "NameNode GC count",
+          "description": "Count of total garbage collections and count of major type garbage collections of the JVM.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "jvm.JvmMetrics.GcCount._rate",
+              "metric_path": "metrics/jvm/gcCount._rate",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "jvm.JvmMetrics.GcCountConcurrentMarkSweep._rate",
+              "metric_path": "metrics/jvm/GcCountConcurrentMarkSweep._rate",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "GC total count",
+              "value": "${jvm.JvmMetrics.GcCount._rate}"
+            },
+            {
+              "name": "GC count of type major collection",
+              "value": "${jvm.JvmMetrics.GcCountConcurrentMarkSweep._rate}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "NameNode GC time",
+          "description": "Total time taken by major type garbage collections in milliseconds.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep._rate",
+              "metric_path": "metrics/jvm/GcTimeMillisConcurrentMarkSweep._rate",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "GC time in major collection",
+              "value": "${jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep._rate}"
+            }
+          ],
+          "properties": {
+            "display_unit": "ms",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "NN Connection Load",
+          "description": "Number of open RPC connections being managed by NameNode.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "rpc.rpc.client.NumOpenConnections",
+              "metric_path": "metrics/rpc/client/NumOpenConnections",
+              "category": "",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "rpc.rpc.datanode.NumOpenConnections",
+              "metric_path": "metrics/rpc/datanode/NumOpenConnections",
+              "category": "",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "Open Client Connections",
+              "value": "${rpc.rpc.client.NumOpenConnections}"
+            },
+            {
+              "name": "Open Datanode Connections",
+              "value": "${rpc.rpc.datanode.NumOpenConnections}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "NameNode Heap",
+          "description": "Heap memory committed and Heap memory used with respect to time.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "jvm.JvmMetrics.MemHeapCommittedM",
+              "metric_path": "metrics/jvm/memHeapCommittedM",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "jvm.JvmMetrics.MemHeapUsedM",
+              "metric_path": "metrics/jvm/memHeapUsedM",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "JVM heap committed",
+              "value": "${jvm.JvmMetrics.MemHeapCommittedM}"
+            },
+            {
+              "name": "JVM heap used",
+              "value": "${jvm.JvmMetrics.MemHeapUsedM}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "NameNode Host Load",
+          "description": "Percentage of CPU and Memory resources being consumed on NameNode host.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "cpu_system",
+              "metric_path": "metrics/cpu/cpu_system",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_user",
+              "metric_path": "metrics/cpu/cpu_user",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_nice",
+              "metric_path": "metrics/cpu/cpu_nice",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_idle",
+              "metric_path": "metrics/cpu/cpu_idle",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_wio",
+              "metric_path": "metrics/cpu/cpu_wio",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "mem_total",
+              "metric_path": "metrics/memory/mem_total",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "mem_free",
+              "metric_path": "metrics/memory/mem_free",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "CPU utilization",
+              "value": "${((cpu_system + cpu_user + cpu_nice)/(cpu_system + cpu_user + cpu_nice + cpu_idle + cpu_wio)) * 100}"
+            },
+            {
+              "name": "Memory utilization",
+              "value": "${((mem_total - mem_free)/mem_total) * 100}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1",
+            "display_unit": "%"
+          }
+        },
+        {
+          "widget_name": "NameNode RPC",
+          "description": "Compares the average time spent for RPC request in a queue and RPC request being processed.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "rpc.rpc.client.RpcQueueTimeAvgTime",
+              "metric_path": "metrics/rpc/client/RpcQueueTime_avg_time",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "rpc.rpc.client.RpcProcessingTimeAvgTime",
+              "metric_path": "metrics/rpc/client/RpcProcessingTime_avg_time",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "rpc.rpc.datanode.RpcQueueTimeAvgTime",
+              "metric_path": "metrics/rpc/datanode/RpcQueueTime_avg_time",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "rpc.rpc.datanode.RpcProcessingTimeAvgTime",
+              "metric_path": "metrics/rpc/datanode/RpcProcessingTime_avg_time",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "Client RPC Queue Wait time",
+              "value": "${rpc.rpc.client.RpcQueueTimeAvgTime}"
+            },
+            {
+              "name": "Client RPC Processing time",
+              "value": "${rpc.rpc.client.RpcProcessingTimeAvgTime}"
+            },
+            {
+              "name": "Datanode RPC Queue Wait time",
+              "value": "${rpc.rpc.datanode.RpcQueueTimeAvgTime}"
+            },
+            {
+              "name": "Datanode RPC Processing time",
+              "value": "${rpc.rpc.datanode.RpcProcessingTimeAvgTime}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1",
+            "display_unit": "ms"
+          }
+        },
+        {
+          "widget_name": "NameNode Operations",
+          "description": "Rate per second of number of file operation over time.",
+          "widget_type": "GRAPH",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "dfs.namenode.TotalFileOps._rate",
+              "metric_path": "metrics/dfs/namenode/TotalFileOps._rate",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "NameNode File Operations",
+              "value": "${dfs.namenode.TotalFileOps._rate}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Failed disk volumes",
+          "description": "Number of Failed disk volumes across all DataNodes. Its indicative of HDFS bad health.",
+          "widget_type": "NUMBER",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.NumFailedVolumes._sum",
+              "metric_path": "metrics/dfs/datanode/NumFailedVolumes",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "Failed disk volumes",
+              "value": "${FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.NumFailedVolumes._sum}"
+            }
+          ],
+          "properties": {
+            "display_unit": ""
+          }
+        },
+        {
+          "widget_name": "Blocks With Corrupted Replicas",
+          "description": "Number represents data blocks with at least one corrupted replica (but not all of them). Its indicative of HDFS bad health.",
+          "widget_type": "NUMBER",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks",
+              "metric_path": "metrics/dfs/FSNamesystem/CorruptBlocks",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "Blocks With Corrupted Replicas",
+              "value": "${Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks}"
+            }
+          ],
+          "properties": {
+            "warning_threshold": "0",
+            "error_threshold": "50"
+          }
+        },
+        {
+          "widget_name": "Under Replicated Blocks",
+          "description": "Number represents file blocks that does not meet the replication factor criteria. Its indicative of HDFS bad health.",
+          "widget_type": "NUMBER",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks",
+              "metric_path": "metrics/dfs/FSNamesystem/UnderReplicatedBlocks",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "Under Replicated Blocks",
+              "value": "${Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks}"
+            }
+          ],
+          "properties": {
+            "warning_threshold": "0",
+            "error_threshold": "50"
+          }
+        },
+        {
+          "widget_name": "HDFS Space Utilization",
+          "description": "Percentage of available space used in the DFS.",
+          "widget_type": "GAUGE",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining",
+              "metric_path": "metrics/FSDatasetState/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl/Remaining",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity",
+              "metric_path": "metrics/dfs/datanode/Capacity",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "HDFS Space Utilization",
+              "value": "${(FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity - FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining)/FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity}"
+            }
+          ],
+          "properties": {
+            "warning_threshold": "0.75",
+            "error_threshold": "0.9"
+          }
+        }
+      ]
+    },
+    {
+      "layout_name": "default_hdfs_heatmap",
+      "section_name": "HDFS_HEATMAPS",
+      "display_name": "HDFS Heatmaps",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "HDFS Bytes Read",
+          "default_section_name": "HDFS_HEATMAPS",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "dfs.datanode.BytesRead._rate",
+              "metric_path": "metrics/dfs/datanode/bytes_read._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "HDFS Bytes Read",
+              "value": "${dfs.datanode.BytesRead._rate}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "1024"
+          }
+        },
+        {
+          "widget_name": "HDFS Bytes Written",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "dfs.datanode.BytesWritten._rate",
+              "metric_path": "metrics/dfs/datanode/bytes_written._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "HDFS Bytes Written",
+              "value": "${dfs.datanode.BytesWritten._rate}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "1024"
+          }
+        },
+        {
+          "widget_name": "DataNode Garbage Collection Time",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=DataNode,name=JvmMetrics.GcTimeMillis",
+              "metric_path": "metrics/jvm/gcTimeMillis",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode Garbage Collection Time",
+              "value": "${Hadoop:service=DataNode,name=JvmMetrics.GcTimeMillis}"
+            }
+          ],
+          "properties": {
+            "display_unit": "ms",
+            "max_limit": "10000"
+          }
+        },
+        {
+          "widget_name": "DataNode JVM Heap Memory Used",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=DataNode,name=JvmMetrics.MemHeapUsedM",
+              "metric_path": "metrics/jvm/memHeapUsedM",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode JVM Heap Memory Used",
+              "value": "${Hadoop:service=DataNode,name=JvmMetrics.MemHeapUsedM}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "512"
+          }
+        },
+        {
+          "widget_name": "DataNode JVM Heap Memory Committed",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=DataNode,name=JvmMetrics.MemHeapCommittedM",
+              "metric_path": "metrics/jvm/memHeapCommittedM",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode JVM Heap Memory Committed",
+              "value": "${Hadoop:service=DataNode,name=JvmMetrics.MemHeapCommittedM}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "512"
+          }
+        },
+        {
+          "widget_name": "DataNode Process Disk I/O Utilization",
+          "default_section_name": "HDFS_HEATMAPS",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "dfs.datanode.BytesRead._rate",
+              "metric_path": "metrics/dfs/datanode/bytes_read._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.BytesWritten._rate",
+              "metric_path": "metrics/dfs/datanode/bytes_written._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.TotalReadTime._rate",
+              "metric_path": "metrics/dfs/datanode/TotalReadTime._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.TotalWriteTime._rate",
+              "metric_path": "metrics/dfs/datanode/TotalWriteTime._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode Process Disk I/O Utilization",
+              "value": "${((dfs.datanode.BytesRead._rate/dfs.datanode.TotalReadTime._rate)+(dfs.datanode.BytesWritten._rate/dfs.datanode.TotalWriteTime._rate))*50}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "DataNode Process Network I/O Utilization",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "dfs.datanode.RemoteBytesRead._rate",
+              "metric_path": "metrics/dfs/datanode/RemoteBytesRead._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.ReadsFromRemoteClient._rate",
+              "metric_path": "metrics/dfs/datanode/reads_from_remote_client._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.RemoteBytesWritten._rate",
+              "metric_path": "metrics/dfs/datanode/RemoteBytesWritten._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.WritesFromRemoteClient._rate",
+              "metric_path": "metrics/dfs/datanode/writes_from_remote_client._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode Process Network I/O Utilization",
+              "value": "${((dfs.datanode.RemoteBytesRead._rate/dfs.datanode.ReadsFromRemoteClient._rate)+(dfs.datanode.RemoteBytesWritten._rate/dfs.datanode.WritesFromRemoteClient._rate))*50}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "HDFS Space Utilization",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining",
+              "metric_path": "metrics/FSDatasetState/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl/Remaining",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity",
+              "metric_path": "metrics/dfs/datanode/Capacity",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "HDFS Space Utilization",
+              "value": "${((FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity - FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining)/FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        }
+      ]
+    }
+  ]
+}

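The widget "value" fields above are template expressions over the declared metric
names, e.g. ${(Capacity - Remaining)/Capacity} for the utilization gauge. As a
rough, stand-alone illustration (not the Ambari server's own evaluator), such an
expression could be resolved against collected metric samples like this, in
Python; the sample numbers are made up:

import re

def evaluate_widget_value(expression, metrics):
    """Substitute metric names in a ${...} widget expression and evaluate it.
    `metrics` maps full metric names to numeric samples."""
    body = expression.strip()
    if body.startswith("${") and body.endswith("}"):
        body = body[2:-1]
    # Replace longer metric names first so a name that is a prefix of another
    # does not clobber the longer one.
    for name in sorted(metrics, key=len, reverse=True):
        body = body.replace(name, repr(float(metrics[name])))
    # After substitution only numbers and arithmetic should remain.
    if not re.match(r'^[\d\.\+\-\*/\(\) eE]+$', body):
        raise ValueError("unresolved metric in expression: " + body)
    return eval(body)

if __name__ == "__main__":
    prefix = "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl."
    sample = {prefix + "Capacity": 1000.0, prefix + "Remaining": 250.0}
    expr = "${(" + prefix + "Capacity - " + prefix + "Remaining)/" + prefix + "Capacity}"
    print(evaluate_widget_value(expr, sample))  # -> 0.75, i.e. 75% utilization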

[27/51] [abbrv] ambari git commit: AMBARI-19137. HDP 3.0 TP - move ZK, HDFS, YARN/MR into new common-services version (alejandro)

Posted by sm...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/metainfo.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0/metainfo.xml
new file mode 100644
index 0000000..f520e7e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/metainfo.xml
@@ -0,0 +1,383 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>YARN</name>
+      <displayName>YARN</displayName>
+      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
+      <version>2.1.0.2.0</version>
+      <components>
+
+        <component>
+          <name>APP_TIMELINE_SERVER</name>
+          <displayName>App Timeline Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <reassignAllowed>true</reassignAllowed>
+
+          <commandScript>
+            <script>scripts/application_timeline_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+
+          <dependencies>
+            <dependency>
+              <name>TEZ/TEZ_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>SPARK/SPARK_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+        </component>
+
+        <component>
+          <name>RESOURCEMANAGER</name>
+          <displayName>ResourceManager</displayName>
+          <category>MASTER</category>
+          <cardinality>1-2</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <reassignAllowed>true</reassignAllowed>
+          <commandScript>
+            <script>scripts/resourcemanager.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <dependencies>
+            <dependency>
+              <name>TEZ/TEZ_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <logs>
+            <log>
+              <logId>yarn_resourcemanager</logId>
+              <primary>true</primary>
+            </log>
+            <log>
+              <logId>yarn_historyserver</logId>
+            </log>
+            <log>
+              <logId>yarn_jobsummary</logId>
+            </log>
+          </logs>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/resourcemanager.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+            <customCommand>
+              <name>REFRESHQUEUES</name>
+              <commandScript>
+                <script>scripts/resourcemanager.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+          <configuration-dependencies>
+            <config-type>capacity-scheduler</config-type>
+            <config-type>hdfs-site</config-type>
+          </configuration-dependencies>
+        </component>
+
+        <component>
+          <name>NODEMANAGER</name>
+          <displayName>NodeManager</displayName>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <decommissionAllowed>true</decommissionAllowed>
+          <commandScript>
+            <script>scripts/nodemanager.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+           <bulkCommands>
+             <displayName>NodeManagers</displayName>
+             <!-- Used by decommission and recommission -->
+             <masterComponent>RESOURCEMANAGER</masterComponent>
+           </bulkCommands>
+          <logs>
+            <log>
+              <logId>yarn_nodemanager</logId>
+            </log>
+          </logs>
+        </component>
+
+        <component>
+          <name>YARN_CLIENT</name>
+          <displayName>YARN Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/yarn_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>yarn-site.xml</fileName>
+              <dictionaryName>yarn-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>xml</type>
+              <fileName>core-site.xml</fileName>
+              <dictionaryName>core-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>yarn-env.sh</fileName>
+              <dictionaryName>yarn-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>log4j.properties</fileName>
+              <dictionaryName>hdfs-log4j,yarn-log4j</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>xml</type>
+              <fileName>capacity-scheduler.xml</fileName>
+              <dictionaryName>capacity-scheduler</dictionaryName>
+            </configFile>                        
+          </configFiles>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hadoop-yarn</name>
+            </package>
+            <package>
+              <name>hadoop-hdfs</name>
+            </package>
+            <package>
+              <name>hadoop-mapreduce</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+      
+      <requiredServices>
+        <service>HDFS</service>
+        <service>MAPREDUCE2</service>
+      </requiredServices>
+
+      <themes>
+        <theme>
+          <fileName>theme.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+
+      <quickLinksConfigurations>
+        <quickLinksConfiguration>
+          <fileName>quicklinks.json</fileName>
+          <default>true</default>
+        </quickLinksConfiguration>
+      </quickLinksConfigurations>
+
+      <configuration-dependencies>
+        <config-type>yarn-site</config-type>
+        <config-type>yarn-env</config-type>
+        <config-type>hdfs-site</config-type>
+        <config-type>hadoop-env</config-type>
+        <config-type>core-site</config-type>
+        <config-type>mapred-site</config-type>
+        <config-type>yarn-log4j</config-type>
+        <config-type>ams-ssl-client</config-type>
+        <config-type>ranger-yarn-plugin-properties</config-type>
+        <config-type>ranger-yarn-audit</config-type>
+        <config-type>ranger-yarn-policymgr-ssl</config-type>
+        <config-type>ranger-yarn-security</config-type>
+      </configuration-dependencies>
+
+      <widgetsFileName>YARN_widgets.json</widgetsFileName>
+      <metricsFileName>YARN_metrics.json</metricsFileName>
+    </service>
+
+    <service>
+      <name>MAPREDUCE2</name>
+      <displayName>MapReduce2</displayName>
+      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
+      <version>2.1.0.2.0.6.0</version>
+
+      <components>
+        <component>
+          <name>HISTORYSERVER</name>
+          <displayName>History Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <reassignAllowed>true</reassignAllowed>
+          <auto-deploy>
+            <enabled>true</enabled>
+            <co-locate>YARN/RESOURCEMANAGER</co-locate>
+          </auto-deploy>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>TEZ/TEZ_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>SLIDER/SLIDER</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/historyserver.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <logs>
+            <log>
+              <logId>mapred_historyserver</logId>
+              <primary>true</primary>
+            </log>
+          </logs>
+        </component>
+
+        <component>
+          <name>MAPREDUCE2_CLIENT</name>
+          <displayName>MapReduce2 Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/mapreduce2_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>mapred-site.xml</fileName>
+              <dictionaryName>mapred-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>xml</type>
+              <fileName>core-site.xml</fileName>
+              <dictionaryName>core-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>mapred-env.sh</fileName>
+              <dictionaryName>mapred-env</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hadoop-mapreduce</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/mapred_service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+      
+      <requiredServices>
+        <service>YARN</service>
+      </requiredServices>
+
+      <themes-dir>themes-mapred</themes-dir>
+      <themes>
+        <theme>
+          <fileName>theme.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+
+      <quickLinksConfigurations-dir>quicklinks-mapred</quickLinksConfigurations-dir>
+      <quickLinksConfigurations>
+        <quickLinksConfiguration>
+          <fileName>quicklinks.json</fileName>
+          <default>true</default>
+        </quickLinksConfiguration>
+      </quickLinksConfigurations>
+
+      <configuration-dir>configuration-mapred</configuration-dir>
+
+      <configuration-dependencies>
+        <config-type>hdfs-site</config-type>
+        <config-type>hadoop-env</config-type>
+        <config-type>core-site</config-type>
+        <config-type>mapred-site</config-type>
+        <config-type>mapred-env</config-type>
+        <config-type>ssl-client</config-type>
+        <config-type>ssl-server</config-type>
+        <config-type>ams-ssl-client</config-type>
+      </configuration-dependencies>
+      <restartRequiredAfterRackChange>true</restartRequiredAfterRackChange>
+      <widgetsFileName>MAPREDUCE2_widgets.json</widgetsFileName>
+      <metricsFileName>MAPREDUCE2_metrics.json</metricsFileName>
+    </service>
+  </services>
+</metainfo>

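As an aside, the component layout declared in a metainfo.xml like the one above
can be inspected with a few lines of stand-alone Python (a sketch only, assuming
the file is saved locally as metainfo.xml; this is not the parser Ambari itself
uses for stack definitions):

import xml.etree.ElementTree as ET

def list_components(metainfo_path):
    """Print each service's components with their category and cardinality."""
    root = ET.parse(metainfo_path).getroot()
    for service in root.findall("./services/service"):
        print(service.findtext("name"))
        for comp in service.findall("./components/component"):
            print("  %-22s %-7s cardinality=%s" % (comp.findtext("name"),
                                                   comp.findtext("category"),
                                                   comp.findtext("cardinality")))

if __name__ == "__main__":
    list_components("metainfo.xml")  # hypothetical local copy of the file above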
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/alerts/alert_nodemanager_health.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/alerts/alert_nodemanager_health.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/alerts/alert_nodemanager_health.py
new file mode 100644
index 0000000..d7159e4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/alerts/alert_nodemanager_health.py
@@ -0,0 +1,209 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
+import socket
+import urllib2
+import logging
+import traceback
+from ambari_commons import OSCheck
+from ambari_commons.inet_utils import resolve_address
+from resource_management.libraries.functions.curl_krb_request import curl_krb_request
+from resource_management.libraries.functions.curl_krb_request import DEFAULT_KERBEROS_KINIT_TIMER_MS
+from resource_management.libraries.functions.curl_krb_request import KERBEROS_KINIT_TIMER_PARAMETER
+from resource_management.core.environment import Environment
+
+RESULT_CODE_OK = 'OK'
+RESULT_CODE_CRITICAL = 'CRITICAL'
+RESULT_CODE_UNKNOWN = 'UNKNOWN'
+
+NODEMANAGER_HTTP_ADDRESS_KEY = '{{yarn-site/yarn.nodemanager.webapp.address}}'
+NODEMANAGER_HTTPS_ADDRESS_KEY = '{{yarn-site/yarn.nodemanager.webapp.https.address}}'
+YARN_HTTP_POLICY_KEY = '{{yarn-site/yarn.http.policy}}'
+
+OK_MESSAGE = 'NodeManager Healthy'
+CRITICAL_CONNECTION_MESSAGE = 'Connection failed to {0} ({1})'
+CRITICAL_HTTP_STATUS_MESSAGE = 'HTTP {0} returned from {1} ({2}) \n{3}'
+CRITICAL_NODEMANAGER_STATUS_MESSAGE = 'NodeManager returned an unexpected status of "{0}"'
+CRITICAL_NODEMANAGER_UNKNOWN_JSON_MESSAGE = 'Unable to determine NodeManager health from unexpected JSON response'
+
+KERBEROS_KEYTAB = '{{yarn-site/yarn.nodemanager.webapp.spnego-keytab-file}}'
+KERBEROS_PRINCIPAL = '{{yarn-site/yarn.nodemanager.webapp.spnego-principal}}'
+SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
+SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
+EXECUTABLE_SEARCH_PATHS = '{{kerberos-env/executable_search_paths}}'
+
+NODEMANAGER_DEFAULT_PORT = 8042
+
+CONNECTION_TIMEOUT_KEY = 'connection.timeout'
+CONNECTION_TIMEOUT_DEFAULT = 5.0
+
+LOGGER_EXCEPTION_MESSAGE = "[Alert] NodeManager Health on {0} fails:"
+logger = logging.getLogger('ambari_alerts')
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (NODEMANAGER_HTTP_ADDRESS_KEY,NODEMANAGER_HTTPS_ADDRESS_KEY, EXECUTABLE_SEARCH_PATHS,
+  YARN_HTTP_POLICY_KEY, SMOKEUSER_KEY, KERBEROS_KEYTAB, KERBEROS_PRINCIPAL, SECURITY_ENABLED_KEY)
+  
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+  result_code = RESULT_CODE_UNKNOWN
+
+  if configurations is None:
+    return (result_code, ['There were no configurations supplied to the script.'])
+
+  if host_name is None:
+    host_name = socket.getfqdn()
+
+  scheme = 'http'
+  http_uri = None
+  https_uri = None
+  http_policy = 'HTTP_ONLY'
+
+  if SMOKEUSER_KEY in configurations:
+    smokeuser = configurations[SMOKEUSER_KEY]
+
+  executable_paths = None
+  if EXECUTABLE_SEARCH_PATHS in configurations:
+    executable_paths = configurations[EXECUTABLE_SEARCH_PATHS]
+
+  security_enabled = False
+  if SECURITY_ENABLED_KEY in configurations:
+    security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
+
+  kerberos_keytab = None
+  if KERBEROS_KEYTAB in configurations:
+    kerberos_keytab = configurations[KERBEROS_KEYTAB]
+
+  kerberos_principal = None
+  if KERBEROS_PRINCIPAL in configurations:
+    kerberos_principal = configurations[KERBEROS_PRINCIPAL]
+    kerberos_principal = kerberos_principal.replace('_HOST', host_name)
+
+  if NODEMANAGER_HTTP_ADDRESS_KEY in configurations:
+    http_uri = configurations[NODEMANAGER_HTTP_ADDRESS_KEY]
+
+  if NODEMANAGER_HTTPS_ADDRESS_KEY in configurations:
+    https_uri = configurations[NODEMANAGER_HTTPS_ADDRESS_KEY]
+
+  if YARN_HTTP_POLICY_KEY in configurations:
+    http_policy = configurations[YARN_HTTP_POLICY_KEY]
+
+
+  # parse script arguments
+  connection_timeout = CONNECTION_TIMEOUT_DEFAULT
+  if CONNECTION_TIMEOUT_KEY in parameters:
+    connection_timeout = float(parameters[CONNECTION_TIMEOUT_KEY])
+
+
+  # determine the right URI and whether to use SSL
+  host_port = http_uri
+  if http_policy == 'HTTPS_ONLY':
+    scheme = 'https'
+
+    if https_uri is not None:
+      host_port = https_uri
+
+  label = ''
+  url_response = None
+  node_healthy = 'false'
+  total_time = 0
+
+  # replace the hostname with the host fqdn so the check works in all environments
+  if host_port is not None:
+    if ":" in host_port:
+      uri_host, uri_port = host_port.split(':')
+      host_port = '{0}:{1}'.format(host_name, uri_port)
+    else:
+      host_port = host_name
+
+  # some yarn-site structures don't have the web ui address
+  if host_port is None:
+    host_port = '{0}:{1}'.format(host_name, NODEMANAGER_DEFAULT_PORT)
+
+  query = "{0}://{1}/ws/v1/node/info".format(scheme, host_port)
+
+  try:
+    if kerberos_principal is not None and kerberos_keytab is not None and security_enabled:
+      env = Environment.get_instance()
+
+      # curl requires an integer timeout
+      curl_connection_timeout = int(connection_timeout)
+
+      kinit_timer_ms = parameters.get(KERBEROS_KINIT_TIMER_PARAMETER, DEFAULT_KERBEROS_KINIT_TIMER_MS)
+
+      url_response, error_msg, time_millis  = curl_krb_request(env.tmp_dir, kerberos_keytab, kerberos_principal,
+        query, "nm_health_alert", executable_paths, False, "NodeManager Health", smokeuser,
+        connection_timeout=curl_connection_timeout, kinit_timer_ms = kinit_timer_ms)
+
+      json_response = json.loads(url_response)
+    else:
+      # execute the query for the JSON that includes the node health status
+      url_response = urllib2.urlopen(query, timeout=connection_timeout)
+      json_response = json.loads(url_response.read())
+  except urllib2.HTTPError, httpError:
+    label = CRITICAL_HTTP_STATUS_MESSAGE.format(str(httpError.code), query,
+      str(httpError), traceback.format_exc())
+
+    return (RESULT_CODE_CRITICAL, [label])
+  except:
+    label = CRITICAL_CONNECTION_MESSAGE.format(query, traceback.format_exc())
+    return (RESULT_CODE_CRITICAL, [label])
+
+  # URL response received, parse it
+  try:
+    node_healthy = json_response['nodeInfo']['nodeHealthy']
+    node_healthy_report = json_response['nodeInfo']['healthReport']
+
+    # convert boolean to string
+    node_healthy = str(node_healthy)
+  except:
+    return (RESULT_CODE_CRITICAL, [query + "\n" + traceback.format_exc()])
+  finally:
+    if url_response is not None:
+      try:
+        url_response.close()
+      except:
+        pass
+
+  # proper JSON received, compare against known value
+  if node_healthy.lower() == 'true':
+    result_code = RESULT_CODE_OK
+    label = OK_MESSAGE
+  elif node_healthy.lower() == 'false':
+    result_code = RESULT_CODE_CRITICAL
+    label = node_healthy_report
+  else:
+    result_code = RESULT_CODE_CRITICAL
+    label = CRITICAL_NODEMANAGER_STATUS_MESSAGE.format(node_healthy)
+
+  return (result_code, [label])

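The alert above is driven entirely by the configurations dictionary that the
agent builds from the tokens returned by get_tokens(). A hedged sketch of
exercising execute() outside the agent (the host:port value is hypothetical,
the kerberos keys are omitted so the plain urllib2 path is taken, and the
module's own imports such as resource_management must already be importable):

import alert_nodemanager_health as alert

configs = {
    '{{yarn-site/yarn.nodemanager.webapp.address}}': 'nm-host.example.com:8042',  # hypothetical host
    '{{yarn-site/yarn.http.policy}}': 'HTTP_ONLY',
    '{{cluster-env/security_enabled}}': 'false',
    '{{cluster-env/smokeuser}}': 'ambari-qa',
}

result = alert.execute(configurations=configs, parameters={'connection.timeout': 3.0})
print(result)  # e.g. ('OK', ['NodeManager Healthy']) when the node reports healthy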
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/alerts/alert_nodemanagers_summary.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/alerts/alert_nodemanagers_summary.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/alerts/alert_nodemanagers_summary.py
new file mode 100644
index 0000000..adf27ec
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/alerts/alert_nodemanagers_summary.py
@@ -0,0 +1,219 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import urllib2
+import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
+import logging
+import traceback
+
+from ambari_commons.urllib_handlers import RefreshHeaderProcessor
+from resource_management.libraries.functions.curl_krb_request import curl_krb_request
+from resource_management.libraries.functions.curl_krb_request import DEFAULT_KERBEROS_KINIT_TIMER_MS
+from resource_management.libraries.functions.curl_krb_request import KERBEROS_KINIT_TIMER_PARAMETER
+from resource_management.core.environment import Environment
+
+ERROR_LABEL = '{0} NodeManager{1} {2} unhealthy.'
+OK_LABEL = 'All NodeManagers are healthy'
+
+NODEMANAGER_HTTP_ADDRESS_KEY = '{{yarn-site/yarn.resourcemanager.webapp.address}}'
+NODEMANAGER_HTTPS_ADDRESS_KEY = '{{yarn-site/yarn.resourcemanager.webapp.https.address}}'
+YARN_HTTP_POLICY_KEY = '{{yarn-site/yarn.http.policy}}'
+
+KERBEROS_KEYTAB = '{{yarn-site/yarn.nodemanager.webapp.spnego-keytab-file}}'
+KERBEROS_PRINCIPAL = '{{yarn-site/yarn.nodemanager.webapp.spnego-principal}}'
+SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
+SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
+EXECUTABLE_SEARCH_PATHS = '{{kerberos-env/executable_search_paths}}'
+
+CONNECTION_TIMEOUT_KEY = 'connection.timeout'
+CONNECTION_TIMEOUT_DEFAULT = 5.0
+
+LOGGER_EXCEPTION_MESSAGE = "[Alert] NodeManager Health Summary on {0} fails:"
+logger = logging.getLogger('ambari_alerts')
+
+QRY = "Hadoop:service=ResourceManager,name=RMNMInfo"
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return NODEMANAGER_HTTP_ADDRESS_KEY, NODEMANAGER_HTTPS_ADDRESS_KEY, EXECUTABLE_SEARCH_PATHS, \
+    YARN_HTTP_POLICY_KEY, SMOKEUSER_KEY, KERBEROS_KEYTAB, KERBEROS_PRINCIPAL, SECURITY_ENABLED_KEY
+
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  if configurations is None:
+    return (('UNKNOWN', ['There were no configurations supplied to the script.']))
+
+  scheme = 'http'  
+  http_uri = None
+  https_uri = None
+  http_policy = 'HTTP_ONLY'
+
+  security_enabled = False
+  if SECURITY_ENABLED_KEY in configurations:
+    security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
+
+  executable_paths = None
+  if EXECUTABLE_SEARCH_PATHS in configurations:
+    executable_paths = configurations[EXECUTABLE_SEARCH_PATHS]
+
+  kerberos_keytab = None
+  if KERBEROS_KEYTAB in configurations:
+    kerberos_keytab = configurations[KERBEROS_KEYTAB]
+
+  kerberos_principal = None
+  if KERBEROS_PRINCIPAL in configurations:
+    kerberos_principal = configurations[KERBEROS_PRINCIPAL]
+    kerberos_principal = kerberos_principal.replace('_HOST', host_name)
+
+  if NODEMANAGER_HTTP_ADDRESS_KEY in configurations:
+    http_uri = configurations[NODEMANAGER_HTTP_ADDRESS_KEY]
+
+  if NODEMANAGER_HTTPS_ADDRESS_KEY in configurations:
+    https_uri = configurations[NODEMANAGER_HTTPS_ADDRESS_KEY]
+
+  if YARN_HTTP_POLICY_KEY in configurations:
+    http_policy = configurations[YARN_HTTP_POLICY_KEY]
+    
+  if SMOKEUSER_KEY in configurations:
+    smokeuser = configurations[SMOKEUSER_KEY]
+
+  # parse script arguments
+  connection_timeout = CONNECTION_TIMEOUT_DEFAULT
+  if CONNECTION_TIMEOUT_KEY in parameters:
+    connection_timeout = float(parameters[CONNECTION_TIMEOUT_KEY])
+
+  kinit_timer_ms = parameters.get(KERBEROS_KINIT_TIMER_PARAMETER, DEFAULT_KERBEROS_KINIT_TIMER_MS)
+
+  # determine the right URI and whether to use SSL
+  uri = http_uri
+  if http_policy == 'HTTPS_ONLY':
+    scheme = 'https'
+
+    if https_uri is not None:
+      uri = https_uri
+
+  uri = str(host_name) + ":" + uri.split(":")[1]
+  live_nodemanagers_qry = "{0}://{1}/jmx?qry={2}".format(scheme, uri, QRY)
+  convert_to_json_failed = False
+  response_code = None
+  try:
+    if kerberos_principal is not None and kerberos_keytab is not None and security_enabled:
+      env = Environment.get_instance()
+
+      # curl requires an integer timeout
+      curl_connection_timeout = int(connection_timeout)
+
+      url_response, error_msg, time_millis  = curl_krb_request(env.tmp_dir, kerberos_keytab, kerberos_principal,
+        live_nodemanagers_qry, "nm_health_summary_alert", executable_paths, False,
+        "NodeManager Health Summary", smokeuser, connection_timeout=curl_connection_timeout,
+        kinit_timer_ms = kinit_timer_ms)
+
+      try:
+        url_response_json = json.loads(url_response)
+        live_nodemanagers = json.loads(find_value_in_jmx(url_response_json, "LiveNodeManagers", live_nodemanagers_qry))
+      except ValueError, error:
+        convert_to_json_failed = True
+        logger.exception("[Alert][{0}] Convert response to json failed or json doesn't contain needed data: {1}".
+        format("NodeManager Health Summary", str(error)))
+
+      if convert_to_json_failed:
+        response_code, error_msg, time_millis  = curl_krb_request(env.tmp_dir, kerberos_keytab, kerberos_principal,
+          live_nodemanagers_qry, "nm_health_summary_alert", executable_paths, True,
+          "NodeManager Health Summary", smokeuser, connection_timeout=curl_connection_timeout,
+          kinit_timer_ms = kinit_timer_ms)
+    else:
+      live_nodemanagers = json.loads(get_value_from_jmx(live_nodemanagers_qry,
+      "LiveNodeManagers", connection_timeout))
+
+    if kerberos_principal is not None and kerberos_keytab is not None and security_enabled:
+      if response_code in [200, 307] and convert_to_json_failed:
+        return ('UNKNOWN', ['HTTP {0} response (metrics unavailable)'.format(str(response_code))])
+      elif convert_to_json_failed and response_code not in [200, 307]:
+        raise Exception("[Alert][NodeManager Health Summary] Getting data from {0} failed with http code {1}".format(
+          str(live_nodemanagers_qry), str(response_code)))
+
+    unhealthy_count = 0
+
+    for nodemanager in live_nodemanagers:
+      health_report = nodemanager['State']
+      if health_report == 'UNHEALTHY':
+        unhealthy_count += 1
+
+    if unhealthy_count == 0:
+      result_code = 'OK'
+      label = OK_LABEL
+    else:
+      result_code = 'CRITICAL'
+      if unhealthy_count == 1:
+        label = ERROR_LABEL.format(unhealthy_count, '', 'is')
+      else:
+        label = ERROR_LABEL.format(unhealthy_count, 's', 'are')
+
+  except:
+    label = traceback.format_exc()
+    result_code = 'UNKNOWN'
+
+  return (result_code, [label])
+
+
+def get_value_from_jmx(query, jmx_property, connection_timeout):
+  response = None
+  
+  try:
+    # use a custom header processor that will look for the non-standard
+    # "Refresh" header and attempt to follow the redirect
+    url_opener = urllib2.build_opener(RefreshHeaderProcessor())
+    response = url_opener.open(query, timeout=connection_timeout)
+
+    data = response.read()
+    data_dict = json.loads(data)
+    return find_value_in_jmx(data_dict, jmx_property, query)
+  finally:
+    if response is not None:
+      try:
+        response.close()
+      except:
+        pass
+
+
+def find_value_in_jmx(data_dict, jmx_property, query):
+  json_data = data_dict["beans"][0]
+
+  if jmx_property not in json_data:
+    beans = data_dict['beans']
+    for jmx_prop_list_item in beans:
+      if "name" in jmx_prop_list_item and jmx_prop_list_item["name"] == QRY:
+        if jmx_property not in jmx_prop_list_item:
+          raise Exception("Unable to find {0} in JSON from {1} ".format(jmx_property, query))
+        json_data = jmx_prop_list_item
+
+  return json_data[jmx_property]
\ No newline at end of file

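For reference, the summary alert reads the ResourceManager's
Hadoop:service=ResourceManager,name=RMNMInfo bean, whose LiveNodeManagers
attribute is itself a JSON-encoded list of node records. A stand-alone sketch of
the same counting logic against a hand-made sample (the field values are
illustrative, not captured output):

import json

sample_bean = {
    "LiveNodeManagers": json.dumps([
        {"HostName": "nm1.example.com", "State": "RUNNING"},
        {"HostName": "nm2.example.com", "State": "UNHEALTHY"},
        {"HostName": "nm3.example.com", "State": "RUNNING"},
    ])
}

live_nodemanagers = json.loads(sample_bean["LiveNodeManagers"])
unhealthy = sum(1 for nm in live_nodemanagers if nm["State"] == "UNHEALTHY")

if unhealthy == 0:
    print("OK: All NodeManagers are healthy")
else:
    print("CRITICAL: %d NodeManager%s %s unhealthy."
          % (unhealthy, "" if unhealthy == 1 else "s", "is" if unhealthy == 1 else "are"))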
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/files/validateYarnComponentStatusWindows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/files/validateYarnComponentStatusWindows.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/files/validateYarnComponentStatusWindows.py
new file mode 100644
index 0000000..5e2b4d9
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/files/validateYarnComponentStatusWindows.py
@@ -0,0 +1,161 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import optparse
+import subprocess
+import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
+import urllib2
+
+RESOURCEMANAGER = 'rm'
+NODEMANAGER = 'nm'
+HISTORYSERVER = 'hs'
+
+STARTED_STATE = 'STARTED'
+RUNNING_STATE = 'RUNNING'
+
+#Return response for given path and address
+def getResponse(path, address, ssl_enabled):
+  if ssl_enabled:
+    url = 'https://' + address + path
+  else:
+    url = 'http://' + address + path
+
+  try:
+    handle = urllib2.urlopen(url)
+    output = handle.read()
+    handle.close()
+    response = json.loads(output)
+    if response == None:
+      print 'There is no response for url: ' + str(url)
+      exit(1)
+    return response
+  except Exception as e:
+    print 'Error getting response for url:' + str(url), e
+    exit(1)
+
+#Verify that REST api is available for given component
+def validateAvailability(component, path, address, ssl_enabled):
+
+  try:
+    response = getResponse(path, address, ssl_enabled)
+    is_valid = validateAvailabilityResponse(component, response)
+    if not is_valid:
+      exit(1)
+  except Exception as e:
+    print 'Error checking availability status of component', e
+    exit(1)
+
+#Validate component-specific response
+def validateAvailabilityResponse(component, response):
+  try:
+    if component == RESOURCEMANAGER:
+      rm_state = response['clusterInfo']['state']
+      if rm_state == STARTED_STATE:
+        return True
+      else:
+        print 'Resourcemanager is not started'
+        return False
+
+    elif component == NODEMANAGER:
+      node_healthy = bool(response['nodeInfo']['nodeHealthy'])
+      if node_healthy:
+        return True
+      else:
+        return False
+    elif component == HISTORYSERVER:
+      hs_start_time = response['historyInfo']['startedOn']
+      if hs_start_time > 0:
+        return True
+      else:
+        return False
+    else:
+      return False
+  except Exception as e:
+    print 'Error validating availability response for ' + str(component), e
+    return False
+
+#Verify that component has required resources to work
+def validateAbility(component, path, address, ssl_enabled):
+
+  try:
+    response = getResponse(path, address, ssl_enabled)
+    is_valid = validateAbilityResponse(component, response)
+    if not is_valid:
+      exit(1)
+  except Exception as e:
+    print 'Error checking ability of component', e
+    exit(1)
+
+#Validate component-specific response that it has required resources to work
+def validateAbilityResponse(component, response):
+  try:
+    if component == RESOURCEMANAGER:
+      nodes = []
+      if response.has_key('nodes') and not response['nodes'] == None and response['nodes'].has_key('node'):
+        nodes = response['nodes']['node']
+      connected_nodes_count = len(nodes)
+      if connected_nodes_count == 0:
+        print 'There are no NodeManagers connected to the ResourceManager'
+        return False
+      active_nodes = filter(lambda x: x['state'] == RUNNING_STATE, nodes)
+      active_nodes_count = len(active_nodes)
+
+      if active_nodes_count == 0:
+        print 'There are no active NodeManagers connected to the ResourceManager'
+        return False
+      else:
+        return True
+    else:
+      return False
+  except Exception as e:
+    print 'Error validating ability response', e
+    return False
+
+#
+# Main.
+#
+def main():
+  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
+  parser.add_option("-p", "--port", dest="address", help="Host:Port for REST API of a desired component")
+  parser.add_option("-s", "--ssl", dest="ssl_enabled", help="Is SSL enabled for UI of component")
+
+  (options, args) = parser.parse_args()
+
+  component = args[0]
+
+  address = options.address
+  ssl_enabled = str(options.ssl_enabled).lower() == 'true'
+  if component == RESOURCEMANAGER:
+    path = '/ws/v1/cluster/info'
+  elif component == NODEMANAGER:
+    path = '/ws/v1/node/info'
+  elif component == HISTORYSERVER:
+    path = '/ws/v1/history/info'
+  else:
+    parser.error("Invalid component")
+
+  validateAvailability(component, path, address, ssl_enabled)
+
+  if component == RESOURCEMANAGER:
+    path = '/ws/v1/cluster/nodes'
+    validateAbility(component, path, address, ssl_enabled)
+
+if __name__ == "__main__":
+  main()

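Usage follows the optparse definition above: the positional argument selects the
component ('rm', 'nm' or 'hs') and -p/-s carry the REST address and the SSL flag.
A hypothetical invocation against a ResourceManager web endpoint (host and port
are examples only):

    python validateYarnComponentStatusWindows.py -p rm-host.example.com:8088 -s false rm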
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/__init__.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/__init__.py
new file mode 100644
index 0000000..35de4bb
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/__init__.py
@@ -0,0 +1,20 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/application_timeline_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/application_timeline_server.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/application_timeline_server.py
new file mode 100644
index 0000000..03fff21
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/application_timeline_server.py
@@ -0,0 +1,162 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import check_process_status
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties,\
+  FILE_TYPE_XML
+from resource_management.libraries.functions.format import format
+from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Execute
+
+from yarn import yarn
+from service import service
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+
+class ApplicationTimelineServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    service('timelineserver', action='start')
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    service('timelineserver', action='stop')
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn(name='apptimelineserver')
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class ApplicationTimelineServerWindows(ApplicationTimelineServer):
+  def status(self, env):
+    service('timelineserver', action='status')
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class ApplicationTimelineServerDefault(ApplicationTimelineServer):
+  def get_component_name(self):
+    return "hadoop-yarn-timelineserver"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-yarn-timelineserver", params.version)
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.yarn_historyserver_pid_file)
+
+  def security_status(self, env):
+    import status_params
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      props_value_check = {"yarn.timeline-service.enabled": "true",
+                           "yarn.timeline-service.http-authentication.type": "kerberos",
+                           "yarn.acl.enable": "true"}
+      props_empty_check = ["yarn.timeline-service.principal",
+                           "yarn.timeline-service.keytab",
+                           "yarn.timeline-service.http-authentication.kerberos.principal",
+                           "yarn.timeline-service.http-authentication.kerberos.keytab"]
+
+      props_read_check = ["yarn.timeline-service.keytab",
+                          "yarn.timeline-service.http-authentication.kerberos.keytab"]
+      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
+                                                  props_read_check)
+
+      yarn_expectations ={}
+      yarn_expectations.update(yarn_site_props)
+
+      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
+                                                   {'yarn-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, yarn_expectations)
+      if not result_issues: # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ( 'yarn-site' not in security_params
+               or 'yarn.timeline-service.keytab' not in security_params['yarn-site']
+               or 'yarn.timeline-service.principal' not in security_params['yarn-site']) \
+            or 'yarn.timeline-service.http-authentication.kerberos.keytab' not in security_params['yarn-site'] \
+            or 'yarn.timeline-service.http-authentication.kerberos.principal' not in security_params['yarn-site']:
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal are not set property."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.yarn_user,
+                                security_params['yarn-site']['yarn.timeline-service.keytab'],
+                                security_params['yarn-site']['yarn.timeline-service.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.yarn_user,
+                                security_params['yarn-site']['yarn.timeline-service.http-authentication.kerberos.keytab'],
+                                security_params['yarn-site']['yarn.timeline-service.http-authentication.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+  def get_log_folder(self):
+    import params
+    return params.yarn_log_dir
+  
+  def get_user(self):
+    import params
+    return params.yarn_user
+
+  def get_pid_files(self):
+    import status_params
+    Execute(format("mv {status_params.yarn_historyserver_pid_file_old} {status_params.yarn_historyserver_pid_file}"),
+            only_if = format("test -e {status_params.yarn_historyserver_pid_file_old}", user=status_params.yarn_user))
+    return [status_params.yarn_historyserver_pid_file]
+
+if __name__ == "__main__":
+  ApplicationTimelineServer().execute()
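
The security_status() method above is built entirely from the expectation-validation helpers in resource_management's security_commons module. As a rough illustration of that pipeline, the following sketch reduces it to a single yarn-site check; the property names and helper signatures are the ones used in the script, while the function name check_ats_security is made up.

from resource_management.libraries.functions.security_commons import build_expectations, \
  get_params_from_filesystem, validate_security_config_properties, FILE_TYPE_XML

def check_ats_security(hadoop_conf_dir):
  # Declare what a kerberized timeline server should look like: exact values,
  # non-empty properties, and keytab files that must be readable.
  expectations = build_expectations(
    'yarn-site',
    {"yarn.timeline-service.http-authentication.type": "kerberos"},
    ["yarn.timeline-service.principal", "yarn.timeline-service.keytab"],
    ["yarn.timeline-service.keytab"])

  # Read the live yarn-site.xml from the agent's Hadoop conf dir.
  actual = get_params_from_filesystem(hadoop_conf_dir, {'yarn-site.xml': FILE_TYPE_XML})

  # Returns a dict of {config file: reason} for every expectation that failed.
  issues = validate_security_config_properties(actual, expectations)
  return "SECURED_KERBEROS" if not issues else "UNSECURED"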

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/historyserver.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/historyserver.py
new file mode 100644
index 0000000..8f5d380
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/historyserver.py
@@ -0,0 +1,192 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+from resource_management.core.source import Template
+from resource_management.core.logger import Logger
+
+from install_jars import install_tez_jars
+from yarn import yarn
+from service import service
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+
+class HistoryServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    service('historyserver', action='stop', serviceName='mapreduce')
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn(name="historyserver")
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HistoryserverWindows(HistoryServer):
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    service('historyserver', action='start', serviceName='mapreduce')
+
+  def status(self, env):
+    service('historyserver', action='status')
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HistoryServerDefault(HistoryServer):
+  def get_component_name(self):
+    return "hadoop-mapreduce-historyserver"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-mapreduce-historyserver", params.version)
+      # MC Hammer said, "Can't touch this"
+      copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
+      copy_to_hdfs("tez", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
+      copy_to_hdfs("slider", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
+      params.HdfsResource(None, action="execute")
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+
+    if params.stack_version_formatted_major and check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS, params.stack_version_formatted_major):
+      # MC Hammer said, "Can't touch this"
+      resource_created = copy_to_hdfs(
+        "mapreduce",
+        params.user_group,
+        params.hdfs_user,
+        skip=params.sysprep_skip_copy_tarballs_hdfs)
+      resource_created = copy_to_hdfs(
+        "tez",
+        params.user_group,
+        params.hdfs_user,
+        skip=params.sysprep_skip_copy_tarballs_hdfs) or resource_created
+      resource_created = copy_to_hdfs(
+        "slider",
+        params.user_group,
+        params.hdfs_user,
+        skip=params.sysprep_skip_copy_tarballs_hdfs) or resource_created
+      if resource_created:
+        params.HdfsResource(None, action="execute")
+    else:
+      # In stack versions without copy_tarball_to_hdfs support, tez.tar.gz was copied to a different folder in HDFS.
+      install_tez_jars()
+
+    service('historyserver', action='start', serviceName='mapreduce')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.mapred_historyserver_pid_file)
+
+  def security_status(self, env):
+    import status_params
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      expectations = {}
+      expectations.update(build_expectations('mapred-site',
+                                             None,
+                                             [
+                                               'mapreduce.jobhistory.keytab',
+                                               'mapreduce.jobhistory.principal',
+                                               'mapreduce.jobhistory.webapp.spnego-keytab-file',
+                                               'mapreduce.jobhistory.webapp.spnego-principal'
+                                             ],
+                                             None))
+
+      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
+                                                   {'mapred-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, expectations)
+      if not result_issues: # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ( 'mapred-site' not in security_params or
+               'mapreduce.jobhistory.keytab' not in security_params['mapred-site'] or
+               'mapreduce.jobhistory.principal' not in security_params['mapred-site'] or
+               'mapreduce.jobhistory.webapp.spnego-keytab-file' not in security_params['mapred-site'] or
+               'mapreduce.jobhistory.webapp.spnego-principal' not in security_params['mapred-site']):
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal not set."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.mapred_user,
+                                security_params['mapred-site']['mapreduce.jobhistory.keytab'],
+                                security_params['mapred-site']['mapreduce.jobhistory.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.mapred_user,
+                                security_params['mapred-site']['mapreduce.jobhistory.webapp.spnego-keytab-file'],
+                                security_params['mapred-site']['mapreduce.jobhistory.webapp.spnego-principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+  def get_log_folder(self):
+    import params
+    return params.mapred_log_dir
+
+  def get_user(self):
+    import params
+    return params.mapred_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.mapred_historyserver_pid_file]
+
+if __name__ == "__main__":
+  HistoryServer().execute()
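
HistoryServerDefault.start() above leans on the deferred-execution behavior of copy_to_hdfs and HdfsResource: each copy_to_hdfs call only schedules an upload and reports whether it queued anything new, and params.HdfsResource(None, action="execute") then applies the whole batch at once. A minimal sketch of that pattern, assuming a params module shaped like the one in this package (the tarball list is just the one used above):

from resource_management.libraries.functions.copy_tarball import copy_to_hdfs

def refresh_tarballs(params, tarballs=("mapreduce", "tez", "slider")):
  resource_created = False
  for name in tarballs:
    # Queue an upload for this tarball; True means something new was scheduled.
    resource_created = copy_to_hdfs(
      name, params.user_group, params.hdfs_user,
      skip=params.sysprep_skip_copy_tarballs_hdfs) or resource_created

  if resource_created:
    # A single execute flushes every HdfsResource operation queued above.
    params.HdfsResource(None, action="execute")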

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/install_jars.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/install_jars.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/install_jars.py
new file mode 100644
index 0000000..728a014
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/install_jars.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.functions.format import format
+import os
+import glob
+
+def install_tez_jars():
+  import params
+
+  destination_hdfs_dirs = get_tez_hdfs_dir_paths(params.tez_lib_uris)
+
+  # If tez libraries are to be stored in hdfs
+  if destination_hdfs_dirs:
+    for hdfs_dir in destination_hdfs_dirs:
+      params.HdfsResource(hdfs_dir,
+                           type="directory",
+                           action="create_on_execute",
+                           owner=params.tez_user,
+                           mode=0755
+      )
+
+    app_dir_path = None
+    lib_dir_path = None
+
+    for path in destination_hdfs_dirs:
+      if 'lib' in path:
+        lib_dir_path = path
+      else:
+        app_dir_path = path
+
+    tez_jars = {}
+    if app_dir_path:
+      tez_jars[params.tez_local_api_jars] = app_dir_path
+    if lib_dir_path:
+      tez_jars[params.tez_local_lib_jars] = lib_dir_path
+
+    for src_file_regex, dest_dir in tez_jars.iteritems():
+      for src_filepath in glob.glob(src_file_regex):
+        src_filename = os.path.basename(src_filepath)
+        params.HdfsResource(format("{dest_dir}/{src_filename}"),
+                            type="file",
+                            action="create_on_execute",
+                            source=src_filepath,
+                            mode=0755,
+                            owner=params.tez_user
+         )
+        
+    params.HdfsResource(None, action="execute")
+
+
+def get_tez_hdfs_dir_paths(tez_lib_uris = None):
+  hdfs_path_prefix = 'hdfs://'
+  lib_dir_paths = []
+  if tez_lib_uris and tez_lib_uris.strip().find(hdfs_path_prefix, 0) != -1:
+    dir_paths = tez_lib_uris.split(',')
+    for path in dir_paths:
+      if not "tez.tar.gz" in path:
+        lib_dir_path = path.replace(hdfs_path_prefix, '')
+        lib_dir_path = lib_dir_path if lib_dir_path.endswith(os.sep) else lib_dir_path + os.sep
+        lib_dir_paths.append(lib_dir_path)
+      else:
+        lib_dir_path = path.replace(hdfs_path_prefix, '')
+        lib_dir_paths.append(os.path.dirname(lib_dir_path))
+    pass
+  pass
+
+  return lib_dir_paths
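
Since get_tez_hdfs_dir_paths() decides where install_tez_jars() uploads the local jars, a short worked example may help; the namenode address and paths below are made up, and only the parsing behavior follows from the function above (shown for a Linux host, where os.sep is '/'):

from install_jars import get_tez_hdfs_dir_paths  # same package as the script above

uris = "hdfs://nn.example.com:8020/apps/tez/,hdfs://nn.example.com:8020/apps/tez/lib/"
print(get_tez_hdfs_dir_paths(uris))
# ['nn.example.com:8020/apps/tez/', 'nn.example.com:8020/apps/tez/lib/']

# An entry pointing at tez.tar.gz is reduced to its parent directory instead:
print(get_tez_hdfs_dir_paths("hdfs://nn.example.com:8020/apps/tez/tez.tar.gz"))
# ['nn.example.com:8020/apps/tez']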

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/mapred_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/mapred_service_check.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/mapred_service_check.py
new file mode 100644
index 0000000..6288ac0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/mapred_service_check.py
@@ -0,0 +1,172 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import os
+import sys
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.execute_hadoop import ExecuteHadoop
+from resource_management.libraries.functions.format import format
+from resource_management.core.resources.system import Execute, File
+from resource_management.core.source import StaticFile
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+from resource_management.core.logger import Logger
+
+
+class MapReduce2ServiceCheck(Script):
+  def service_check(self, env):
+    pass
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class MapReduce2ServiceCheckWindows(MapReduce2ServiceCheck):
+  def service_check(self, env):
+    import params
+
+    env.set_params(params)
+
+    component_type = 'hs'
+    # the same history server web UI address is used whether or not SSL is enabled
+    component_address = params.hs_webui_address
+
+    validateStatusFileName = "validateYarnComponentStatusWindows.py"
+    validateStatusFilePath = os.path.join(os.path.dirname(params.hadoop_home), "temp", validateStatusFileName)
+    python_executable = sys.executable
+    validateStatusCmd = "{0} {1} {2} -p {3} -s {4}".format(
+      python_executable, validateStatusFilePath, component_type, component_address, params.hadoop_ssl_enabled)
+
+    if params.security_enabled:
+      kinit_cmd = "{0} -kt {1} {2};".format(params.kinit_path_local, params.smoke_user_keytab, params.smokeuser)
+      smoke_cmd = kinit_cmd + validateStatusCmd
+    else:
+      smoke_cmd = validateStatusCmd
+
+    File(validateStatusFilePath,
+         content=StaticFile(validateStatusFileName)
+    )
+
+    Execute(smoke_cmd,
+            tries=3,
+            try_sleep=5,
+            logoutput=True
+    )
+
+    # hadoop_exe = os.path.join(params.hadoop_home, "bin", "hadoop")
+    #
+    # tested_file = os.path.join(params.hadoop_home, "bin", "hadoop.cmd")
+    # jar_path = os.path.join(params.hadoop_mapred2_jar_location, params.hadoopMapredExamplesJarName)
+    # input_file = format("/user/hadoop/mapredsmokeinput")
+    # output_file = format("/user/hadoop/mapredsmokeoutput")
+    # cleanup_cmd = format("cmd /C {hadoop_exe} fs -rm -r -f {output_file} {input_file}")
+    # create_file_cmd = format("cmd /C {hadoop_exe} fs -put {tested_file} {input_file}")
+    # run_wordcount_job = format("cmd /C {hadoop_exe} jar {jar_path} wordcount {input_file} {output_file}")
+    # test_cmd = format("cmd /C {hadoop_exe} fs -test -e {output_file}")
+    #
+    # if params.security_enabled:
+    #   kinit_cmd = "{0} -kt {1} {2};".format(kinit_path_local, smoke_user_keytab, smokeuser)
+    #   Execute(kinit_cmd)
+    #
+    # Execute(cleanup_cmd,
+    #         tries=1,
+    #         try_sleep=5,
+    #         logoutput=True,
+    #         user=params.hdfs_user
+    # )
+    #
+    # Execute(create_file_cmd,
+    #         tries=1,
+    #         try_sleep=5,
+    #         logoutput=True,
+    #         user=params.hdfs_user
+    # )
+    #
+    # Execute(run_wordcount_job,
+    #         tries=1,
+    #         try_sleep=5,
+    #         logoutput=True,
+    #         user=params.hdfs_user
+    # )
+    #
+    # Execute(test_cmd,
+    #         logoutput=True,
+    #         user=params.hdfs_user
+    # )
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class MapReduce2ServiceCheckDefault(MapReduce2ServiceCheck):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    jar_path = format("{hadoop_mapred2_jar_location}/{hadoopMapredExamplesJarName}")
+    input_file = format("/user/{smokeuser}/mapredsmokeinput")
+    output_file = format("/user/{smokeuser}/mapredsmokeoutput")
+
+    test_cmd = format("fs -test -e {output_file}")
+    run_wordcount_job = format("jar {jar_path} wordcount {input_file} {output_file}")
+
+    params.HdfsResource(format("/user/{smokeuser}"),
+                      type="directory",
+                      action="create_on_execute",
+                      owner=params.smokeuser,
+                      mode=params.smoke_hdfs_user_mode,
+    )
+    params.HdfsResource(output_file,
+                        action = "delete_on_execute",
+                        type = "directory",
+                        dfs_type = params.dfs_type,
+    )
+    params.HdfsResource(input_file,
+                        action = "create_on_execute",
+                        type = "file",
+                        source = "/etc/passwd",
+                        dfs_type = params.dfs_type,
+    )
+    params.HdfsResource(None, action="execute")
+
+    # initialize the ticket
+    if params.security_enabled:
+      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
+      Execute(kinit_cmd, user=params.smokeuser)
+
+    ExecuteHadoop(run_wordcount_job,
+                  tries=1,
+                  try_sleep=5,
+                  user=params.smokeuser,
+                  bin_dir=params.execute_path,
+                  conf_dir=params.hadoop_conf_dir,
+                  logoutput=True)
+
+    # the ticket may have expired, so re-initialize
+    if params.security_enabled:
+      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
+      Execute(kinit_cmd, user=params.smokeuser)
+
+    ExecuteHadoop(test_cmd,
+                  user=params.smokeuser,
+                  bin_dir=params.execute_path,
+                  conf_dir=params.hadoop_conf_dir)
+
+
+if __name__ == "__main__":
+  MapReduce2ServiceCheck().execute()
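
The default service check above builds its hadoop arguments with resource_management's format(), which resolves {name} placeholders from the variables visible in the calling scope rather than from explicit keyword arguments. A small sketch of that, with illustrative stand-ins for the values params normally supplies:

from resource_management.libraries.functions.format import format

jar_path = "/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar"  # illustrative path
input_file = "/user/ambari-qa/mapredsmokeinput"                       # illustrative path
output_file = "/user/ambari-qa/mapredsmokeoutput"                     # illustrative path

# Placeholders are looked up in the surrounding scope, so this yields the same kind of
# argument string that is handed to ExecuteHadoop in the service check above.
run_wordcount_job = format("jar {jar_path} wordcount {input_file} {output_file}")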

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/mapreduce2_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/mapreduce2_client.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/mapreduce2_client.py
new file mode 100644
index 0000000..424157b
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/mapreduce2_client.py
@@ -0,0 +1,98 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+# Python imports
+import os
+import sys
+
+# Local imports
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.core.exceptions import ClientComponentHasNoStatus
+from yarn import yarn
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+from resource_management.core.logger import Logger
+
+
+class MapReduce2Client(Script):
+  def install(self, env):
+    import params
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env, config_dir=None, upgrade_type=None):
+    """
+    :param env: Python environment
+    :param config_dir: During rolling upgrade, which config directory to save configs to.
+    """
+    import params
+    env.set_params(params)
+    yarn(config_dir=config_dir)
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+  def stack_upgrade_save_new_config(self, env):
+    """
+    Because this gets called during a Rolling Upgrade, the new mapreduce configs have already been saved, so we must be
+    careful to only call configure() on the directory of the new version.
+    :param env:
+    """
+    import params
+    env.set_params(params)
+
+    conf_select_name = "hadoop"
+    base_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+    config_dir = self.get_config_dir_during_stack_upgrade(env, base_dir, conf_select_name)
+
+    if config_dir:
+      Logger.info("stack_upgrade_save_new_config(): Calling conf-select on %s using version %s" % (conf_select_name, str(params.version)))
+
+      # Because this script was called from ru_execute_tasks.py, which already enters an Environment with its own basedir,
+      # the basedir must be changed here so this function can find the Jinja templates for the service.
+      env.config.basedir = base_dir
+      conf_select.select(params.stack_name, conf_select_name, params.version)
+      self.configure(env, config_dir=config_dir)
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class MapReduce2ClientWindows(MapReduce2Client):
+  pass
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class MapReduce2ClientDefault(MapReduce2Client):
+  def get_component_name(self):
+    return "hadoop-client"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-client", params.version)
+
+
+if __name__ == "__main__":
+  MapReduce2Client().execute()
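
The stack_upgrade_save_new_config() docstring above captures the key constraint: during a rolling upgrade the new configs already exist, so configure() may only be run against the new version's config directory. Reduced to its core, the guard looks roughly like the sketch below (the free-standing function is illustrative and simply mirrors the method above):

from resource_management.libraries.functions import conf_select

def save_new_config_if_upgrading(script, env, params, base_dir):
  conf_select_name = "hadoop"
  config_dir = script.get_config_dir_during_stack_upgrade(env, base_dir, conf_select_name)
  if not config_dir:
    return  # not part of a stack upgrade, nothing to re-render

  # Point the Environment at this package so the Jinja templates can be found,
  # then switch the conf symlinks and render configs only into the new directory.
  env.config.basedir = base_dir
  conf_select.select(params.stack_name, conf_select_name, params.version)
  script.configure(env, config_dir=config_dir)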

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/nodemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/nodemanager.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/nodemanager.py
new file mode 100644
index 0000000..133d2e1
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/nodemanager.py
@@ -0,0 +1,166 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import nodemanager_upgrade
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+from resource_management.core.logger import Logger
+from yarn import yarn
+from service import service
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+
+class Nodemanager(Script):
+  def install(self, env):
+    self.install_packages(env)
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    service('nodemanager',action='stop')
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    service('nodemanager',action='start')
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn(name="nodemanager")
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class NodemanagerWindows(Nodemanager):
+  def status(self, env):
+    service('nodemanager', action='status')
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class NodemanagerDefault(Nodemanager):
+  def get_component_name(self):
+    return "hadoop-yarn-nodemanager"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing NodeManager Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-yarn-nodemanager", params.version)
+
+  def post_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing NodeManager Stack Upgrade post-restart")
+    import params
+    env.set_params(params)
+
+    nodemanager_upgrade.post_upgrade_check()
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.nodemanager_pid_file)
+
+  def security_status(self, env):
+    import status_params
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      props_value_check = {"yarn.timeline-service.http-authentication.type": "kerberos",
+                           "yarn.acl.enable": "true"}
+      props_empty_check = ["yarn.nodemanager.principal",
+                           "yarn.nodemanager.keytab",
+                           "yarn.nodemanager.webapp.spnego-principal",
+                           "yarn.nodemanager.webapp.spnego-keytab-file"]
+
+      props_read_check = ["yarn.nodemanager.keytab",
+                          "yarn.nodemanager.webapp.spnego-keytab-file"]
+      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
+                                           props_read_check)
+
+      yarn_expectations = {}
+      yarn_expectations.update(yarn_site_props)
+
+      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
+                                                   {'yarn-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, yarn_expectations)
+      if not result_issues: # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ('yarn-site' not in security_params
+              or 'yarn.nodemanager.keytab' not in security_params['yarn-site']
+              or 'yarn.nodemanager.principal' not in security_params['yarn-site']
+              or 'yarn.nodemanager.webapp.spnego-keytab-file' not in security_params['yarn-site']
+              or 'yarn.nodemanager.webapp.spnego-principal' not in security_params['yarn-site']):
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal are not set properly."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.yarn_user,
+                                security_params['yarn-site']['yarn.nodemanager.keytab'],
+                                security_params['yarn-site']['yarn.nodemanager.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.yarn_user,
+                                security_params['yarn-site']['yarn.nodemanager.webapp.spnego-keytab-file'],
+                                security_params['yarn-site']['yarn.nodemanager.webapp.spnego-principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+  def get_log_folder(self):
+    import params
+    return params.yarn_log_dir
+  
+  def get_user(self):
+    import params
+    return params.yarn_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.nodemanager_pid_file]
+
+if __name__ == "__main__":
+  Nodemanager().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/nodemanager_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/nodemanager_upgrade.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/nodemanager_upgrade.py
new file mode 100644
index 0000000..22cd8cc
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/nodemanager_upgrade.py
@@ -0,0 +1,74 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import subprocess
+
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+from resource_management.core.resources.system import Execute
+from resource_management.core import shell
+from resource_management.libraries.functions.decorator import retry
+from resource_management.libraries.functions.show_logs import show_logs
+from resource_management.libraries.functions.format import format
+
+
+def post_upgrade_check():
+  '''
+  Checks that the NodeManager has rejoined the cluster.
+  This function will obtain the Kerberos ticket if security is enabled.
+  :return:
+  '''
+  import params
+
+  Logger.info('NodeManager executing "yarn node -list -states=RUNNING" to verify the node has rejoined the cluster...')
+  if params.security_enabled and params.nodemanager_kinit_cmd:
+    Execute(params.nodemanager_kinit_cmd, user=params.yarn_user)
+
+  try:
+    _check_nodemanager_startup()
+  except Fail:
+    show_logs(params.yarn_log_dir, params.yarn_user)
+    raise
+    
+
+@retry(times=30, sleep_time=10, err_class=Fail)
+def _check_nodemanager_startup():
+  '''
+  Checks that a NodeManager is in a RUNNING state in the cluster via the
+  "yarn node -list -states=RUNNING" command. Once the NodeManager is found to be
+  alive, this method returns; otherwise it raises a Fail(...) and is retried
+  automatically.
+  :return:
+  '''
+  import params
+  import socket
+
+  command = 'yarn node -list -states=RUNNING'
+  return_code, yarn_output = shell.checked_call(command, user=params.yarn_user)
+  
+  hostname = params.hostname.lower()
+  hostname_ip = socket.gethostbyname(params.hostname.lower())
+  nodemanager_address = params.nm_address.lower()
+  yarn_output = yarn_output.lower()
+
+  if hostname in yarn_output or nodemanager_address in yarn_output or hostname_ip in yarn_output:
+    Logger.info('NodeManager with ID \'{0}\' has rejoined the cluster.'.format(nodemanager_address))
+    return
+  else:
+    raise Fail('NodeManager with ID \'{0}\' was not found in the list of running NodeManagers. \'{1}\' output was:\n{2}'.format(nodemanager_address, command, yarn_output))
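
_check_nodemanager_startup() relies on the @retry decorator to turn each Fail into another attempt (here 30 attempts, 10 seconds apart). The real decorator lives in resource_management.libraries.functions.decorator; purely as an illustration of the mechanism, a minimal version could look like this:

import time

def retry(times=3, sleep_time=1, err_class=Exception):
  def decorator(func):
    def wrapper(*args, **kwargs):
      for attempt in range(times):
        try:
          return func(*args, **kwargs)
        except err_class:
          if attempt == times - 1:
            raise              # out of attempts, let the last failure propagate
          time.sleep(sleep_time)
    return wrapper
  return decorator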

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/params.py
new file mode 100644
index 0000000..d0ad6f6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/scripts/params.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from ambari_commons import OSCheck
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.copy_tarball import get_sysprep_skip_copy_tarballs_hdfs
+
+if OSCheck.is_windows_family():
+  from params_windows import *
+else:
+  from params_linux import *
+
+sysprep_skip_copy_tarballs_hdfs = get_sysprep_skip_copy_tarballs_hdfs()
+retryAble = default("/commandParams/command_retry_enabled", False)


[33/51] [abbrv] ambari git commit: AMBARI-19137. HDP 3.0 TP - move ZK, HDFS, YARN/MR into new common-services version (alejandro)

Posted by sm...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/service_check.py
new file mode 100644
index 0000000..981f002
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/service_check.py
@@ -0,0 +1,152 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management.libraries.script.script import Script
+from resource_management.core.shell import as_user
+from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons import OSConst
+from resource_management.libraries.functions.curl_krb_request import curl_krb_request
+from resource_management.libraries import functions
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.resources.execute_hadoop import ExecuteHadoop
+from resource_management.core.logger import Logger
+from resource_management.core.source import StaticFile
+from resource_management.core.resources.system import Execute, File
+
+
+class HdfsServiceCheck(Script):
+  pass
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HdfsServiceCheckDefault(HdfsServiceCheck):
+  def service_check(self, env):
+    import params
+
+    env.set_params(params)
+    unique = functions.get_unique_id_and_date()
+    dir = params.hdfs_tmp_dir
+    tmp_file = format("{dir}/{unique}")
+
+    safemode_command = format("dfsadmin -fs {namenode_address} -safemode get | grep OFF")
+
+    if params.security_enabled:
+      Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
+        user=params.hdfs_user
+      )
+    ExecuteHadoop(safemode_command,
+                  user=params.hdfs_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=20,
+                  bin_dir=params.hadoop_bin_dir
+    )
+    params.HdfsResource(dir,
+                        type="directory",
+                        action="create_on_execute",
+                        mode=0777
+    )
+    params.HdfsResource(tmp_file,
+                        type="file",
+                        action="delete_on_execute",
+    )
+
+    params.HdfsResource(tmp_file,
+                        type="file",
+                        source="/etc/passwd",
+                        action="create_on_execute"
+    )
+    params.HdfsResource(None, action="execute")
+
+    if params.has_journalnode_hosts:
+      if params.security_enabled:
+        for host in params.journalnode_hosts:
+          if params.https_only:
+            uri = format("https://{host}:{journalnode_port}")
+          else:
+            uri = format("http://{host}:{journalnode_port}")
+          response, errmsg, time_millis = curl_krb_request(params.tmp_dir, params.smoke_user_keytab,
+                                                           params.smokeuser_principal, uri, "jn_service_check",
+                                                           params.kinit_path_local, False, None, params.smoke_user)
+          if not response:
+            Logger.error("Cannot access WEB UI on: {0}. Error : {1}".format(uri, errmsg))
+            return 1
+      else:
+        journalnode_port = params.journalnode_port
+        checkWebUIFileName = "checkWebUI.py"
+        checkWebUIFilePath = format("{tmp_dir}/{checkWebUIFileName}")
+        comma_sep_jn_hosts = ",".join(params.journalnode_hosts)
+        checkWebUICmd = format("ambari-python-wrap {checkWebUIFilePath} -m {comma_sep_jn_hosts} -p {journalnode_port} -s {https_only}")
+        File(checkWebUIFilePath,
+             content=StaticFile(checkWebUIFileName),
+             mode=0775)
+
+        Execute(checkWebUICmd,
+                logoutput=True,
+                try_sleep=3,
+                tries=5,
+                user=params.smoke_user
+        )
+
+    if params.is_namenode_master:
+      if params.has_zkfc_hosts:
+        pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
+        pid_file = format("{pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
+        check_zkfc_process_cmd = as_user(format(
+          "ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"), user=params.hdfs_user)
+        Execute(check_zkfc_process_cmd,
+                logoutput=True,
+                try_sleep=3,
+                tries=5
+        )
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HdfsServiceCheckWindows(HdfsServiceCheck):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    unique = functions.get_unique_id_and_date()
+
+    #Hadoop uses POSIX-style paths, separator is always /
+    dir = params.hdfs_tmp_dir
+    tmp_file = dir + '/' + unique
+
+    #commands for execution
+    hadoop_cmd = "cmd /C %s" % (os.path.join(params.hadoop_home, "bin", "hadoop.cmd"))
+    create_dir_cmd = "%s fs -mkdir %s" % (hadoop_cmd, dir)
+    own_dir = "%s fs -chmod 777 %s" % (hadoop_cmd, dir)
+    test_dir_exists = "%s fs -test -e %s" % (hadoop_cmd, dir)
+    cleanup_cmd = "%s fs -rm %s" % (hadoop_cmd, tmp_file)
+    create_file_cmd = "%s fs -put %s %s" % (hadoop_cmd, os.path.join(params.hadoop_conf_dir, "core-site.xml"), tmp_file)
+    test_cmd = "%s fs -test -e %s" % (hadoop_cmd, tmp_file)
+
+    hdfs_cmd = "cmd /C %s" % (os.path.join(params.hadoop_home, "bin", "hdfs.cmd"))
+    safemode_command = "%s dfsadmin -safemode get | %s OFF" % (hdfs_cmd, params.grep_exe)
+
+    Execute(safemode_command, logoutput=True, try_sleep=3, tries=20)
+    Execute(create_dir_cmd, user=params.hdfs_user,logoutput=True, ignore_failures=True)
+    Execute(own_dir, user=params.hdfs_user,logoutput=True)
+    Execute(test_dir_exists, user=params.hdfs_user,logoutput=True)
+    Execute(create_file_cmd, user=params.hdfs_user,logoutput=True)
+    Execute(test_cmd, user=params.hdfs_user,logoutput=True)
+    Execute(cleanup_cmd, user=params.hdfs_user,logoutput=True)
+
+if __name__ == "__main__":
+  HdfsServiceCheck().execute()
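
For secure clusters, the JournalNode check above goes through curl_krb_request, which (as used here) handles the Kerberos login and SPNEGO request and returns a (response, error message, elapsed millis) tuple. A condensed sketch of that loop, keeping the same argument order as the call in the script (the helper name journalnodes_reachable is made up):

from resource_management.libraries.functions.curl_krb_request import curl_krb_request

def journalnodes_reachable(params, scheme="http"):
  for host in params.journalnode_hosts:
    uri = "%s://%s:%s" % (scheme, host, params.journalnode_port)
    response, errmsg, time_millis = curl_krb_request(
      params.tmp_dir, params.smoke_user_keytab, params.smokeuser_principal,
      uri, "jn_service_check", params.kinit_path_local, False, None, params.smoke_user)
    if not response:
      return False, "Cannot access WEB UI on: %s. Error : %s" % (uri, errmsg)
  return True, None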

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/setup_ranger_hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/setup_ranger_hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/setup_ranger_hdfs.py
new file mode 100644
index 0000000..e3aff9d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/setup_ranger_hdfs.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.constants import Direction
+from resource_management.libraries.functions.format import format
+
+
+def setup_ranger_hdfs(upgrade_type=None):
+  import params
+
+  if params.has_ranger_admin:
+
+
+    stack_version = None
+
+    if upgrade_type is not None:
+      stack_version = params.version
+
+    if params.retryAble:
+      Logger.info("HDFS: Setup ranger: command retry enables thus retrying if ranger admin is down !")
+    else:
+      Logger.info("HDFS: Setup ranger: command retry not enabled thus skipping if ranger admin is down !")
+
+
+    if params.xml_configurations_supported:
+        from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin
+        api_version=None
+        if params.stack_supports_ranger_kerberos:
+          api_version='v2'
+        setup_ranger_plugin('hadoop-client', 'hdfs', params.previous_jdbc_jar,
+                             params.downloaded_custom_connector, params.driver_curl_source,
+                             params.driver_curl_target, params.java_home,
+                             params.repo_name, params.hdfs_ranger_plugin_repo,
+                             params.ranger_env, params.ranger_plugin_properties,
+                             params.policy_user, params.policymgr_mgr_url,
+                             params.enable_ranger_hdfs, conf_dict=params.hadoop_conf_dir,
+                             component_user=params.hdfs_user, component_group=params.user_group, cache_service_list=['hdfs'],
+                             plugin_audit_properties=params.config['configurations']['ranger-hdfs-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-hdfs-audit'],
+                             plugin_security_properties=params.config['configurations']['ranger-hdfs-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-hdfs-security'],
+                             plugin_policymgr_ssl_properties=params.config['configurations']['ranger-hdfs-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-hdfs-policymgr-ssl'],
+                             component_list=['hadoop-client'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
+                             credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password,
+                             ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
+                             api_version=api_version ,stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble,
+                             is_security_enabled = params.security_enabled,
+                             is_stack_supports_ranger_kerberos = params.stack_supports_ranger_kerberos,
+                             component_user_principal=params.nn_principal_name if params.security_enabled else None,
+                             component_user_keytab=params.nn_keytab if params.security_enabled else None)
+    else:
+        from resource_management.libraries.functions.setup_ranger_plugin import setup_ranger_plugin
+
+        setup_ranger_plugin('hadoop-client', 'hdfs', params.previous_jdbc_jar,
+                            params.downloaded_custom_connector, params.driver_curl_source,
+                            params.driver_curl_target, params.java_home,
+                            params.repo_name, params.hdfs_ranger_plugin_repo,
+                            params.ranger_env, params.ranger_plugin_properties,
+                            params.policy_user, params.policymgr_mgr_url,
+                            params.enable_ranger_hdfs, conf_dict=params.hadoop_conf_dir,
+                            component_user=params.hdfs_user, component_group=params.user_group, cache_service_list=['hdfs'],
+                            plugin_audit_properties=params.config['configurations']['ranger-hdfs-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-hdfs-audit'],
+                            plugin_security_properties=params.config['configurations']['ranger-hdfs-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-hdfs-security'],
+                            plugin_policymgr_ssl_properties=params.config['configurations']['ranger-hdfs-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-hdfs-policymgr-ssl'],
+                            component_list=['hadoop-client'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
+                            credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password,
+                            ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
+                            stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble)
+
+    if stack_version and params.upgrade_direction == Direction.UPGRADE:
+      # when upgrading to stack remove_ranger_hdfs_plugin_env, this env file must be removed
+      if check_stack_feature(StackFeature.REMOVE_RANGER_HDFS_PLUGIN_ENV, stack_version):
+        source_file = os.path.join(params.hadoop_conf_dir, 'set-hdfs-plugin-env.sh')
+        target_file = source_file + ".bak"
+        Execute(("mv", source_file, target_file), sudo=True, only_if=format("test -f {source_file}"))
+  else:
+    Logger.info('Ranger admin not installed')
+
+def create_ranger_audit_hdfs_directories():
+  import params
+
+  if params.has_ranger_admin:
+    if params.xml_configurations_supported and params.enable_ranger_hdfs and params.xa_audit_hdfs_is_enabled:
+      params.HdfsResource("/ranger/audit",
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.hdfs_user,
+                         group=params.hdfs_user,
+                         mode=0755,
+                         recursive_chmod=True,
+      )
+      params.HdfsResource("/ranger/audit/hdfs",
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.hdfs_user,
+                         group=params.hdfs_user,
+                         mode=0700,
+                         recursive_chmod=True,
+      )
+      params.HdfsResource(None, action="execute")
+  else:
+    Logger.info('Ranger admin not installed')

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/snamenode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/snamenode.py
new file mode 100644
index 0000000..0f1f438
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/snamenode.py
@@ -0,0 +1,155 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+
+from hdfs_snamenode import snamenode
+from hdfs import hdfs
+from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons import OSConst
+
+from resource_management.core.logger import Logger
+
+class SNameNode(Script):
+  def install(self, env):
+    import params
+    env.set_params(params)
+    self.install_packages(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hdfs("secondarynamenode")
+    snamenode(action="configure")
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    snamenode(action="start")
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    snamenode(action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    snamenode(action="status")
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class SNameNodeDefault(SNameNode):
+
+  def get_component_name(self):
+    return "hadoop-hdfs-secondarynamenode"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-hdfs-secondarynamenode", params.version)
+
+  def security_status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    props_value_check = {"hadoop.security.authentication": "kerberos",
+                         "hadoop.security.authorization": "true"}
+    props_empty_check = ["hadoop.security.auth_to_local"]
+    props_read_check = None
+    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
+                                                props_read_check)
+    props_value_check = None
+    props_empty_check = ['dfs.secondary.namenode.kerberos.internal.spnego.principal',
+                         'dfs.secondary.namenode.keytab.file',
+                         'dfs.secondary.namenode.kerberos.principal']
+    props_read_check = ['dfs.secondary.namenode.keytab.file']
+    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
+                                                props_read_check)
+
+    hdfs_expectations = {}
+    hdfs_expectations.update(core_site_expectations)
+    hdfs_expectations.update(hdfs_site_expectations)
+
+    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
+                                                 {'core-site.xml': FILE_TYPE_XML,
+                                                  'hdfs-site.xml': FILE_TYPE_XML})
+
+    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
+        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
+      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
+      if not result_issues:  # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ('hdfs-site' not in security_params or
+                  'dfs.secondary.namenode.keytab.file' not in security_params['hdfs-site'] or
+                  'dfs.secondary.namenode.kerberos.principal' not in security_params['hdfs-site']):
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal are not set property."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.hdfs_user,
+                                security_params['hdfs-site']['dfs.secondary.namenode.keytab.file'],
+                                security_params['hdfs-site'][
+                                  'dfs.secondary.namenode.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+      
+  def get_log_folder(self):
+    import params
+    return params.hdfs_log_dir
+  
+  def get_user(self):
+    import params
+    return params.hdfs_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.snamenode_pid_file]
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class SNameNodeWindows(SNameNode):
+  pass
+
+if __name__ == "__main__":
+  SNameNode().execute()
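
A note on the security_status() pattern above: it builds per-site "expectations" and then validates the on-disk XML against them before attempting a kinit. The following is a simplified, self-contained sketch of that kind of check, not the Ambari security_commons API; the property names and keytab path are illustrative only.

# Simplified illustration of the expectation checks that security_status()
# delegates to build_expectations()/validate_security_config_properties().
# A None expected value means "must be present and non-empty".
expectations = {
    "core-site": {"hadoop.security.authentication": "kerberos"},
    "hdfs-site": {"dfs.secondary.namenode.keytab.file": None,
                  "dfs.secondary.namenode.kerberos.principal": None},
}

actual = {
    "core-site": {"hadoop.security.authentication": "kerberos"},
    "hdfs-site": {"dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/snn.service.keytab"},
}

issues = {}
for site, props in expectations.items():
    for prop, expected in props.items():
        value = actual.get(site, {}).get(prop)
        if expected is not None and value != expected:
            issues.setdefault(site, []).append("%s != %s" % (prop, expected))
        elif expected is None and not value:
            issues.setdefault(site, []).append("%s is empty" % prop)

print(issues)  # {'hdfs-site': ['dfs.secondary.namenode.kerberos.principal is empty']}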

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/status_params.py
new file mode 100644
index 0000000..153f9a6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/status_params.py
@@ -0,0 +1,58 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from ambari_commons import OSCheck
+
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+
+config = Script.get_config()
+
+if OSCheck.is_windows_family():
+  namenode_win_service_name = "namenode"
+  datanode_win_service_name = "datanode"
+  snamenode_win_service_name = "secondarynamenode"
+  journalnode_win_service_name = "journalnode"
+  zkfc_win_service_name = "zkfc"
+else:
+  hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+  hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+  hadoop_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
+  datanode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
+  namenode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-namenode.pid")
+  snamenode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-secondarynamenode.pid")
+  journalnode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-journalnode.pid")
+  zkfc_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
+  nfsgateway_pid_file = format("{hadoop_pid_dir_prefix}/root/hadoop_privileged_nfs3.pid")
+
+  # Security related/required params
+  hostname = config['hostname']
+  security_enabled = config['configurations']['cluster-env']['security_enabled']
+  hdfs_user_principal = config['configurations']['hadoop-env']['hdfs_principal_name']
+  hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+
+  hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+
+  kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+  tmp_dir = Script.get_tmp_dir()
+
+stack_name = default("/hostLevelParams/stack_name", None)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/utils.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/utils.py
new file mode 100644
index 0000000..4577ad2
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/utils.py
@@ -0,0 +1,383 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+import re
+import urllib2
+import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
+
+from resource_management.core.resources.system import Directory, File, Execute
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions import check_process_status
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.core import shell
+from resource_management.core.shell import as_user, as_sudo
+from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions.curl_krb_request import curl_krb_request
+from resource_management.core.exceptions import Fail
+from resource_management.libraries.functions.namenode_ha_utils import get_namenode_states
+from resource_management.libraries.functions.show_logs import show_logs
+from ambari_commons.inet_utils import ensure_ssl_using_tls_v1
+from zkfc_slave import ZkfcSlaveDefault
+
+ensure_ssl_using_tls_v1()
+
+def safe_zkfc_op(action, env):
+  """
+  Idempotent operation on the zkfc process to either start or stop it.
+  :param action: start or stop
+  :param env: environment
+  """
+  Logger.info("Performing action {0} on zkfc.".format(action))
+  zkfc = None
+  if action == "start":
+    try:
+      ZkfcSlaveDefault.status_static(env)
+    except ComponentIsNotRunning:
+      ZkfcSlaveDefault.start_static(env)
+
+  if action == "stop":
+    try:
+      ZkfcSlaveDefault.status_static(env)
+    except ComponentIsNotRunning:
+      pass
+    else:
+      ZkfcSlaveDefault.stop_static(env)
+
+def initiate_safe_zkfc_failover():
+  """
+  If this is the active namenode, initiate a safe failover and wait for it to become the standby.
+
+  If an error occurs, force a failover to happen by killing zkfc on this host. In this case, ZKFC will also
+  have to be started manually during the Restart.
+  """
+  import params
+
+  # Must kinit before running the HDFS command
+  if params.security_enabled:
+    Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
+            user = params.hdfs_user)
+
+  active_namenode_id = None
+  standby_namenode_id = None
+  active_namenodes, standby_namenodes, unknown_namenodes = get_namenode_states(params.hdfs_site, params.security_enabled, params.hdfs_user)
+  if active_namenodes:
+    active_namenode_id = active_namenodes[0][0]
+  if standby_namenodes:
+    standby_namenode_id = standby_namenodes[0][0]
+
+  if active_namenode_id:
+    Logger.info(format("Active NameNode id: {active_namenode_id}"))
+  if standby_namenode_id:
+    Logger.info(format("Standby NameNode id: {standby_namenode_id}"))
+  if unknown_namenodes:
+    for unknown_namenode in unknown_namenodes:
+      Logger.info("NameNode HA state for {0} is unknown".format(unknown_namenode[0]))
+
+  if params.namenode_id == active_namenode_id and params.other_namenode_id == standby_namenode_id:
+    # Failover if this NameNode is active and other NameNode is up and in standby (i.e. ready to become active on failover)
+    Logger.info(format("NameNode {namenode_id} is active and NameNode {other_namenode_id} is in standby"))
+
+    failover_command = format("hdfs haadmin -ns {dfs_ha_nameservices} -failover {namenode_id} {other_namenode_id}")
+    check_standby_cmd = format("hdfs haadmin -ns {dfs_ha_nameservices} -getServiceState {namenode_id} | grep standby")
+
+    msg = "Rolling Upgrade - Initiating a ZKFC failover on active NameNode host {0}.".format(params.hostname)
+    Logger.info(msg)
+    code, out = shell.call(failover_command, user=params.hdfs_user, logoutput=True)
+    Logger.info(format("Rolling Upgrade - failover command returned {code}"))
+    wait_for_standby = False
+
+    if code == 0:
+      wait_for_standby = True
+    else:
+      # Try to kill ZKFC manually
+      was_zkfc_killed = kill_zkfc(params.hdfs_user)
+      code, out = shell.call(check_standby_cmd, user=params.hdfs_user, logoutput=True)
+      Logger.info(format("Rolling Upgrade - check for standby returned {code}"))
+      if code == 255 and out:
+        Logger.info("Rolling Upgrade - NameNode is already down.")
+      else:
+        if was_zkfc_killed:
+          # Only mandate that this be the standby namenode if ZKFC was indeed killed to initiate a failover.
+          wait_for_standby = True
+
+    if wait_for_standby:
+      Logger.info("Waiting for this NameNode to become the standby one.")
+      Execute(check_standby_cmd,
+              user=params.hdfs_user,
+              tries=50,
+              try_sleep=6,
+              logoutput=True)
+  else:
+    msg = "Rolling Upgrade - Skipping ZKFC failover on NameNode host {0}.".format(params.hostname)
+    Logger.info(msg)
+
+def kill_zkfc(zkfc_user):
+  """
+  There are two potential methods for failing over the namenode, especially during a Rolling Upgrade.
+  Option 1. Kill zkfc on primary namenode provided that the secondary is up and has zkfc running on it.
+  Option 2. Silent failover
+  :param zkfc_user: User that started the ZKFC process.
+  :return: Return True if ZKFC was killed, otherwise False.
+  """
+  import params
+  if params.dfs_ha_enabled:
+    if params.zkfc_pid_file:
+      check_process = as_user(format("ls {zkfc_pid_file} > /dev/null 2>&1 && ps -p `cat {zkfc_pid_file}` > /dev/null 2>&1"), user=zkfc_user)
+      code, out = shell.call(check_process)
+      if code == 0:
+        Logger.debug("ZKFC is running and will be killed.")
+        kill_command = format("kill -15 `cat {zkfc_pid_file}`")
+        Execute(kill_command,
+                user=zkfc_user
+        )
+        File(params.zkfc_pid_file,
+             action = "delete",
+             )
+        return True
+  return False
+
+def service(action=None, name=None, user=None, options="", create_pid_dir=False,
+            create_log_dir=False):
+  """
+  :param action: Either "start" or "stop"
+  :param name: Component name, e.g., "namenode", "datanode", "secondarynamenode", "zkfc"
+  :param user: User to run the command as
+  :param options: Additional options to pass to command as a string
+  :param create_pid_dir: Create PID directory
+  :param create_log_dir: Create log file directory
+  """
+  import params
+
+  options = options if options else ""
+  pid_dir = format("{hadoop_pid_dir_prefix}/{user}")
+  pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid")
+  hadoop_env_exports = {
+    'HADOOP_LIBEXEC_DIR': params.hadoop_libexec_dir
+  }
+  log_dir = format("{hdfs_log_dir_prefix}/{user}")
+
+  # NFS GATEWAY is always started by root using jsvc due to rpcbind bugs
+  # on Linux such as CentOS6.2. https://bugzilla.redhat.com/show_bug.cgi?id=731542
+  if name == "nfs3" :
+    pid_file = format("{pid_dir}/hadoop_privileged_nfs3.pid")
+    custom_export = {
+      'HADOOP_PRIVILEGED_NFS_USER': params.hdfs_user,
+      'HADOOP_PRIVILEGED_NFS_PID_DIR': pid_dir,
+      'HADOOP_PRIVILEGED_NFS_LOG_DIR': log_dir
+    }
+    hadoop_env_exports.update(custom_export)
+
+  process_id_exists_command = as_sudo(["test", "-f", pid_file]) + " && " + as_sudo(["pgrep", "-F", pid_file])
+
+  # on STOP, directories shouldn't be created, since the old dirs
+  # (created during the previous start) are still in use while stopping
+  if action != "stop":
+    if name == "nfs3":
+      Directory(params.hadoop_pid_dir_prefix,
+                mode=0755,
+                owner=params.root_user,
+                group=params.root_group
+      )
+    else:
+      Directory(params.hadoop_pid_dir_prefix,
+                  mode=0755,
+                  owner=params.hdfs_user,
+                  group=params.user_group
+      )
+    if create_pid_dir:
+      Directory(pid_dir,
+                owner=user,
+                group=params.user_group,
+                create_parents = True)
+    if create_log_dir:
+      if name == "nfs3":
+        Directory(log_dir,
+                  mode=0775,
+                  owner=params.root_user,
+                  group=params.user_group)
+      else:
+        Directory(log_dir,
+                  owner=user,
+                  group=params.user_group,
+                  create_parents = True)
+
+  if params.security_enabled and name == "datanode":
+    ## The directory where pid files are stored in the secure data environment.
+    hadoop_secure_dn_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
+    hadoop_secure_dn_pid_file = format("{hadoop_secure_dn_pid_dir}/hadoop_secure_dn.pid")
+
+    # At stack versions supporting DATANODE_NON_ROOT and later, the datanode may be started as a non-root user even in a secure cluster
+    if not (params.stack_version_formatted and check_stack_feature(StackFeature.DATANODE_NON_ROOT, params.stack_version_formatted)) or params.secure_dn_ports_are_in_use:
+      user = "root"
+      pid_file = format(
+        "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
+
+    if action == 'stop' and (params.stack_version_formatted and check_stack_feature(StackFeature.DATANODE_NON_ROOT, params.stack_version_formatted)) and \
+      os.path.isfile(hadoop_secure_dn_pid_file):
+        # Special handling is needed when a non-root secure DN is reconfigured
+        # and then restarted to pick up new configs; otherwise we would not
+        # be able to stop the running instance
+        user = "root"
+        
+        try:
+          check_process_status(hadoop_secure_dn_pid_file)
+          
+          custom_export = {
+            'HADOOP_SECURE_DN_USER': params.hdfs_user
+          }
+          hadoop_env_exports.update(custom_export)
+          
+        except ComponentIsNotRunning:
+          pass
+
+  hadoop_daemon = format("{hadoop_bin}/hadoop-daemon.sh")
+
+  if user == "root":
+    cmd = [hadoop_daemon, "--config", params.hadoop_conf_dir, action, name]
+    if options:
+      cmd += [options, ]
+    daemon_cmd = as_sudo(cmd)
+  else:
+    cmd = format("{ulimit_cmd} {hadoop_daemon} --config {hadoop_conf_dir} {action} {name}")
+    if options:
+      cmd += " " + options
+    daemon_cmd = as_user(cmd, user)
+     
+  if action == "start":
+    # remove pid file from dead process
+    File(pid_file, action="delete", not_if=process_id_exists_command)
+    
+    try:
+      Execute(daemon_cmd, not_if=process_id_exists_command, environment=hadoop_env_exports)
+    except:
+      show_logs(log_dir, user)
+      raise
+  elif action == "stop":
+    try:
+      Execute(daemon_cmd, only_if=process_id_exists_command, environment=hadoop_env_exports)
+    except:
+      show_logs(log_dir, user)
+      raise
+    File(pid_file, action="delete")
+
+def get_jmx_data(nn_address, modeler_type, metric, encrypted=False, security_enabled=False):
+  """
+  :param nn_address: Namenode address, e.g., host:port; it MAY already be preceded with "http://" or "https://".
+  If it is not, the encrypted param determines the scheme.
+  :param modeler_type: Modeler type to query using startswith function
+  :param metric: Metric to return
+  :return: Return an object representation of the metric, or None if it does not exist
+  """
+  if not nn_address or not modeler_type or not metric:
+    return None
+
+  nn_address = nn_address.strip()
+  if not nn_address.startswith("http"):
+    nn_address = ("https://" if encrypted else "http://") + nn_address
+  if not nn_address.endswith("/"):
+    nn_address = nn_address + "/"
+
+  nn_address = nn_address + "jmx"
+  Logger.info("Retrieve modeler: %s, metric: %s from JMX endpoint %s" % (modeler_type, metric, nn_address))
+
+  if security_enabled:
+    import params
+    data, error_msg, time_millis = curl_krb_request(params.tmp_dir, params.smoke_user_keytab, params.smokeuser_principal, nn_address,
+                            "jn_upgrade", params.kinit_path_local, False, None, params.smoke_user)
+  else:
+    data = urllib2.urlopen(nn_address).read()
+  my_data = None
+  if data:
+    data_dict = json.loads(data)
+    if data_dict:
+      for el in data_dict['beans']:
+        if el is not None and el['modelerType'] is not None and el['modelerType'].startswith(modeler_type):
+          if metric in el:
+            my_data = el[metric]
+            if my_data:
+              my_data = json.loads(str(my_data))
+              break
+  return my_data
+
+def get_port(address):
+  """
+  Extracts port from the address like 0.0.0.0:1019
+  """
+  if address is None:
+    return None
+  m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
+  if m is not None and len(m.groups()) >= 2:
+    return int(m.group(2))
+  else:
+    return None
+
+
+def is_secure_port(port):
+  """
+  Returns True if port is root-owned on *nix systems
+  """
+  if port is not None:
+    return port < 1024
+  else:
+    return False
+
+def is_previous_fs_image():
+  """
+  Return true if there's a previous folder in the HDFS namenode directories.
+  """
+  import params
+  if params.dfs_name_dir:
+    nn_name_dirs = params.dfs_name_dir.split(',')
+    for nn_dir in nn_name_dirs:
+      prev_dir = os.path.join(nn_dir, "previous")
+      if os.path.isdir(prev_dir):
+        return True
+  return False
+
+def get_hdfs_binary(distro_component_name):
+  """
+  Get the hdfs binary to use depending on the stack and version.
+  :param distro_component_name: e.g., hadoop-hdfs-namenode, hadoop-hdfs-datanode
+  :return: The hdfs binary to use
+  """
+  import params
+  hdfs_binary = "hdfs"
+  if params.stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.stack_version_formatted):
+    hdfs_binary = "{0}/current/{1}/bin/hdfs".format(params.stack_root, distro_component_name)
+
+  return hdfs_binary
+
+def get_dfsadmin_base_command(hdfs_binary, use_specific_namenode = False):
+  """
+  Get the dfsadmin base command constructed using hdfs_binary path and passing namenode address as explicit -fs argument
+  :param hdfs_binary: path to hdfs binary to use
+  :param use_specific_namenode: flag if set and Namenode HA is enabled, then the dfsadmin command will use
+  current namenode's address
+  :return: the constructed dfsadmin base command
+  """
+  import params
+  dfsadmin_base_command = ""
+  if params.dfs_ha_enabled and use_specific_namenode:
+    dfsadmin_base_command = format("{hdfs_binary} dfsadmin -fs hdfs://{params.namenode_rpc}")
+  else:
+    dfsadmin_base_command = format("{hdfs_binary} dfsadmin -fs {params.namenode_address}")
+  return dfsadmin_base_command
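
A note on the port helpers above: get_port() and is_secure_port() underpin checks like params.secure_dn_ports_are_in_use used earlier in service(), which presumably decides whether the configured DataNode ports are privileged (root-owned). Below is a minimal, self-contained sketch of that logic with assumed address values; it mirrors the two helpers rather than importing the Ambari resource_management modules.

import re

def get_port(address):
    # Mirror of utils.get_port(): extract the numeric port from "host:port",
    # optionally preceded by "http://" or "https://"; return None if unparseable.
    if address is None:
        return None
    m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
    return int(m.group(2)) if m else None

def is_secure_port(port):
    # Ports below 1024 are root-owned on *nix systems.
    return port is not None and port < 1024

# Assumed example values: the conventional secure DataNode transfer port 1019
# is privileged, while the default unsecured port 50010 is not.
print(is_secure_port(get_port("0.0.0.0:1019")))          # True
print(is_secure_port(get_port("http://0.0.0.0:50010")))  # False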

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/zkfc_slave.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/zkfc_slave.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/zkfc_slave.py
new file mode 100644
index 0000000..f1891a5
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/zkfc_slave.py
@@ -0,0 +1,225 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+# this is needed to avoid a circular dependency since utils.py calls this class
+import utils
+from hdfs import hdfs
+
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+from resource_management.core.resources.system import Directory
+from resource_management.core.resources.service import Service
+from resource_management.core import shell
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.security_commons import build_expectations
+from resource_management.libraries.functions.security_commons import cached_kinit_executor
+from resource_management.libraries.functions.security_commons import get_params_from_filesystem
+from resource_management.libraries.functions.security_commons import validate_security_config_properties
+from resource_management.libraries.functions.security_commons import FILE_TYPE_XML
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.version import compare_versions
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions.version_select_util import get_component_version
+
+class ZkfcSlave(Script):
+  def get_component_name(self):
+    import params
+    if params.version_for_stack_feature_checks and check_stack_feature(StackFeature.ZKFC_VERSION_ADVERTISED, params.version_for_stack_feature_checks):
+      # params.version is not defined when installing cluster from blueprint
+      return "hadoop-hdfs-zkfc"
+    pass
+
+  def install(self, env):
+    import params
+    env.set_params(params)
+    self.install_packages(env)
+    
+  def configure(self, env):
+    ZkfcSlave.configure_static(env)
+    
+  @staticmethod
+  def configure_static(env):
+    import params
+    env.set_params(params)
+    hdfs("zkfc_slave")
+    pass
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class ZkfcSlaveDefault(ZkfcSlave):
+
+  def start(self, env, upgrade_type=None):
+    ZkfcSlaveDefault.start_static(env, upgrade_type)
+    
+  @staticmethod
+  def start_static(env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    ZkfcSlave.configure_static(env)
+    Directory(params.hadoop_pid_dir_prefix,
+              mode=0755,
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+
+    # format the znode for this HA setup
+    # only run this format command if the active namenode hostname is set
+    # The Ambari UI HA Wizard prompts the user to run this command
+    # manually, so this guarantees it is only run in the Blueprints case
+    if params.dfs_ha_enabled and \
+       params.dfs_ha_namenode_active is not None:
+      success =  initialize_ha_zookeeper(params)
+      if not success:
+        raise Fail("Could not initialize HA state in zookeeper")
+
+    utils.service(
+      action="start", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
+      create_log_dir=True
+    )
+  
+  def stop(self, env, upgrade_type=None):
+    ZkfcSlaveDefault.stop_static(env, upgrade_type)
+
+  @staticmethod
+  def stop_static(env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    utils.service(
+      action="stop", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
+      create_log_dir=True
+    )
+
+
+  def status(self, env):
+    ZkfcSlaveDefault.status_static(env)
+    
+  @staticmethod
+  def status_static(env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.zkfc_pid_file)
+
+  def security_status(self, env):
+    import status_params
+    env.set_params(status_params)
+    props_value_check = {"hadoop.security.authentication": "kerberos",
+                         "hadoop.security.authorization": "true"}
+    props_empty_check = ["hadoop.security.auth_to_local"]
+    props_read_check = None
+    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
+                                                props_read_check)
+    hdfs_expectations = {}
+    hdfs_expectations.update(core_site_expectations)
+
+    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
+                                                   {'core-site.xml': FILE_TYPE_XML})
+    result_issues = validate_security_config_properties(security_params, hdfs_expectations)
+    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
+        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
+      if not result_issues:  # If all validations passed successfully
+        if status_params.hdfs_user_principal or status_params.hdfs_user_keytab:
+          try:
+            cached_kinit_executor(status_params.kinit_path_local,
+                                  status_params.hdfs_user,
+                                  status_params.hdfs_user_keytab,
+                                  status_params.hdfs_user_principal,
+                                  status_params.hostname,
+                                  status_params.tmp_dir)
+            self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+          except Exception as e:
+            self.put_structured_out({"securityState": "ERROR"})
+            self.put_structured_out({"securityStateErrorInfo": str(e)})
+        else:
+          self.put_structured_out(
+            {"securityIssuesFound": "hdfs principal and/or keytab file is not specified"})
+          self.put_structured_out({"securityState": "UNSECURED"})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+      
+  def get_log_folder(self):
+    import params
+    return params.hdfs_log_dir
+  
+  def get_user(self):
+    import params
+    return params.hdfs_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.zkfc_pid_file]
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+    if params.version and check_stack_feature(StackFeature.ZKFC_VERSION_ADVERTISED, params.version) \
+        and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-hdfs-zkfc", params.version)
+
+def initialize_ha_zookeeper(params):
+  try:
+    iterations = 10
+    formatZK_cmd = "hdfs zkfc -formatZK -nonInteractive"
+    Logger.info("Initialize HA state in ZooKeeper: %s" % (formatZK_cmd))
+    for i in range(iterations):
+      Logger.info('Try %d out of %d' % (i+1, iterations))
+      code, out = shell.call(formatZK_cmd, logoutput=False, user=params.hdfs_user)
+      if code == 0:
+        Logger.info("HA state initialized in ZooKeeper successfully")
+        return True
+      elif code == 2:
+        Logger.info("HA state already initialized in ZooKeeper")
+        return True
+      else:
+        Logger.warning('HA state initialization in ZooKeeper failed with error code %d. Will retry' % (code))
+  except Exception as ex:
+    Logger.error('HA state initialization in ZooKeeper threw an exception. Reason %s' %(str(ex)))
+  return False
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class ZkfcSlaveWindows(ZkfcSlave):
+  def start(self, env):
+    import params
+    self.configure(env)
+    Service(params.zkfc_win_service_name, action="start")
+
+  def stop(self, env):
+    import params
+    Service(params.zkfc_win_service_name, action="stop")
+
+  def status(self, env):
+    import status_params
+    from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
+
+    env.set_params(status_params)
+    check_windows_service_status(status_params.zkfc_win_service_name)
+
+if __name__ == "__main__":
+  ZkfcSlave().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/templates/exclude_hosts_list.j2
new file mode 100644
index 0000000..a92cdc1
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/templates/exclude_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in hdfs_exclude_file %}
+{{host}}
+{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/templates/hdfs.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/templates/hdfs.conf.j2 b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/templates/hdfs.conf.j2
new file mode 100644
index 0000000..fad5621
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/templates/hdfs.conf.j2
@@ -0,0 +1,35 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{hdfs_user}}   - nofile {{hdfs_user_nofile_limit}}
+{{hdfs_user}}   - nproc  {{hdfs_user_nproc_limit}}
\ No newline at end of file
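
The template above produces two /etc/security/limits.conf-style entries for the HDFS user. As a quick sketch of the rendered output, assuming the jinja2 package is available and using made-up limit values (the real ones come from the hadoop-env configuration):

from jinja2 import Template

# Illustrative rendering only; the limit values are placeholders.
tmpl = Template(
    "{{hdfs_user}}   - nofile {{hdfs_user_nofile_limit}}\n"
    "{{hdfs_user}}   - nproc  {{hdfs_user_nproc_limit}}\n"
)
print(tmpl.render(hdfs_user="hdfs",
                  hdfs_user_nofile_limit=128000,
                  hdfs_user_nproc_limit=65536))
# hdfs   - nofile 128000
# hdfs   - nproc  65536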

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/templates/slaves.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/templates/slaves.j2 b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/templates/slaves.j2
new file mode 100644
index 0000000..4a9e713
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/templates/slaves.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in slave_hosts %}
+{{host}}
+{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/quicklinks/quicklinks.json b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/quicklinks/quicklinks.json
new file mode 100644
index 0000000..5318ba0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/quicklinks/quicklinks.json
@@ -0,0 +1,80 @@
+{
+  "name": "default",
+  "description": "default quick links configuration",
+  "configuration": {
+    "protocol":
+    {
+      "type":"https",
+      "checks":[
+        {
+          "property":"dfs.http.policy",
+          "desired":"HTTPS_ONLY",
+          "site":"hdfs-site"
+        }
+      ]
+    },
+
+    "links": [
+      {
+        "name": "namenode_ui",
+        "label": "NameNode UI",
+        "component_name": "NAMENODE",
+        "url":"%@://%@:%@",
+        "requires_user_name": "false",
+        "port":{
+          "http_property": "dfs.namenode.http-address",
+          "http_default_port": "50070",
+          "https_property": "dfs.namenode.https-address",
+          "https_default_port": "50470",
+          "regex": "\\w*:(\\d+)",
+          "site": "hdfs-site"
+        }
+      },
+      {
+        "name": "namenode_logs",
+        "label": "NameNode Logs",
+        "component_name": "NAMENODE",
+        "url":"%@://%@:%@/logs",
+        "requires_user_name": "false",
+        "port":{
+          "http_property": "dfs.namenode.http-address",
+          "http_default_port": "50070",
+          "https_property": "dfs.namenode.https-address",
+          "https_default_port": "50470",
+          "regex": "\\w*:(\\d+)",
+          "site": "hdfs-site"
+        }
+      },
+      {
+        "name": "namenode_jmx",
+        "label": "NameNode JMX",
+        "component_name": "NAMENODE",
+        "url":"%@://%@:%@/jmx",
+        "requires_user_name": "false",
+        "port":{
+          "http_property": "dfs.namenode.http-address",
+          "http_default_port": "50070",
+          "https_property": "dfs.namenode.https-address",
+          "https_default_port": "50470",
+          "regex": "\\w*:(\\d+)",
+          "site": "hdfs-site"
+        }
+      },
+      {
+        "name": "Thread Stacks",
+        "label": "Thread Stacks",
+        "component_name": "NAMENODE",
+        "url":"%@://%@:%@/stacks",
+        "requires_user_name": "false",
+        "port":{
+          "http_property": "dfs.namenode.http-address",
+          "http_default_port": "50070",
+          "https_property": "dfs.namenode.https-address",
+          "https_default_port": "50470",
+          "regex": "\\w*:(\\d+)",
+          "site": "hdfs-site"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file
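
Each quick link above resolves its port by applying the declared "regex" to the configured hdfs-site address property, falling back to the declared default port. A small sketch of that extraction, using a hypothetical dfs.namenode.http-address value:

import re

# Hypothetical configured value; the quicklinks regex "\w*:(\d+)" captures
# the port portion, and the declared default is used if nothing matches.
address = "c6401.ambari.apache.org:50070"
match = re.search(r"\w*:(\d+)", address)
port = match.group(1) if match else "50070"
print(port)  # 50070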

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/themes/theme.json b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/themes/theme.json
new file mode 100644
index 0000000..6f2b797
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/themes/theme.json
@@ -0,0 +1,179 @@
+{
+  "name": "default",
+  "description": "Default theme for HDFS service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "settings",
+            "display-name": "Settings",
+            "layout": {
+              "tab-columns": "2",
+              "tab-rows": "1",
+              "sections": [
+                {
+                  "name": "section-namenode",
+                  "display-name": "NameNode",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-namenode-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-datanode",
+                  "display-name": "DataNode",
+                  "row-index": "0",
+                  "column-index": "1",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-datanode-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "hdfs-site/dfs.namenode.name.dir",
+          "subsection-name": "subsection-namenode-col1"
+        },
+        {
+          "config": "hadoop-env/namenode_heapsize",
+          "subsection-name": "subsection-namenode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.namenode.handler.count",
+          "subsection-name": "subsection-namenode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.namenode.safemode.threshold-pct",
+          "subsection-name": "subsection-namenode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.datanode.data.dir",
+          "subsection-name": "subsection-datanode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.datanode.failed.volumes.tolerated",
+          "subsection-name": "subsection-datanode-col1"
+        },
+        {
+          "config": "hadoop-env/dtnode_heapsize",
+          "subsection-name": "subsection-datanode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.datanode.max.transfer.threads",
+          "subsection-name": "subsection-datanode-col1"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "hdfs-site/dfs.namenode.name.dir",
+        "widget": {
+          "type": "directories"
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.namenode.safemode.threshold-pct",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "percent"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.namenode.handler.count",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hadoop-env/namenode_heapsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.datanode.failed.volumes.tolerated",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.datanode.data.dir",
+        "widget": {
+          "type": "directories"
+        }
+      },
+      {
+        "config": "hadoop-env/dtnode_heapsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.datanode.max.transfer.threads",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      }
+    ]
+  }
+}
+


[21/51] [abbrv] ambari git commit: AMBARI-19130 - Downgrade Can Create Multiple Mappings For Latest Configs (jonathanhurley)

Posted by sm...@apache.org.
AMBARI-19130 - Downgrade Can Create Multiple Mappings For Latest Configs (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/facfa8ce
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/facfa8ce
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/facfa8ce

Branch: refs/heads/branch-feature-AMBARI-18901
Commit: facfa8cea364370434f9e3596147801d73c0d1f0
Parents: 82f9401
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Wed Dec 7 20:55:38 2016 -0500
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Thu Dec 8 17:35:18 2016 -0500

----------------------------------------------------------------------
 .../entities/ClusterConfigMappingEntity.java    |  20 ++-
 .../server/state/cluster/ClusterImpl.java       | 104 +++++++++-----
 .../server/orm/dao/ServiceConfigDAOTest.java    | 144 +++++++++----------
 .../server/state/cluster/ClusterTest.java       |  95 ++++++++++++
 4 files changed, 253 insertions(+), 110 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/facfa8ce/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigMappingEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigMappingEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigMappingEntity.java
index 04c6030..5748dc9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigMappingEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigMappingEntity.java
@@ -27,15 +27,17 @@ import javax.persistence.NamedQueries;
 import javax.persistence.NamedQuery;
 import javax.persistence.Table;
 
+import com.google.common.base.Objects;
+
 /**
  * Entity that maps to a cluster config mapping.
  */
-@Table(name = "clusterconfigmapping")
 @Entity
+@Table(name = "clusterconfigmapping")
 @IdClass(ClusterConfigMappingEntityPK.class)
-@NamedQueries({
-  @NamedQuery(name = "ClusterConfigMappingEntity.findLatestClusterConfigMappingsByType",
-    query = "SELECT mapping FROM ClusterConfigMappingEntity mapping WHERE mapping.clusterId = :clusterId AND mapping.selectedInd > 0 AND mapping.typeName = :typeName")})
+@NamedQueries({ @NamedQuery(
+    name = "ClusterConfigMappingEntity.findLatestClusterConfigMappingsByType",
+    query = "SELECT mapping FROM ClusterConfigMappingEntity mapping WHERE mapping.clusterId = :clusterId AND mapping.selectedInd > 0 AND mapping.typeName = :typeName") })
 
 public class ClusterConfigMappingEntity {
 
@@ -192,4 +194,14 @@ public class ClusterConfigMappingEntity {
 
     return true;
   }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public String toString() {
+    return Objects.toStringHelper(this).add("clusterId", clusterId).add("type", typeName).add("tag",
+        tag).add("selected", selectedInd).add("created", createTimestamp).toString();
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/facfa8ce/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
index 649fe38..b62c834 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
@@ -22,6 +22,7 @@ import java.text.MessageFormat;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Date;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -144,8 +145,10 @@ import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.base.Functions;
 import com.google.common.base.Predicate;
 import com.google.common.collect.HashMultimap;
+import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.ListMultimap;
 import com.google.common.collect.Maps;
@@ -3072,6 +3075,7 @@ public class ClusterImpl implements Cluster {
    * {@inheritDoc}
    */
   @Override
+  @Transactional
   public void applyLatestConfigurations(StackId stackId) {
     clusterGlobalLock.writeLock().lock();
 
@@ -3079,36 +3083,33 @@ public class ClusterImpl implements Cluster {
       ClusterEntity clusterEntity = getClusterEntity();
       Collection<ClusterConfigMappingEntity> configMappingEntities = clusterEntity.getConfigMappingEntities();
 
+      // hash them for easier retrieval later - these are the same entity
+      // instances which exist on the cluster entity, so modification of the CCM
+      // entity here will affect the cluster CCM entities as well
+      ImmutableMap<Object, ClusterConfigMappingEntity> ccmMap = Maps.uniqueIndex(configMappingEntities, Functions.identity());
+
       // disable all configs
       for (ClusterConfigMappingEntity e : configMappingEntities) {
         LOG.debug("{} with tag {} is unselected", e.getType(), e.getTag());
         e.setSelected(0);
       }
 
-      List<ClusterConfigMappingEntity> clusterConfigMappingsForStack = clusterDAO.getClusterConfigMappingsByStack(
+      // work through the in-memory list, finding only the most recent mapping per type
+      Collection<ClusterConfigMappingEntity> latestConfigMappingByStack = getLatestConfigMappingsForStack(
           clusterEntity.getClusterId(), stackId);
 
-      Collection<ClusterConfigMappingEntity> latestConfigMappingByStack = getLatestConfigMapping(
-          clusterConfigMappingsForStack);
-
-      // loop through all configs and set the latest to enabled for the
-      // specified stack
-      for(ClusterConfigMappingEntity configMappingEntity: configMappingEntities){
-        String type = configMappingEntity.getType();
-        String tag =  configMappingEntity.getTag();
+      for( ClusterConfigMappingEntity latestConfigMapping : latestConfigMappingByStack ){
+        ClusterConfigMappingEntity mapping = ccmMap.get(latestConfigMapping);
+        mapping.setSelected(1);
 
-        for (ClusterConfigMappingEntity latest : latestConfigMappingByStack) {
-          String latestType = latest.getType();
-          String latestTag = latest.getTag();
-
-          // find the latest config of a given mapping entity
-          if (StringUtils.equals(type, latestType) && StringUtils.equals(tag, latestTag)) {
-            LOG.info("{} with version tag {} is selected for stack {}", type, tag, stackId.toString());
-            configMappingEntity.setSelected(1);
-          }
-        }
+        LOG.info("Settting {} with version tag {} created on {} to selected for stack {}",
+            mapping.getType(), mapping.getTag(), new Date(mapping.getCreateTimestamp()),
+            stackId.toString());
       }
 
+      // since the entities which were modified came from the cluster entity's
+      // list to begin with, we can just save them right back - no need for a
+      // new collection since the CCM entity instances were modified directly
       clusterEntity.setConfigMappingEntities(configMappingEntities);
       clusterEntity = clusterDAO.merge(clusterEntity);
       clusterDAO.mergeConfigMappings(configMappingEntities);
@@ -3130,23 +3131,60 @@ public class ClusterImpl implements Cluster {
     jpaEventPublisher.publish(event);
   }
 
-  public Collection<ClusterConfigMappingEntity> getLatestConfigMapping(List<ClusterConfigMappingEntity> clusterConfigMappingEntities){
-    Map<String, ClusterConfigMappingEntity> temp = new HashMap<String, ClusterConfigMappingEntity>();
-    for (ClusterConfigMappingEntity e : clusterConfigMappingEntities) {
-      String type = e.getType();
-      if(temp.containsKey(type)){
-        ClusterConfigMappingEntity entityStored = temp.get(type);
-        Long timestampStored = entityStored.getCreateTimestamp();
-        Long timestamp = e.getCreateTimestamp();
-        if(timestamp > timestampStored){
-          temp.put(type, e); //find a newer config for the given type
-        }
-      } else {
-        temp.put(type, e); //first time encounter a type, add it
+  /**
+   * Retrieves all of the configuration mappings (selected and unselected) for
+   * the specified stack and then iterates through them, returning the most
+   * recent mapping for every config type.
+   * <p/>
+   * Because of how configuration revert works, multiple mappings can be created
+   * for the same type/tag combination; the only difference between them is the
+   * timestamp, which reflects when each mapping was created.
+   * <p/>
+   * JPQL cannot easily be used here because some databases do not support the
+   * necessary grouping and IN clause. For example: <br/>
+   *
+   * <pre>
+   * SELECT mapping FROM clusterconfigmappingentity mapping
+   *   WHERE (mapping.typename, mapping.createtimestamp) IN
+   *     (SELECT latest.typename, MAX(latest.createtimestamp)
+   *      FROM clusterconfigmappingentity latest
+   *      GROUP BY latest.typename)
+   * </pre>
+   *
+   * @param clusterId
+   *          the cluster ID
+   * @param stackId
+   *          the stack to retrieve the mappings for (not {@code null}).
+   * @return the most recent mapping (selected or unselected) for the specified
+   *         stack for every type.
+   */
+  public Collection<ClusterConfigMappingEntity> getLatestConfigMappingsForStack(long clusterId,
+      StackId stackId) {
+
+    // get all mappings for the specified stack (which could include
+    // duplicates since a config revert creates a duplicate mapping with a
+    // different timestamp)
+    List<ClusterConfigMappingEntity> clusterConfigMappingsForStack = clusterDAO.getClusterConfigMappingsByStack(
+        clusterId, stackId);
+
+    Map<String, ClusterConfigMappingEntity> latestMappingsByType = new HashMap<String, ClusterConfigMappingEntity>();
+    for (ClusterConfigMappingEntity mapping : clusterConfigMappingsForStack) {
+      String type = mapping.getType();
+
+      if (!latestMappingsByType.containsKey(type)) {
+        latestMappingsByType.put(type, mapping);
+        continue;
+      }
+
+      ClusterConfigMappingEntity entityStored = latestMappingsByType.get(type);
+      Long timestampStored = entityStored.getCreateTimestamp();
+      Long timestamp = mapping.getCreateTimestamp();
+      if (timestamp > timestampStored) {
+        latestMappingsByType.put(type, mapping);
       }
     }
 
-    return temp.values();
+    return latestMappingsByType.values();
   }
 
   /**

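The helper above reduces a possibly duplicated list of mappings to the newest one per
config type. A minimal, self-contained sketch of that reduction, using a simplified
Mapping stand-in for ClusterConfigMappingEntity (the real JPA entity is assumed and not
shown here):

    import java.util.Collection;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    class Mapping {
      final String type;
      final long createTimestamp;

      Mapping(String type, long createTimestamp) {
        this.type = type;
        this.createTimestamp = createTimestamp;
      }
    }

    class LatestByType {
      // keep only the most recently created mapping for every config type
      static Collection<Mapping> latest(List<Mapping> all) {
        Map<String, Mapping> byType = new HashMap<String, Mapping>();
        for (Mapping m : all) {
          Mapping stored = byType.get(m.type);
          if (stored == null || m.createTimestamp > stored.createTimestamp) {
            byType.put(m.type, m);
          }
        }
        return byType.values();
      }
    }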
http://git-wip-us.apache.org/repos/asf/ambari/blob/facfa8ce/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java
index 2388c11..aafe557 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java
@@ -17,15 +17,11 @@
  */
 package org.apache.ambari.server.orm.dao;
 
-import static org.easymock.EasyMock.createMockBuilder;
-
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Date;
 import java.util.List;
 
-import junit.framework.Assert;
-
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
@@ -40,9 +36,12 @@ import org.apache.ambari.server.orm.entities.ResourceTypeEntity;
 import org.apache.ambari.server.orm.entities.ServiceConfigEntity;
 import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.security.authorization.ResourceType;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.cluster.ClusterImpl;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -393,7 +392,7 @@ public class ServiceConfigDAOTest {
     serviceConfigs = serviceConfigDAO.getLatestServiceConfigs(clusterId, HDP_02);
     Assert.assertEquals(2, serviceConfigs.size());
   }
-  
+
   @Test
   public void testConfiguration() throws Exception{
     initClusterEntities();
@@ -401,17 +400,17 @@ public class ServiceConfigDAOTest {
 
     Assert.assertTrue(!clusterEntity.getClusterConfigEntities().isEmpty());
     Assert.assertTrue(!clusterEntity.getConfigMappingEntities().isEmpty());
-    
+
     Assert.assertEquals(5, clusterEntity.getClusterConfigEntities().size());
     Assert.assertEquals(3, clusterEntity.getConfigMappingEntities().size());
   }
-  
+
   @Test
   public void testGetClusterConfigMappingByStack() throws Exception{
     initClusterEntities();
-    
+
     ClusterEntity clusterEntity = clusterDAO.findByName("c1");
-    
+
     List<ClusterConfigMappingEntity> clusterConfigMappingEntities = clusterDAO.getClusterConfigMappingsByStack(clusterEntity.getClusterId(), HDP_01);
     Assert.assertEquals(2, clusterConfigMappingEntities .size());
 
@@ -420,14 +419,14 @@ public class ServiceConfigDAOTest {
     Assert.assertEquals("version1", tag1);
     String type1 = e1.getType();
     Assert.assertEquals("oozie-site", type1);
-    
+
     ClusterConfigMappingEntity e2 = clusterConfigMappingEntities.get(1);
     String tag2 = e2.getTag();
     Assert.assertEquals("version2", tag2);
     String type2 = e2.getType();
     Assert.assertEquals("oozie-site", type2);
   }
-  
+
   /**
    * Test the get latest configuration query against clusterconfig table with configuration groups inserted
    * */
@@ -435,9 +434,9 @@ public class ServiceConfigDAOTest {
   public void testGetClusterConfigMappingByStackCG() throws Exception{
     initClusterEntitiesWithConfigGroups();
     ClusterEntity clusterEntity = clusterDAO.findByName("c1");
-    
+
     List<ConfigGroupEntity> configGroupEntities = configGroupDAO.findAllByTag("OOZIE");
-    
+
     Assert.assertNotNull(configGroupEntities);
     ConfigGroupEntity configGroupEntity = configGroupEntities.get(0);
     Assert.assertNotNull(configGroupEntity);
@@ -447,7 +446,7 @@ public class ServiceConfigDAOTest {
     Assert.assertEquals("oozie_server", configGroupEntity.getGroupName());
     Assert.assertEquals("OOZIE", configGroupEntity.getTag());
     Assert.assertEquals("oozie server", configGroupEntity.getDescription());
-    
+
     List<ClusterConfigMappingEntity> clusterConfigMappingEntities = clusterDAO.getClusterConfigMappingsByStack(clusterEntity.getClusterId(), HDP_01);
     Assert.assertEquals(2, clusterConfigMappingEntities .size());
 
@@ -456,97 +455,95 @@ public class ServiceConfigDAOTest {
     Assert.assertEquals("version1", tag1);
     String type1 = e1.getType();
     Assert.assertEquals("oozie-site", type1);
-    
+
     ClusterConfigMappingEntity e2 = clusterConfigMappingEntities.get(1);
     String tag2 = e2.getTag();
     Assert.assertEquals("version2", tag2);
     String type2 = e2.getType();
     Assert.assertEquals("oozie-site", type2);
   }
-  
+
   /**
-   * Test  
+   * Test
    *
-   * When the last configuration of a given configuration type to be stored into the clusterconfig table is 
+   * When the last configuration of a given configuration type to be stored into the clusterconfig table is
    * for a configuration group, there is no corresponding entry generated in the clusterconfigmapping.
    *
    * Therefore, the getlatestconfiguration query should skip configuration groups stored in the clusterconfig table.
    *
-   * Test to determine the latest configuration of a given type whose version_tag 
+   * Test to determine the latest configuration of a given type whose version_tag
    * exists in the clusterconfigmapping table.
    *
    * */
   @Test
-  public void testGetLatestClusterConfigMappingByStack() throws Exception{
-    ClusterImpl cluster =
-        createMockBuilder(ClusterImpl.class).
-          addMockedMethod("getSessionManager").
-          addMockedMethod("getClusterName").
-          addMockedMethod("getSessionAttributes").
-          createMock();
-    
+  public void testGetLatestClusterConfigMappingByStack() throws Exception {
+    Clusters clusters = injector.getInstance(Clusters.class);
+    clusters.addCluster("c1", HDP_01);
+
+    Cluster cluster = clusters.getCluster("c1");
+
     initClusterEntities();
-    ClusterEntity clusterEntity = clusterDAO.findByName("c1");
-    List<ClusterConfigMappingEntity> clusterConfigMappingEntities = clusterDAO.getClusterConfigMappingsByStack(clusterEntity.getClusterId(), HDP_01);
-    Collection<ClusterConfigMappingEntity> latestMapingEntities = cluster.getLatestConfigMapping(clusterConfigMappingEntities);
+
+    Collection<ClusterConfigMappingEntity> latestMapingEntities = ((ClusterImpl) cluster).getLatestConfigMappingsForStack(
+        cluster.getClusterId(), HDP_01);
+
     Assert.assertEquals(1, latestMapingEntities.size());
     for(ClusterConfigMappingEntity e: latestMapingEntities){
       Assert.assertEquals("version2", e.getTag());
       Assert.assertEquals("oozie-site", e.getType());
     }
   }
-  
+
   /**
-   * Test  
+   * Test
    *
-   * When the last configuration of a given configuration type to be stored into the clusterconfig table is 
+   * When the last configuration of a given configuration type to be stored into the clusterconfig table is
    * for a configuration group, there is no corresponding entry generated in the clusterconfigmapping.
    *
    * Therefore, the getlatestconfiguration query should skip configuration groups stored in the clusterconfig table.
    *
-   * Test to determine the latest configuration of a given type whose version_tag 
+   * Test to determine the latest configuration of a given type whose version_tag
    * exists in the clusterconfigmapping table.
    *
    * */
   @Test
   public void testGetLatestClusterConfigMappingByStackCG() throws Exception{
-    ClusterImpl cluster =
-        createMockBuilder(ClusterImpl.class).
-          addMockedMethod("getSessionManager").
-          addMockedMethod("getClusterName").
-          addMockedMethod("getSessionAttributes").
-          createMock();
-    
+    Clusters clusters = injector.getInstance(Clusters.class);
+    clusters.addCluster("c1", HDP_01);
+
+    Cluster cluster = clusters.getCluster("c1");
+
     initClusterEntitiesWithConfigGroups();
-    ClusterEntity clusterEntity = clusterDAO.findByName("c1");
-    List<ClusterConfigMappingEntity> clusterConfigMappingEntities = clusterDAO.getClusterConfigMappingsByStack(clusterEntity.getClusterId(), HDP_01);
-    Collection<ClusterConfigMappingEntity> latestMapingEntities = cluster.getLatestConfigMapping(clusterConfigMappingEntities);
+
+    Collection<ClusterConfigMappingEntity> latestMapingEntities = ((ClusterImpl) cluster).getLatestConfigMappingsForStack(
+        cluster.getClusterId(), HDP_01);
+
     Assert.assertEquals(1, latestMapingEntities.size());
     for(ClusterConfigMappingEntity e: latestMapingEntities){
       Assert.assertEquals("version2", e.getTag());
       Assert.assertEquals("oozie-site", e.getType());
     }
   }
-  
+
   private void initClusterEntities() throws Exception{
     String userName = "admin";
-    
+
     ServiceConfigEntity oozieServiceConfigEntity = createServiceConfig("OOZIE", userName, 1L, 1L, System.currentTimeMillis(), null);
     ClusterEntity  clusterEntity = oozieServiceConfigEntity.getClusterEntity();
-    
+
     Long clusterId = clusterEntity.getClusterId();
-    
+
     if(null == clusterId){
       clusterId = 1L;
       clusterEntity.setClusterId(clusterId);
       clusterEntity = clusterDAO.merge(clusterEntity);
     }
-    
+
     StackEntity stackEntityHDP01 = stackDAO.find(HDP_01.getStackName(),HDP_01.getStackVersion());
     StackEntity stackEntityHDP02 = stackDAO.find(HDP_02.getStackName(),HDP_02.getStackVersion());
-    
+
     String oozieSite = "oozie-site";
-    
+
     for (int i = 1; i < 6; i++){
       ClusterConfigEntity entity = new ClusterConfigEntity();
       entity.setClusterEntity(clusterEntity);
@@ -555,22 +552,23 @@ public class ServiceConfigDAOTest {
       entity.setVersion(Long.valueOf(i));
       entity.setTag("version"+i);
       entity.setTimestamp(new Date().getTime());
-      if(i < 4)
+      if(i < 4) {
         entity.setStack(stackEntityHDP01);
-      else
+      } else {
         entity.setStack(stackEntityHDP02);
+      }
       entity.setData("");
       clusterDAO.createConfig(entity);
       clusterEntity.getClusterConfigEntities().add(entity);
       clusterDAO.merge(clusterEntity);
     }
-    
+
     Collection<ClusterConfigMappingEntity> entities = clusterEntity.getConfigMappingEntities();
     if(null == entities){
       entities = new ArrayList<ClusterConfigMappingEntity>();
       clusterEntity.setConfigMappingEntities(entities);
-    }  
-    
+    }
+
     ClusterConfigMappingEntity e1 = new ClusterConfigMappingEntity();
     e1.setClusterEntity(clusterEntity);
     e1.setClusterId(clusterEntity.getClusterId());
@@ -581,7 +579,7 @@ public class ServiceConfigDAOTest {
     e1.setTag("version1");
     entities.add(e1);
     clusterDAO.merge(clusterEntity);
-    
+
     ClusterConfigMappingEntity e2 = new ClusterConfigMappingEntity();
     e2.setClusterEntity(clusterEntity);
     e2.setClusterId(clusterEntity.getClusterId());
@@ -592,7 +590,7 @@ public class ServiceConfigDAOTest {
     e2.setTag("version2");
     entities.add(e2);
     clusterDAO.merge(clusterEntity);
-    
+
     ClusterConfigMappingEntity e3 = new ClusterConfigMappingEntity();
     e3.setClusterEntity(clusterEntity);
     e3.setClusterId(clusterEntity.getClusterId());
@@ -603,25 +601,25 @@ public class ServiceConfigDAOTest {
     e3.setTag("version4");
     entities.add(e3);
     clusterDAO.merge(clusterEntity);
-  } 
-  
+  }
+
   private void initClusterEntitiesWithConfigGroups() throws Exception{
     String userName = "admin";
-    
+
     ServiceConfigEntity oozieServiceConfigEntity = createServiceConfig("OOZIE", userName, 1L, 1L, System.currentTimeMillis(), null);
     ClusterEntity  clusterEntity = oozieServiceConfigEntity.getClusterEntity();
-    
+
     Long clusterId = clusterEntity.getClusterId();
-    
+
     if(null == clusterId){
       clusterId = 1L;
       clusterEntity.setClusterId(clusterId);
       clusterEntity = clusterDAO.merge(clusterEntity);
     }
-    
+
     StackEntity stackEntityHDP01 = stackDAO.find(HDP_01.getStackName(),HDP_01.getStackVersion());
     String oozieSite = "oozie-site";
-    
+
     int count = 3;
     for (int i = 1; i < count; i++){
       ClusterConfigEntity entity = new ClusterConfigEntity();
@@ -637,13 +635,13 @@ public class ServiceConfigDAOTest {
       clusterEntity.getClusterConfigEntities().add(entity);
       clusterDAO.merge(clusterEntity);
     }
-    
+
     Collection<ClusterConfigMappingEntity> entities = clusterEntity.getConfigMappingEntities();
     if(null == entities){
       entities = new ArrayList<ClusterConfigMappingEntity>();
       clusterEntity.setConfigMappingEntities(entities);
-    }  
-    
+    }
+
     ClusterConfigMappingEntity e1 = new ClusterConfigMappingEntity();
     e1.setClusterEntity(clusterEntity);
     e1.setClusterId(clusterEntity.getClusterId());
@@ -654,7 +652,7 @@ public class ServiceConfigDAOTest {
     e1.setTag("version1");
     entities.add(e1);
     clusterDAO.merge(clusterEntity);
-    
+
     ClusterConfigMappingEntity e2 = new ClusterConfigMappingEntity();
     e2.setClusterEntity(clusterEntity);
     e2.setClusterId(clusterEntity.getClusterId());
@@ -665,7 +663,7 @@ public class ServiceConfigDAOTest {
     e2.setTag("version2");
     entities.add(e2);
     clusterDAO.merge(clusterEntity);
-    
+
     ConfigGroupEntity configGroupEntity = new ConfigGroupEntity();
 
     ResourceTypeEntity resourceTypeEntity = resourceTypeDAO.findById(ResourceType.CLUSTER.getId());
@@ -695,9 +693,9 @@ public class ServiceConfigDAOTest {
     List<ClusterConfigEntity> configEntities = new
       ArrayList<ClusterConfigEntity>();
     configEntities.add(configEntity);
-    
+
     configGroupDAO.create(configGroupEntity);
-    
+
     if (configEntities != null && !configEntities.isEmpty()) {
       List<ConfigGroupConfigMappingEntity> configMappingEntities = new
         ArrayList<ConfigGroupConfigMappingEntity>();
@@ -719,7 +717,7 @@ public class ServiceConfigDAOTest {
         configMappingEntities.add(configMappingEntity);
         configGroupConfigMappingDAO.create(configMappingEntity);
       }
-      
+
       configGroupEntity.setConfigGroupConfigMappingEntities(configMappingEntities);
       configGroupDAO.merge(configGroupEntity);
     }

http://git-wip-us.apache.org/repos/asf/ambari/blob/facfa8ce/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
index fc3646a..daa3abc 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
@@ -2467,6 +2467,101 @@ public class ClusterTest {
   }
 
   /**
+   * Tests that {@link Cluster#applyLatestConfigurations(StackId)} sets the
+   * right configs to enabled when there are duplicate mappings for type/tag.
+   * Only the most recent should be enabled.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testApplyLatestConfigurationsWithMultipleMappings() throws Exception {
+    createDefaultCluster();
+    Cluster cluster = clusters.getCluster("c1");
+    ClusterEntity clusterEntity = clusterDAO.findByName("c1");
+    StackId stackId = cluster.getCurrentStackVersion();
+
+    StackEntity currentStack = stackDAO.find(stackId.getStackName(), stackId.getStackVersion());
+
+    String configType = "foo-type";
+    String configTag = "version-1";
+
+    // create the config for the mappings
+    ClusterConfigEntity clusterConfig = new ClusterConfigEntity();
+    clusterConfig.setClusterEntity(clusterEntity);
+    clusterConfig.setConfigId(1L);
+    clusterConfig.setStack(currentStack);
+    clusterConfig.setTag(configTag);
+    clusterConfig.setData("{}");
+    clusterConfig.setType(configType);
+    clusterConfig.setTimestamp(1L);
+    clusterConfig.setVersion(1L);
+
+    clusterDAO.createConfig(clusterConfig);
+    clusterEntity.getClusterConfigEntities().add(clusterConfig);
+    clusterEntity = clusterDAO.merge(clusterEntity);
+
+    // create 3 mappings for the same type/tag, each with a different time
+
+    // config mapping 1
+    ClusterConfigMappingEntity configMapping = new ClusterConfigMappingEntity();
+    configMapping.setClusterEntity(clusterEntity);
+    configMapping.setCreateTimestamp(1L);
+    configMapping.setSelected(0);
+    configMapping.setTag(configTag);
+    configMapping.setType(configType);
+    configMapping.setUser("admin");
+    clusterDAO.persistConfigMapping(configMapping);
+    clusterEntity.getConfigMappingEntities().add(configMapping);
+
+    // config mapping 2
+    configMapping = new ClusterConfigMappingEntity();
+    configMapping.setClusterEntity(clusterEntity);
+    configMapping.setCreateTimestamp(2L);
+    configMapping.setSelected(0);
+    configMapping.setTag(configTag);
+    configMapping.setType(configType);
+    configMapping.setUser("admin");
+    clusterDAO.persistConfigMapping(configMapping);
+    clusterEntity.getConfigMappingEntities().add(configMapping);
+
+    // config mapping 3
+    configMapping = new ClusterConfigMappingEntity();
+    configMapping.setClusterEntity(clusterEntity);
+    configMapping.setCreateTimestamp(3L);
+    configMapping.setSelected(0);
+    configMapping.setTag(configTag);
+    configMapping.setType(configType);
+    configMapping.setUser("admin");
+    clusterDAO.persistConfigMapping(configMapping);
+    clusterEntity.getConfigMappingEntities().add(configMapping);
+
+    clusterEntity = clusterDAO.merge(clusterEntity);
+
+    // check all 3 mappings are disabled
+    Collection<ClusterConfigMappingEntity> clusterConfigMappings = clusterEntity.getConfigMappingEntities();
+    Assert.assertEquals(3, clusterConfigMappings.size());
+    for (ClusterConfigMappingEntity clusterConfigMapping : clusterConfigMappings) {
+      Assert.assertEquals(0, clusterConfigMapping.isSelected());
+    }
+
+    // apply configurations and check to see we've set the one with the latest
+    // timestamp ONLY
+    cluster.applyLatestConfigurations(cluster.getCurrentStackVersion());
+    clusterEntity = clusterDAO.findByName("c1");
+
+    // now check that the new config mapping is enabled
+    clusterConfigMappings = clusterEntity.getConfigMappingEntities();
+    Assert.assertEquals(3, clusterConfigMappings.size());
+    for (ClusterConfigMappingEntity clusterConfigMapping : clusterConfigMappings) {
+      if (clusterConfigMapping.getCreateTimestamp() < 3) {
+        Assert.assertEquals(0, clusterConfigMapping.isSelected());
+      } else {
+        Assert.assertEquals(1, clusterConfigMapping.isSelected());
+      }
+    }
+  }
+
+  /**
    * Tests that applying configurations for a given stack correctly sets
    * {@link DesiredConfig}s.
    */


[04/51] [abbrv] ambari git commit: Merge branch 'branch-feature-AMBARI-18456' into trunk

Posted by sm...@apache.org.
Merge branch 'branch-feature-AMBARI-18456' into trunk


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/704170e4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/704170e4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/704170e4

Branch: refs/heads/branch-feature-AMBARI-18901
Commit: 704170e4e1c960d90f325660ea1137be0ac3db42
Parents: 4a565d3
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Wed Dec 7 16:49:43 2016 -0500
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Wed Dec 7 20:25:16 2016 -0500

----------------------------------------------------------------------
 .../AmbariManagementControllerImpl.java         |  13 +-
 .../internal/ConfigGroupResourceProvider.java   |  60 +-
 .../serveraction/upgrades/ConfigureAction.java  |  16 +-
 .../serveraction/upgrades/FixLzoCodecPath.java  |  16 +-
 .../upgrades/FixOozieAdminUsers.java            |   9 +-
 .../upgrades/HBaseConfigCalculation.java        |  14 +-
 .../HBaseEnvMaxDirectMemorySizeAction.java      |  13 +-
 .../upgrades/HiveEnvClasspathAction.java        |  13 +-
 .../upgrades/HiveZKQuorumConfigAction.java      |   2 +-
 .../upgrades/OozieConfigCalculation.java        |  13 +-
 .../upgrades/RangerConfigCalculation.java       |   4 +-
 .../RangerKerberosConfigCalculation.java        |  20 +-
 .../upgrades/RangerKmsProxyConfig.java          |   3 +-
 .../upgrades/SparkShufflePropertyConfig.java    |   3 +-
 .../upgrades/YarnConfigCalculation.java         |   2 +-
 .../org/apache/ambari/server/state/Config.java  |  22 +-
 .../ambari/server/state/ConfigFactory.java      |  20 +-
 .../apache/ambari/server/state/ConfigImpl.java  | 480 +++++++--------
 .../server/state/cluster/ClusterImpl.java       |   6 +-
 .../server/state/configgroup/ConfigGroup.java   |  33 +-
 .../state/configgroup/ConfigGroupFactory.java   |  34 +-
 .../state/configgroup/ConfigGroupImpl.java      | 613 +++++++++----------
 .../ambari/server/topology/AmbariContext.java   |  25 +-
 .../ambari/server/update/HostUpdateHelper.java  |  10 +-
 .../ExecutionCommandWrapperTest.java            |  17 +-
 .../TestActionSchedulerThreading.java           |  19 +-
 .../server/agent/HeartbeatTestHelper.java       |   6 +-
 .../server/agent/TestHeartbeatMonitor.java      |  13 +-
 .../configuration/RecoveryConfigHelperTest.java |   2 +-
 .../AmbariManagementControllerImplTest.java     |  22 +-
 .../AmbariManagementControllerTest.java         | 109 +---
 .../UpgradeResourceProviderHDP22Test.java       |  14 +-
 .../internal/UpgradeResourceProviderTest.java   |  13 +-
 .../ComponentVersionCheckActionTest.java        |  19 +-
 .../upgrades/ConfigureActionTest.java           |  96 +--
 .../upgrades/FixOozieAdminUsersTest.java        |  76 +--
 .../HBaseEnvMaxDirectMemorySizeActionTest.java  | 187 +++---
 .../upgrades/HiveEnvClasspathActionTest.java    | 148 ++---
 .../upgrades/HiveZKQuorumConfigActionTest.java  |   2 +-
 .../upgrades/KerberosKeytabsActionTest.java     |  28 +-
 .../upgrades/RangerConfigCalculationTest.java   |  72 +--
 .../RangerKerberosConfigCalculationTest.java    | 173 ++----
 .../upgrades/RangerKmsProxyConfigTest.java      |  36 +-
 .../SparkShufflePropertyConfigTest.java         |  30 +-
 .../upgrades/UpgradeActionTest.java             |  28 +-
 .../ambari/server/state/ConfigGroupTest.java    |  26 +-
 .../ambari/server/state/ConfigHelperTest.java   |  49 +-
 .../state/alerts/AlertReceivedListenerTest.java |   8 +-
 .../state/cluster/ClusterDeadlockTest.java      |  17 +-
 .../server/state/cluster/ClusterTest.java       | 133 +---
 .../server/state/cluster/ClustersTest.java      |   8 +-
 ...omponentHostConcurrentWriteDeadlockTest.java |   9 +-
 .../ambari/server/state/host/HostTest.java      |   6 +-
 .../svccomphost/ServiceComponentHostTest.java   |  24 +-
 .../server/topology/AmbariContextTest.java      |  38 +-
 .../server/update/HostUpdateHelperTest.java     |  40 +-
 .../ambari/server/utils/StageUtilsTest.java     |   4 +
 57 files changed, 1198 insertions(+), 1718 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 0bab2a9..c9a3e04 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -55,7 +55,6 @@ import java.util.EnumMap;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.LinkedHashSet;
 import java.util.LinkedList;
 import java.util.List;
@@ -80,10 +79,10 @@ import org.apache.ambari.server.ServiceComponentNotFoundException;
 import org.apache.ambari.server.ServiceNotFoundException;
 import org.apache.ambari.server.StackAccessException;
 import org.apache.ambari.server.actionmanager.ActionManager;
+import org.apache.ambari.server.actionmanager.CommandExecutionType;
 import org.apache.ambari.server.actionmanager.HostRoleCommand;
 import org.apache.ambari.server.actionmanager.RequestFactory;
 import org.apache.ambari.server.actionmanager.Stage;
-import org.apache.ambari.server.actionmanager.CommandExecutionType;
 import org.apache.ambari.server.actionmanager.StageFactory;
 import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.agent.ExecutionCommand.KeyNames;
@@ -895,17 +894,11 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
   @Override
   public Config createConfig(Cluster cluster, String type, Map<String, String> properties,
                              String versionTag, Map<String, Map<String, String>> propertiesAttributes) {
-    Config config = configFactory.createNew(cluster, type,
-      properties, propertiesAttributes);
 
-    if (!StringUtils.isEmpty(versionTag)) {
-      config.setTag(versionTag);
-    }
-
-    config.persist();
+    Config config = configFactory.createNew(cluster, type, versionTag, properties,
+        propertiesAttributes);
 
     cluster.addConfig(config);
-
     return config;
   }
 

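The controller change above reflects the new ConfigFactory contract used throughout this
commit: the version tag is passed at creation time, so the explicit setTag()/persist()
calls are no longer needed. A short sketch of the resulting call pattern, as a fragment
within the controller (the type and tag values are illustrative):

    // tag is supplied to the factory; no setTag() or persist() call is needed
    Config config = configFactory.createNew(cluster, "oozie-site", "version2",
        properties, propertiesAttributes);
    cluster.addConfig(config);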
http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java
index 96bb8f9..2373068 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java
@@ -17,7 +17,16 @@
  */
 package org.apache.ambari.server.controller.internal;
 
-import com.google.inject.Inject;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.ClusterNotFoundException;
 import org.apache.ambari.server.ConfigGroupNotFoundException;
@@ -48,7 +57,7 @@ import org.apache.ambari.server.security.authorization.RoleAuthorization;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
+import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.configgroup.ConfigGroup;
 import org.apache.ambari.server.state.configgroup.ConfigGroupFactory;
@@ -56,15 +65,7 @@ import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
+import com.google.inject.Inject;
 
 @StaticallyInject
 public class ConfigGroupResourceProvider extends
@@ -102,6 +103,12 @@ public class ConfigGroupResourceProvider extends
   private static HostDAO hostDAO;
 
   /**
+   * Used for creating {@link Config} instances to return in the REST response.
+   */
+  @Inject
+  private static ConfigFactory configFactory;
+
+  /**
    * Create a  new resource provider for the given management controller.
    *
    * @param propertyIds          the property ids
@@ -568,22 +575,19 @@ public class ConfigGroupResourceProvider extends
         }
       }
 
+      configLogger.info("User {} is creating new configuration group {} for tag {} in cluster {}",
+          getManagementController().getAuthName(), request.getGroupName(), request.getTag(),
+          cluster.getClusterName());
+
       ConfigGroup configGroup = configGroupFactory.createNew(cluster,
         request.getGroupName(),
         request.getTag(), request.getDescription(),
         request.getConfigs(), hosts);
 
-      verifyConfigs(configGroup.getConfigurations(), cluster.getClusterName());
       configGroup.setServiceName(serviceName);
 
-      // Persist before add, since id is auto-generated
-      configLogger.info("Persisting new Config group"
-        + ", clusterName = " + cluster.getClusterName()
-        + ", name = " + configGroup.getName()
-        + ", tag = " + configGroup.getTag()
-        + ", user = " + getManagementController().getAuthName());
+      verifyConfigs(configGroup.getConfigurations(), cluster.getClusterName());
 
-      configGroup.persist();
       cluster.addConfigGroup(configGroup);
       if (serviceName != null) {
         cluster.createServiceConfigVersion(serviceName, getManagementController().getAuthName(),
@@ -634,6 +638,11 @@ public class ConfigGroupResourceProvider extends
                                  + ", clusterName = " + request.getClusterName()
                                  + ", groupId = " + request.getId());
       }
+
+      configLogger.info("User {} is updating configuration group {} for tag {} in cluster {}",
+          getManagementController().getAuthName(), request.getGroupName(), request.getTag(),
+          cluster.getClusterName());
+
       String serviceName = configGroup.getServiceName();
       String requestServiceName = cluster.getServiceForConfigTypes(request.getConfigs().keySet());
       if (StringUtils.isEmpty(serviceName) && StringUtils.isEmpty(requestServiceName)) {
@@ -682,13 +691,6 @@ public class ConfigGroupResourceProvider extends
       configGroup.setDescription(request.getDescription());
       configGroup.setTag(request.getTag());
 
-      configLogger.info("Persisting updated Config group"
-        + ", clusterName = " + configGroup.getClusterName()
-        + ", id = " + configGroup.getId()
-        + ", tag = " + configGroup.getTag()
-        + ", user = " + getManagementController().getAuthName());
-
-      configGroup.persist();
       if (serviceName != null) {
         cluster.createServiceConfigVersion(serviceName, getManagementController().getAuthName(),
           request.getServiceConfigVersionNote(), configGroup);
@@ -781,11 +783,7 @@ public class ConfigGroupResourceProvider extends
             }
           }
 
-          Config config = new ConfigImpl(type);
-          config.setTag(tag);
-          config.setProperties(configProperties);
-          config.setPropertiesAttributes(configAttributes);
-
+          Config config = configFactory.createReadOnly(type, tag, configProperties, configAttributes);
           configurations.put(config.getType(), config);
         }
       } catch (Exception e) {

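The provider no longer builds a mutable ConfigImpl with setters; read-only Config objects
for the REST response now come from the injected ConfigFactory. A compact before/after
view (the "before" half is reconstructed from the removed lines):

    // before: mutable ConfigImpl assembled with setters (removed in this commit)
    // Config config = new ConfigImpl(type);
    // config.setTag(tag);
    // config.setProperties(configProperties);
    // config.setPropertiesAttributes(configAttributes);

    // after: a read-only Config built in a single factory call
    Config config = configFactory.createReadOnly(type, tag, configProperties, configAttributes);
    configurations.put(config.getType(), config);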
http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java
index 5459ddb..97280ee 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java
@@ -451,7 +451,7 @@ public class ConfigureAction extends AbstractServerAction {
     // of creating a whole new history record since it was already done
     if (!targetStack.equals(currentStack) && targetStack.equals(configStack)) {
       config.setProperties(newValues);
-      config.persist(false);
+      config.save();
 
       return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", outputBuffer.toString(), "");
     }
@@ -570,8 +570,9 @@ public class ConfigureAction extends AbstractServerAction {
 
     for(Replace replacement: replacements){
       if(isOperationAllowed(cluster, configType, replacement.key,
-          replacement.ifKey, replacement.ifType, replacement.ifValue, replacement.ifKeyState))
+          replacement.ifKey, replacement.ifType, replacement.ifValue, replacement.ifKeyState)) {
         allowedReplacements.add(replacement);
+      }
     }
 
     return allowedReplacements;
@@ -582,8 +583,9 @@ public class ConfigureAction extends AbstractServerAction {
 
     for(ConfigurationKeyValue configurationKeyValue: sets){
       if(isOperationAllowed(cluster, configType, configurationKeyValue.key,
-          configurationKeyValue.ifKey, configurationKeyValue.ifType, configurationKeyValue.ifValue, configurationKeyValue.ifKeyState))
+          configurationKeyValue.ifKey, configurationKeyValue.ifType, configurationKeyValue.ifValue, configurationKeyValue.ifKeyState)) {
         allowedSets.add(configurationKeyValue);
+      }
     }
 
     return allowedSets;
@@ -593,14 +595,16 @@ public class ConfigureAction extends AbstractServerAction {
     List<Transfer> allowedTransfers = new ArrayList<>();
     for (Transfer transfer : transfers) {
       String key = "";
-      if(transfer.operation == TransferOperation.DELETE)
+      if(transfer.operation == TransferOperation.DELETE) {
         key = transfer.deleteKey;
-      else
+      } else {
         key = transfer.fromKey;
+      }
 
       if(isOperationAllowed(cluster, configType, key,
-          transfer.ifKey, transfer.ifType, transfer.ifValue, transfer.ifKeyState))
+          transfer.ifKey, transfer.ifType, transfer.ifValue, transfer.ifKeyState)) {
         allowedTransfers.add(transfer);
+      }
     }
 
     return allowedTransfers;

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixLzoCodecPath.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixLzoCodecPath.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixLzoCodecPath.java
index ffa21ab..4833729 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixLzoCodecPath.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixLzoCodecPath.java
@@ -18,7 +18,11 @@
 
 package org.apache.ambari.server.serveraction.upgrades;
 
-import com.google.inject.Inject;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
@@ -28,13 +32,7 @@ import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.commons.lang.StringUtils;
 
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
+import com.google.inject.Inject;
 
 /**
  * During stack upgrade, update lzo codec path in mapreduce.application.classpath and
@@ -78,7 +76,7 @@ public class FixLzoCodecPath extends AbstractServerAction {
         }
       }
       config.setProperties(properties);
-      config.persist(false);
+      config.save();
     }
     if (modifiedProperties.isEmpty()) {
       return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsers.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsers.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsers.java
index 3a06476..75588d5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsers.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsers.java
@@ -18,7 +18,9 @@
 
 package org.apache.ambari.server.serveraction.upgrades;
 
-import com.google.inject.Inject;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
@@ -28,8 +30,7 @@ import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.commons.lang.StringUtils;
 
-import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
+import com.google.inject.Inject;
 
 /**
  * During stack upgrade, update lzo codec path in mapreduce.application.classpath and
@@ -86,7 +87,7 @@ public class FixOozieAdminUsers extends AbstractServerAction {
     oozieProperties.put(OOZIE_ADMIN_USERS_PROP, newOozieAdminUsers);
 
     oozieConfig.setProperties(oozieProperties);
-    oozieConfig.persist(false);
+    oozieConfig.save();
 
     return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
             String.format("Set oozie admin users to %s", newOozieAdminUsers), "");

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseConfigCalculation.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseConfigCalculation.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseConfigCalculation.java
index 7f6d4b1..739dd7e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseConfigCalculation.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseConfigCalculation.java
@@ -18,7 +18,10 @@
 
 package org.apache.ambari.server.serveraction.upgrades;
 
-import com.google.inject.Inject;
+import java.math.BigDecimal;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
@@ -27,9 +30,7 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 
-import java.math.BigDecimal;
-import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
+import com.google.inject.Inject;
 
 /**
  * Computes HBase properties.  This class is only used when moving from
@@ -79,8 +80,9 @@ public class HBaseConfigCalculation extends AbstractServerAction {
                                    "Upper or lower memstore limit setting value is malformed, skipping", "");
     }
 
-    if (lowerLimit.scale() < 2) //make sure result will have at least 2 digits after decimal point
+    if (lowerLimit.scale() < 2) { // make sure the result keeps at least 2 digits after the decimal point
       lowerLimit = lowerLimit.setScale(2, BigDecimal.ROUND_HALF_UP);
+    }
     BigDecimal lowerLimitNew = lowerLimit.divide(upperLimit, BigDecimal.ROUND_HALF_UP);
 
     properties.put(NEW_LOWER_LIMIT_PROPERTY_NAME, lowerLimitNew.toString());
@@ -90,7 +92,7 @@ public class HBaseConfigCalculation extends AbstractServerAction {
     properties.remove(OLD_LOWER_LIMIT_PROPERTY_NAME);
 
     config.setProperties(properties);
-    config.persist(false);
+    config.save();
 
     return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
                   String.format("%s was set to %s", NEW_LOWER_LIMIT_PROPERTY_NAME, lowerLimitNew.toString()), "");

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeAction.java
index b238bca..fb15555 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeAction.java
@@ -18,7 +18,11 @@
 
 package org.apache.ambari.server.serveraction.upgrades;
 
-import com.google.inject.Inject;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
@@ -27,10 +31,7 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 
-import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
+import com.google.inject.Inject;
 
 /**
  * Computes HBase Env content property.
@@ -79,7 +80,7 @@ public class HBaseEnvMaxDirectMemorySizeAction extends AbstractServerAction {
     properties.put(CONTENT_NAME, appendedContent);
 
     config.setProperties(properties);
-    config.persist(false);
+    config.save();
 
     return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
       String.format("The %s/%s property was appended with %s", SOURCE_CONFIG_TYPE, CONTENT_NAME, APPEND_CONTENT_LINE),"");

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathAction.java
index 0e10160..c5000bf 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathAction.java
@@ -18,7 +18,11 @@
 
 package org.apache.ambari.server.serveraction.upgrades;
 
-import com.google.inject.Inject;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
@@ -27,10 +31,7 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 
-import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
+import com.google.inject.Inject;
 
 /**
  * Append hive-env config type with HIVE_HOME and HIVE_CONF_DIR variables if they are absent
@@ -103,7 +104,7 @@ public class HiveEnvClasspathAction extends AbstractServerAction {
     }
 
     config.setProperties(properties);
-    config.persist(false);
+    config.save();
 
     return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
       String.format("Added %s, %s to content at %s", HIVE_CONF_DIR, HIVE_HOME, TARGET_CONFIG_TYPE), "");

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigAction.java
index 0ade30b..7ebad08 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigAction.java
@@ -85,7 +85,7 @@ public class HiveZKQuorumConfigAction extends AbstractServerAction {
     hiveSiteProperties.put(HIVE_SITE_ZK_CONNECT_STRING, zookeeperQuorum);
 
     hiveSite.setProperties(hiveSiteProperties);
-    hiveSite.persist(false);
+    hiveSite.save();
 
     return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
         String.format("Successfully set %s and %s in %s", HIVE_SITE_ZK_QUORUM,

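The upgrade server actions touched by this commit all follow the same read-modify-save
pattern against the cluster's desired config; the recurring change is that persist(false)
becomes save(). A hedged sketch of that pattern, as a fragment inside an action's
execute logic (the config type, property key, and value are illustrative placeholders):

    // read-modify-save pattern used by the upgrade actions in this commit
    Config config = cluster.getDesiredConfigByType("hive-site");          // illustrative type
    Map<String, String> properties = config.getProperties();
    properties.put("hive.zookeeper.quorum", "host1:2181,host2:2181");     // illustrative key/value
    config.setProperties(properties);
    config.save();                                                        // replaces persist(false)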
http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/OozieConfigCalculation.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/OozieConfigCalculation.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/OozieConfigCalculation.java
index 4da67ca..9b8a7dc 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/OozieConfigCalculation.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/OozieConfigCalculation.java
@@ -18,7 +18,11 @@
 
 package org.apache.ambari.server.serveraction.upgrades;
 
-import com.google.inject.Inject;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
@@ -27,10 +31,7 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 
-import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
+import com.google.inject.Inject;
 
 /**
  * Changes oozie-env during upgrade (adds -Dhdp.version to $HADOOP_OPTS variable)
@@ -67,7 +68,7 @@ public class OozieConfigCalculation extends AbstractServerAction {
     }
 
     config.setProperties(properties);
-    config.persist(false);
+    config.save();
 
     return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
                   String.format("Added -Dhdp.version to $HADOOP_OPTS variable at %s", TARGET_CONFIG_TYPE), "");

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculation.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculation.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculation.java
index ff4a20e..8e0161b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculation.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculation.java
@@ -141,13 +141,13 @@ public class RangerConfigCalculation extends AbstractServerAction {
     targetValues.put("ranger.jpa.audit.jdbc.dialect", dialect);
 
     config.setProperties(targetValues);
-    config.persist(false);
+    config.save();
 
     config = cluster.getDesiredConfigByType(RANGER_ENV_CONFIG_TYPE);
     targetValues = config.getProperties();
     targetValues.put("ranger_privelege_user_jdbc_url", userJDBCUrl);
     config.setProperties(targetValues);
-    config.persist(false);
+    config.save();
 
     return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", stdout.toString(), "");
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculation.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculation.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculation.java
index ba0da79..c059c9e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculation.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculation.java
@@ -87,7 +87,7 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
       if (null != hadoopUser) {
         targetValues.put(RANGER_PLUGINS_HDFS_SERVICE_USER, hadoopUser);
         rangerAdminconfig.setProperties(targetValues);
-        rangerAdminconfig.persist(false);
+        rangerAdminconfig.save();
         sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_HDFS_SERVICE_USER);
       } else {
         errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "hdfs_user", HADOOP_ENV_CONFIG_TYPE);
@@ -104,7 +104,7 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
       if (null != hiveUser) {
         targetValues.put(RANGER_PLUGINS_HIVE_SERVICE_USER, hiveUser);
         rangerAdminconfig.setProperties(targetValues);
-        rangerAdminconfig.persist(false);
+        rangerAdminconfig.save();
         sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_HIVE_SERVICE_USER);
       } else {
         errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "hive_user", HIVE_ENV_CONFIG_TYPE);
@@ -121,7 +121,7 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
       if (null != yarnUser) {
         targetValues.put(RANGER_PLUGINS_YARN_SERVICE_USER, yarnUser);
         rangerAdminconfig.setProperties(targetValues);
-        rangerAdminconfig.persist(false);
+        rangerAdminconfig.save();
         sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_YARN_SERVICE_USER);
       } else {
         errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "yarn_user", YARN_ENV_CONFIG_TYPE);
@@ -138,7 +138,7 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
       if (null != hbaseUser) {
         targetValues.put(RANGER_PLUGINS_HBASE_SERVICE_USER, hbaseUser);
         rangerAdminconfig.setProperties(targetValues);
-        rangerAdminconfig.persist(false);
+        rangerAdminconfig.save();
         sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_HBASE_SERVICE_USER);
       } else {
         errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "hbase_user", HBASE_ENV_CONFIG_TYPE);
@@ -155,7 +155,7 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
       if (null != knoxUser) {
         targetValues.put(RANGER_PLUGINS_KNOX_SERVICE_USER, knoxUser);
         rangerAdminconfig.setProperties(targetValues);
-        rangerAdminconfig.persist(false);
+        rangerAdminconfig.save();
         sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_KNOX_SERVICE_USER);
       } else {
         errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "knox_user", KNOX_ENV_CONFIG_TYPE);
@@ -190,7 +190,7 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
         }
         targetValues.put(RANGER_PLUGINS_STORM_SERVICE_USER, stormValue);
         rangerAdminconfig.setProperties(targetValues);
-        rangerAdminconfig.persist(false);
+        rangerAdminconfig.save();
         sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_STORM_SERVICE_USER);
       } else {
         errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "storm_user", STORM_ENV_CONFIG_TYPE);
@@ -207,7 +207,7 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
       if (null != kafkaUser) {
         targetValues.put(RANGER_PLUGINS_KAFKA_SERVICE_USER, kafkaUser);
         rangerAdminconfig.setProperties(targetValues);
-        rangerAdminconfig.persist(false);
+        rangerAdminconfig.save();
         sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_KAFKA_SERVICE_USER);
       } else {
         errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "kafka_user", KAFKA_ENV_CONFIG_TYPE);
@@ -224,7 +224,7 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
       if (null != rangerKmsUser) {
         targetValues.put(RANGER_PLUGINS_KMS_SERVICE_USER, rangerKmsUser);
         rangerAdminconfig.setProperties(targetValues);
-        rangerAdminconfig.persist(false);
+        rangerAdminconfig.save();
         sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_KMS_SERVICE_USER);
       } else {
         errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "kms_user", RANGER_KMS_ENV_CONFIG_TYPE);
@@ -243,10 +243,10 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
         if (null != spnegoKeytab) {
           targetValues.put(RANGER_SPNEGO_KEYTAB, spnegoKeytab);
           rangerAdminconfig.setProperties(targetValues);
-          rangerAdminconfig.persist(false);
+          rangerAdminconfig.save();
           sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_SPNEGO_KEYTAB);
         } else {
-          errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "dfs.web.authentication.kerberos.keytab", HDFS_SITE_CONFIG_TYPE);          
+          errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "dfs.web.authentication.kerberos.keytab", HDFS_SITE_CONFIG_TYPE);
         }
 
       } else {

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfig.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfig.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfig.java
index bb88f55..25387cc 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfig.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfig.java
@@ -29,7 +29,6 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.SecurityType;
-import org.apache.commons.lang.StringUtils;
 
 import com.google.inject.Inject;
 
@@ -83,7 +82,7 @@ public class RangerKmsProxyConfig extends AbstractServerAction {
       targetValues.put(groupProp, "*");
       targetValues.put(hostProp, "*");
       kmsSite.setProperties(targetValues);
-      kmsSite.persist(false);
+      kmsSite.save();
       outputMsg = outputMsg + MessageFormat.format("Successfully added properties to {0}", RANGER_KMS_SITE_CONFIG_TYPE);
     } else {
       outputMsg = outputMsg +  MessageFormat.format("Kerberos not enable, not setting proxy properties to {0}", RANGER_KMS_SITE_CONFIG_TYPE);

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfig.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfig.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfig.java
index 299a373..b1aa6e1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfig.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfig.java
@@ -25,7 +25,6 @@ import java.util.Map;
 import java.util.concurrent.ConcurrentMap;
 
 import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.ServiceNotFoundException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
 import org.apache.ambari.server.serveraction.AbstractServerAction;
@@ -89,7 +88,7 @@ public class SparkShufflePropertyConfig extends AbstractServerAction {
       yarnSiteProperties.put(YARN_NODEMANAGER_AUX_SERVICES, newAuxServices);
       yarnSiteProperties.put(YARN_NODEMANAGER_AUX_SERVICES_SPARK_SHUFFLE_CLASS, YARN_NODEMANAGER_AUX_SERVICES_SPARK_SHUFFLE_CLASS_VALUE);
       yarnSiteConfig.setProperties(yarnSiteProperties);
-      yarnSiteConfig.persist(false);
+      yarnSiteConfig.save();
 
       return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
         String.format("%s was set from %s to %s. %s was set to %s",

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/YarnConfigCalculation.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/YarnConfigCalculation.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/YarnConfigCalculation.java
index feefcaf..d638858 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/YarnConfigCalculation.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/YarnConfigCalculation.java
@@ -67,7 +67,7 @@ public class YarnConfigCalculation extends AbstractServerAction {
     yarnSiteProperties.put(YARN_RM_ZK_ADDRESS_PROPERTY_NAME, zkServersStr);
     yarnSiteProperties.put(HADOOP_REGISTRY_ZK_QUORUM_PROPERTY_NAME, zkServersStr);
     yarnSiteConfig.setProperties(yarnSiteProperties);
-    yarnSiteConfig.persist(false);
+    yarnSiteConfig.save();
 
     return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
         String.format("%s was set from %s to %s. %s was set from %s to %s",

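The four upgrade actions above (RangerKerberosConfigCalculation, RangerKmsProxyConfig, SparkShufflePropertyConfig, YarnConfigCalculation) all make the same substitution: Config.persist(false) becomes Config.save(), which updates the existing config entity in place instead of deciding at call time whether to create a new one. A minimal sketch of the resulting call pattern, assuming the usual Cluster.getDesiredConfigByType accessor (that accessor is not part of this diff):

import java.util.Map;

import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Config;

public class ExampleConfigUpdateAction {

  /**
   * Mutates an existing desired config in place: no new config version is
   * created; save() simply re-serializes the updated properties.
   */
  void updateYarnSite(Cluster cluster, String zkQuorum) {
    Config yarnSite = cluster.getDesiredConfigByType("yarn-site"); // assumed accessor
    if (yarnSite == null) {
      return; // nothing to update on this cluster
    }

    Map<String, String> props = yarnSite.getProperties(); // defensive copy per ConfigImpl
    props.put("hadoop.registry.zk.quorum", zkQuorum);

    yarnSite.setProperties(props); // replace the in-memory property map
    yarnSite.save();               // was persist(false) before this change
  }
}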
http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/main/java/org/apache/ambari/server/state/Config.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Config.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Config.java
index b35aad9..67570f4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/Config.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Config.java
@@ -30,8 +30,6 @@ public interface Config {
 
   void setPropertiesTypes(Map<PropertyInfo.PropertyType, Set<String>> propertiesTypes);
 
-  void setStackId(StackId stackId);
-
   /**
    * @return Config Type
    */
@@ -66,18 +64,6 @@ public interface Config {
   public Map<String, Map<String, String>> getPropertiesAttributes();
 
   /**
-   * Change the version tag
-   * @param versionTag
-   */
-  public void setTag(String versionTag);
-
-  /**
-   * Set config version
-   * @param version
-   */
-  public void setVersion(Long version);
-
-  /**
    * Replace properties with new provided set
    * @param properties Property Map to replace existing one
    */
@@ -110,11 +96,5 @@ public interface Config {
   /**
    * Persist the configuration.
    */
-  public void persist();
-
-  /**
-   * Persist the configuration, optionally creating a new config entity.
-   */
-  public void persist(boolean newConfig);
-
+  public void save();
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigFactory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigFactory.java
index eaf68aa..d6cd997 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigFactory.java
@@ -27,18 +27,20 @@ import com.google.inject.assistedinject.Assisted;
  * Factory for creating configuration objects using {@link Assisted} constructor parameters
  */
 public interface ConfigFactory {
-  
+
   /**
    * Creates a new {@link Config} object using provided values.
    *
    * @param cluster
    * @param type
+   * @param tag
    * @param map
    * @param mapAttributes
    * @return
    */
-  Config createNew(Cluster cluster, String type, Map<String, String> map, Map<String, Map<String, String>> mapAttributes);
-  
+  Config createNew(Cluster cluster, @Assisted("type") String type, @Assisted("tag") String tag,
+      Map<String, String> map, Map<String, Map<String, String>> mapAttributes);
+
   /**
    * Creates a new {@link Config} object using provided entity
    *
@@ -48,4 +50,16 @@ public interface ConfigFactory {
    */
   Config createExisting(Cluster cluster, ClusterConfigEntity entity);
 
+  /**
+   * Creates a read-only instance of a {@link Config} suitable for returning in
+   * REST responses.
+   *
+   * @param type
+   * @param tag
+   * @param map
+   * @param mapAttributes
+   * @return
+   */
+  Config createReadOnly(@Assisted("type") String type, @Assisted("tag") String tag,
+      Map<String, String> map, Map<String, Map<String, String>> mapAttributes);
 }

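Because createNew() and createReadOnly() now take two String parameters, the factory relies on Guice's named assisted bindings: the value in @Assisted("type") / @Assisted("tag") is what matches each factory argument to the corresponding constructor parameter in ConfigImpl. A simplified, self-contained illustration of that mechanism (toy classes, not the Ambari ones):

import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.assistedinject.Assisted;
import com.google.inject.assistedinject.AssistedInject;
import com.google.inject.assistedinject.FactoryModuleBuilder;

public class AssistedInjectExample {

  public interface WidgetFactory {
    // two String parameters need named @Assisted qualifiers so Guice can tell
    // which factory argument feeds which constructor parameter
    Widget create(@Assisted("type") String type, @Assisted("tag") String tag);
  }

  public static class Widget {
    final String type;
    final String tag;

    @AssistedInject
    Widget(@Assisted("type") String type, @Assisted("tag") String tag) {
      this.type = type;
      this.tag = tag;
    }
  }

  public static void main(String[] args) {
    Injector injector = Guice.createInjector(new AbstractModule() {
      @Override
      protected void configure() {
        install(new FactoryModuleBuilder().build(WidgetFactory.class));
      }
    });

    Widget w = injector.getInstance(WidgetFactory.class).create("global", "v1");
    System.out.println(w.type + "/" + w.tag); // prints "global/v1"
  }
}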
http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
index 1f52e6a..0a861d8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
@@ -18,27 +18,29 @@
 
 package org.apache.ambari.server.state;
 
-import java.util.Collections;
-import java.util.Date;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import javax.annotation.Nullable;
 
 import org.apache.ambari.server.events.ClusterConfigChangedEvent;
 import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
+import org.apache.ambari.server.logging.LockFactory;
 import org.apache.ambari.server.orm.dao.ClusterDAO;
 import org.apache.ambari.server.orm.dao.ServiceConfigDAO;
 import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
 import org.apache.ambari.server.orm.entities.ClusterEntity;
+import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.gson.Gson;
+import com.google.gson.JsonSyntaxException;
 import com.google.inject.Inject;
-import com.google.inject.Injector;
 import com.google.inject.assistedinject.Assisted;
 import com.google.inject.assistedinject.AssistedInject;
 import com.google.inject.persist.Transactional;
@@ -49,52 +51,113 @@ public class ConfigImpl implements Config {
    */
   private final static Logger LOG = LoggerFactory.getLogger(ConfigImpl.class);
 
+  /**
+   * A label for {@link #propertyLock} to use with the {@link LockFactory}.
+   */
+  private static final String PROPERTY_LOCK_LABEL = "configurationPropertyLock";
+
   public static final String GENERATED_TAG_PREFIX = "generatedTag_";
 
-  private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
+  private final long configId;
+  private final Cluster cluster;
+  private final StackId stackId;
+  private final String type;
+  private final String tag;
+  private final Long version;
 
-  private Cluster cluster;
-  private StackId stackId;
-  private String type;
-  private volatile String tag;
-  private volatile Long version;
-  private volatile Map<String, String> properties;
-  private volatile Map<String, Map<String, String>> propertiesAttributes;
-  private ClusterConfigEntity entity;
-  private volatile Map<PropertyInfo.PropertyType, Set<String>> propertiesTypes;
+  /**
+   * The properties of this configuration. This cannot be a
+   * {@link ConcurrentMap} since we allow null values. Therefore, it must be
+   * synchronized externally.
+   */
+  private Map<String, String> properties;
 
-  @Inject
-  private ClusterDAO clusterDAO;
+  /**
+   * A lock for reading/writing of {@link #properties} concurrently.
+   *
+   * @see #properties
+   */
+  private final ReadWriteLock propertyLock;
 
-  @Inject
-  private Gson gson;
+  /**
+   * The property attributes for this configuration.
+   */
+  private Map<String, Map<String, String>> propertiesAttributes;
+
+  private Map<PropertyInfo.PropertyType, Set<String>> propertiesTypes;
+
+  private final ClusterDAO clusterDAO;
+
+  private final Gson gson;
 
   @Inject
   private ServiceConfigDAO serviceConfigDAO;
 
-  @Inject
-  private AmbariEventPublisher eventPublisher;
+  private final AmbariEventPublisher eventPublisher;
 
   @AssistedInject
-  public ConfigImpl(@Assisted Cluster cluster, @Assisted String type, @Assisted Map<String, String> properties,
-      @Assisted Map<String, Map<String, String>> propertiesAttributes, Injector injector) {
+  ConfigImpl(@Assisted Cluster cluster, @Assisted("type") String type,
+      @Assisted("tag") @Nullable String tag,
+      @Assisted Map<String, String> properties,
+      @Assisted @Nullable Map<String, Map<String, String>> propertiesAttributes, ClusterDAO clusterDAO,
+      Gson gson, AmbariEventPublisher eventPublisher, LockFactory lockFactory) {
+
+    propertyLock = lockFactory.newReadWriteLock(PROPERTY_LOCK_LABEL);
+
     this.cluster = cluster;
     this.type = type;
     this.properties = properties;
-    this.propertiesAttributes = propertiesAttributes;
+
+    // only set this if it's non-null
+    this.propertiesAttributes = null == propertiesAttributes ? null
+        : new HashMap<>(propertiesAttributes);
+
+    this.clusterDAO = clusterDAO;
+    this.gson = gson;
+    this.eventPublisher = eventPublisher;
+    version = cluster.getNextConfigVersion(type);
+
+    // tag is nullable from factory but not in the DB, so ensure we generate something
+    tag = StringUtils.isBlank(tag) ? GENERATED_TAG_PREFIX + version : tag;
+    this.tag = tag;
+
+    ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());
+
+    ClusterConfigEntity entity = new ClusterConfigEntity();
+    entity.setClusterEntity(clusterEntity);
+    entity.setClusterId(cluster.getClusterId());
+    entity.setType(type);
+    entity.setVersion(version);
+    entity.setTag(this.tag);
+    entity.setTimestamp(System.currentTimeMillis());
+    entity.setStack(clusterEntity.getDesiredStack());
+    entity.setData(gson.toJson(properties));
+
+    if (null != propertiesAttributes) {
+      entity.setAttributes(gson.toJson(propertiesAttributes));
+    }
 
     // when creating a brand new config without a backing entity, use the
     // cluster's desired stack as the config's stack
     stackId = cluster.getDesiredStackVersion();
-
-    injector.injectMembers(this);
     propertiesTypes = cluster.getConfigPropertiesTypes(type);
-  }
+    persist(entity);
 
+    configId = entity.getConfigId();
+  }
 
   @AssistedInject
-  public ConfigImpl(@Assisted Cluster cluster, @Assisted ClusterConfigEntity entity, Injector injector) {
+  ConfigImpl(@Assisted Cluster cluster, @Assisted ClusterConfigEntity entity,
+      ClusterDAO clusterDAO, Gson gson, AmbariEventPublisher eventPublisher,
+      LockFactory lockFactory) {
+    propertyLock = lockFactory.newReadWriteLock(PROPERTY_LOCK_LABEL);
+
     this.cluster = cluster;
+    this.clusterDAO = clusterDAO;
+    this.gson = gson;
+    this.eventPublisher = eventPublisher;
+    configId = entity.getConfigId();
+
     type = entity.getType();
     tag = entity.getTag();
     version = entity.getVersion();
@@ -102,16 +165,71 @@ public class ConfigImpl implements Config {
     // when using an existing entity, use the actual value of the entity's stack
     stackId = new StackId(entity.getStack());
 
-    this.entity = entity;
-    injector.injectMembers(this);
     propertiesTypes = cluster.getConfigPropertiesTypes(type);
+
+    // incur the hit on deserialization since this business object is stored locally
+    try {
+      Map<String, String> deserializedProperties = gson.<Map<String, String>> fromJson(
+          entity.getData(), Map.class);
+
+      if (null == deserializedProperties) {
+        deserializedProperties = new HashMap<>();
+      }
+
+      properties = deserializedProperties;
+    } catch (JsonSyntaxException e) {
+      LOG.error("Malformed configuration JSON stored in the database for {}/{}", entity.getType(),
+          entity.getTag());
+    }
+
+    // incur the hit on deserialization since this business object is stored locally
+    try {
+      Map<String, Map<String, String>> deserializedAttributes = gson.<Map<String, Map<String, String>>> fromJson(
+          entity.getAttributes(), Map.class);
+
+      if (null != deserializedAttributes) {
+        propertiesAttributes = new HashMap<>(deserializedAttributes);
+      }
+    } catch (JsonSyntaxException e) {
+      LOG.error("Malformed configuration attribute JSON stored in the database for {}/{}",
+          entity.getType(), entity.getTag());
+    }
   }
 
   /**
-   * Constructor for clients not using factory.
+   * Constructor. This will create an instance suitable only for
+   * representation/serialization as it is incomplete.
+   *
+   * @param type
+   * @param tag
+   * @param properties
+   * @param propertiesAttributes
+   * @param clusterDAO
+   * @param gson
+   * @param eventPublisher
    */
-  public ConfigImpl(String type) {
+  @AssistedInject
+  ConfigImpl(@Assisted("type") String type,
+      @Assisted("tag") @Nullable String tag,
+      @Assisted Map<String, String> properties,
+      @Assisted @Nullable Map<String, Map<String, String>> propertiesAttributes, ClusterDAO clusterDAO,
+      Gson gson, AmbariEventPublisher eventPublisher, LockFactory lockFactory) {
+
+    propertyLock = lockFactory.newReadWriteLock(PROPERTY_LOCK_LABEL);
+
+    this.tag = tag;
     this.type = type;
+    this.properties = new HashMap<>(properties);
+    this.propertiesAttributes = null == propertiesAttributes ? null
+        : new HashMap<>(propertiesAttributes);
+    this.clusterDAO = clusterDAO;
+    this.gson = gson;
+    this.eventPublisher = eventPublisher;
+
+    cluster = null;
+    configId = 0;
+    version = 0L;
+    stackId = null;
   }
 
   /**
@@ -119,232 +237,124 @@ public class ConfigImpl implements Config {
    */
   @Override
   public StackId getStackId() {
-    readWriteLock.readLock().lock();
-    try {
-      return stackId;
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
+    return stackId;
   }
 
   @Override
   public Map<PropertyInfo.PropertyType, Set<String>> getPropertiesTypes() {
-    readWriteLock.readLock().lock();
-    try {
-      return propertiesTypes;
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
+    return propertiesTypes;
   }
 
   @Override
   public void setPropertiesTypes(Map<PropertyInfo.PropertyType, Set<String>> propertiesTypes) {
-    readWriteLock.writeLock().lock();
-    try {
-      this.propertiesTypes = propertiesTypes;
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-  }
-
-  @Override
-  public void setStackId(StackId stackId) {
-    readWriteLock.writeLock().lock();
-    try {
-      this.stackId = stackId;
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-
+    this.propertiesTypes = propertiesTypes;
   }
 
   @Override
   public String getType() {
-    readWriteLock.readLock().lock();
-    try {
-      return type;
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
+    return type;
   }
 
   @Override
   public String getTag() {
-    if (tag == null) {
-      readWriteLock.writeLock().lock();
-      try {
-        if (tag == null) {
-          tag = GENERATED_TAG_PREFIX + getVersion();
-        }
-      } finally {
-        readWriteLock.writeLock().unlock();
-      }
-    }
-
-    readWriteLock.readLock().lock();
-    try {
-
-      return tag;
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
+    return tag;
   }
 
   @Override
   public Long getVersion() {
-    if (version == null && cluster != null) {
-      readWriteLock.writeLock().lock();
-      try {
-        if (version == null) {
-          version = cluster.getNextConfigVersion(type); //pure DB calculation call, no cluster locking required
-        }
-      } finally {
-        readWriteLock.writeLock().unlock();
-      }
-    }
-
-    readWriteLock.readLock().lock();
-    try {
-      return version;
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
+    return version;
   }
 
   @Override
   public Map<String, String> getProperties() {
-    if (null != entity && null == properties) {
-      readWriteLock.writeLock().lock();
-      try {
-        if (properties == null) {
-          properties = gson.<Map<String, String>>fromJson(entity.getData(), Map.class);
-        }
-      } finally {
-        readWriteLock.writeLock().unlock();
-      }
-    }
-
-    readWriteLock.readLock().lock();
+    propertyLock.readLock().lock();
     try {
-      return null == properties ? new HashMap<String, String>()
-          : new HashMap<String, String>(properties);
+      return properties == null ? new HashMap<String, String>() : new HashMap<>(properties);
     } finally {
-      readWriteLock.readLock().unlock();
+      propertyLock.readLock().unlock();
     }
-
   }
 
   @Override
   public Map<String, Map<String, String>> getPropertiesAttributes() {
-    if (null != entity && null == propertiesAttributes) {
-      readWriteLock.writeLock().lock();
-      try {
-        if (propertiesAttributes == null) {
-          propertiesAttributes = gson.<Map<String, Map<String, String>>>fromJson(entity.getAttributes(), Map.class);
-        }
-      } finally {
-        readWriteLock.writeLock().unlock();
-      }
-    }
-
-    readWriteLock.readLock().lock();
-    try {
-      return null == propertiesAttributes ? null : new HashMap<String, Map<String, String>>(propertiesAttributes);
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
-  }
-
-  @Override
-  public void setTag(String tag) {
-    readWriteLock.writeLock().lock();
-    try {
-      this.tag = tag;
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-
-  }
-
-  @Override
-  public void setVersion(Long version) {
-    readWriteLock.writeLock().lock();
-    try {
-      this.version = version;
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-
+    return null == propertiesAttributes ? null
+        : new HashMap<String, Map<String, String>>(propertiesAttributes);
   }
 
   @Override
   public void setProperties(Map<String, String> properties) {
-    readWriteLock.writeLock().lock();
+    propertyLock.writeLock().lock();
     try {
       this.properties = properties;
     } finally {
-      readWriteLock.writeLock().unlock();
+      propertyLock.writeLock().unlock();
     }
-
   }
 
   @Override
   public void setPropertiesAttributes(Map<String, Map<String, String>> propertiesAttributes) {
-    readWriteLock.writeLock().lock();
-    try {
-      this.propertiesAttributes = propertiesAttributes;
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-
+    this.propertiesAttributes = propertiesAttributes;
   }
 
   @Override
-  public void updateProperties(Map<String, String> properties) {
-    readWriteLock.writeLock().lock();
+  public void updateProperties(Map<String, String> propertiesToUpdate) {
+    propertyLock.writeLock().lock();
     try {
-      this.properties.putAll(properties);
+      properties.putAll(propertiesToUpdate);
     } finally {
-      readWriteLock.writeLock().unlock();
+      propertyLock.writeLock().unlock();
     }
-
   }
 
   @Override
   public List<Long> getServiceConfigVersions() {
-    readWriteLock.readLock().lock();
-    try {
-      if (cluster == null || type == null || version == null) {
-        return Collections.emptyList();
-      }
-      return serviceConfigDAO.getServiceConfigVersionsByConfig(cluster.getClusterId(), type, version);
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
+    return serviceConfigDAO.getServiceConfigVersionsByConfig(cluster.getClusterId(), type, version);
   }
 
   @Override
-  public void deleteProperties(List<String> properties) {
-    readWriteLock.writeLock().lock();
+  public void deleteProperties(List<String> propertyKeysToRemove) {
+    propertyLock.writeLock().lock();
     try {
-      for (String key : properties) {
-        this.properties.remove(key);
-      }
+      Set<String> keySet = properties.keySet();
+      keySet.removeAll(propertyKeysToRemove);
     } finally {
-      readWriteLock.writeLock().unlock();
+      propertyLock.writeLock().unlock();
     }
+  }
+
+  /**
+   * Persist the entity and update the internal state relationships once the
+   * transaction has been committed.
+   */
+  private void persist(ClusterConfigEntity entity) {
+    persistEntitiesInTransaction(entity);
 
+    // ensure that the in-memory state of the cluster is kept consistent
+    cluster.addConfig(this);
+
+    // re-load the entity associations for the cluster
+    cluster.refresh();
+
+    // broadcast the change event for the configuration
+    ClusterConfigChangedEvent event = new ClusterConfigChangedEvent(cluster.getClusterName(),
+        getType(), getTag(), getVersion());
+
+    eventPublisher.publish(event);
   }
 
-  @Override
-  public void persist() {
-    persist(true);
+  /**
+   * Persist the cluster and configuration entities in their own transaction.
+   */
+  @Transactional
+  void persistEntitiesInTransaction(ClusterConfigEntity entity) {
+    ClusterEntity clusterEntity = entity.getClusterEntity();
+
+    clusterDAO.createConfig(entity);
+    clusterEntity.getClusterConfigEntities().add(entity);
+
+    // save the entity, forcing a flush to ensure the refresh picks up the
+    // newest data
+    clusterDAO.merge(clusterEntity, true);
   }
 
   /**
@@ -352,69 +362,29 @@ public class ConfigImpl implements Config {
    */
   @Override
   @Transactional
-  public void persist(boolean newConfig) {
-    readWriteLock.writeLock().lock();
-    try {
-      ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());
-
-      if (newConfig) {
-        ClusterConfigEntity entity = new ClusterConfigEntity();
-        entity.setClusterEntity(clusterEntity);
-        entity.setClusterId(cluster.getClusterId());
-        entity.setType(getType());
-        entity.setVersion(getVersion());
-        entity.setTag(getTag());
-        entity.setTimestamp(new Date().getTime());
-        entity.setStack(clusterEntity.getDesiredStack());
-        entity.setData(gson.toJson(getProperties()));
-
-        if (null != getPropertiesAttributes()) {
-          entity.setAttributes(gson.toJson(getPropertiesAttributes()));
-        }
-
-        clusterDAO.createConfig(entity);
-        clusterEntity.getClusterConfigEntities().add(entity);
-
-        // save the entity, forcing a flush to ensure the refresh picks up the
-        // newest data
-        clusterDAO.merge(clusterEntity, true);
-      } else {
-        // only supporting changes to the properties
-        ClusterConfigEntity entity = null;
-
-        // find the existing configuration to update
-        for (ClusterConfigEntity cfe : clusterEntity.getClusterConfigEntities()) {
-          if (getTag().equals(cfe.getTag()) && getType().equals(cfe.getType())
-              && getVersion().equals(cfe.getVersion())) {
-            entity = cfe;
-            break;
-          }
-        }
-
-        // if the configuration was found, then update it
-        if (null != entity) {
-          LOG.debug(
-              "Updating {} version {} with new configurations; a new version will not be created",
-              getType(), getVersion());
-
-          entity.setData(gson.toJson(getProperties()));
-
-          // save the entity, forcing a flush to ensure the refresh picks up the
-          // newest data
-          clusterDAO.merge(clusterEntity, true);
-        }
-      }
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
+  public void save() {
+    ClusterConfigEntity entity = clusterDAO.findConfig(configId);
 
-    // re-load the entity associations for the cluster
-    cluster.refresh();
+    // if the configuration was found, then update it
+    if (null != entity) {
+      ClusterEntity clusterEntity = clusterDAO.findById(entity.getClusterId());
+      LOG.debug("Updating {} version {} with new configurations; a new version will not be created",
+          getType(), getVersion());
 
-    // broadcast the change event for the configuration
-    ClusterConfigChangedEvent event = new ClusterConfigChangedEvent(cluster.getClusterName(),
-        getType(), getTag(), getVersion());
+      entity.setData(gson.toJson(getProperties()));
+
+      // save the entity, forcing a flush to ensure the refresh picks up the
+      // newest data
+      clusterDAO.merge(clusterEntity, true);
+
+      // re-load the entity associations for the cluster
+      cluster.refresh();
+
+      // broadcast the change event for the configuration
+      ClusterConfigChangedEvent event = new ClusterConfigChangedEvent(cluster.getClusterName(),
+          getType(), getTag(), getVersion());
 
       eventPublisher.publish(event);
+    }
   }
 }

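The net effect of the ConfigImpl rewrite is that a configuration is persisted as part of construction (the assisted constructor builds and saves the ClusterConfigEntity itself), while save() only re-serializes property changes into that same entity. A hedged usage sketch against the new API, assuming an injected ConfigFactory and an already-loaded Cluster:

import java.util.HashMap;
import java.util.Map;

import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Config;
import org.apache.ambari.server.state.ConfigFactory;

public class ConfigLifecycleSketch {

  /**
   * Illustrates the post-refactor lifecycle: the factory both creates and
   * persists the config; save() only re-serializes property changes.
   */
  static Config createAndUpdate(ConfigFactory configFactory, Cluster cluster) {
    Map<String, String> properties = new HashMap<>();
    properties.put("yarn.nodemanager.resource.memory-mb", "4096");

    // createNew now takes the tag up front and persists as part of construction,
    // so there is no follow-up setTag()/setVersion()/persist() dance
    Config yarnSite = configFactory.createNew(cluster, "yarn-site", "version1",
        properties, new HashMap<String, Map<String, String>>());

    // later mutations replace the property map and call save(), which updates
    // the existing ClusterConfigEntity rather than creating a new version
    properties.put("yarn.scheduler.minimum-allocation-mb", "1024");
    yarnSite.setProperties(properties);
    yarnSite.save();

    return yarnSite;
  }
}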
http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
index 7bf24ce..649fe38 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
@@ -326,8 +326,11 @@ public class ClusterImpl implements Cluster {
     loadStackVersion();
     loadServices();
     loadServiceHostComponents();
-    loadConfigGroups();
+
+    // cache configurations before loading configuration groups
     cacheConfigurations();
+    loadConfigGroups();
+
     loadRequestExecutions();
 
     if (desiredStackVersion != null && !StringUtils.isEmpty(desiredStackVersion.getStackName()) && !
@@ -2566,7 +2569,6 @@ public class ClusterImpl implements Cluster {
           }
         }
         configGroup.setHosts(groupDesiredHosts);
-        configGroup.persist();
       } else {
         throw new IllegalArgumentException("Config group {} doesn't exist");
       }

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroup.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroup.java b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroup.java
index 1b29c9b..5a9c574 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroup.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroup.java
@@ -18,13 +18,13 @@
 
 package org.apache.ambari.server.state.configgroup;
 
+import java.util.Map;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.controller.ConfigGroupResponse;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.Host;
 
-import java.util.Map;
-
 /**
  * Configuration group or Config group is a type of Ambari resource that
  * supports grouping of configuration resources and host resources for a
@@ -80,29 +80,20 @@ public interface ConfigGroup {
   public void setDescription(String description);
 
   /**
-   * List of hosts to which configs are applied
+   * Gets an unmodifiable map of {@link Host}s.
+   *
    * @return
    */
   public Map<Long, Host> getHosts();
 
   /**
-   * List of @Config objects
+   * Gets an unmodifiable map of {@link Config}s.
+   *
    * @return
    */
   public Map<String, Config> getConfigurations();
 
   /**
-   * Persist the Config group along with the related host and config mapping
-   * entities to the persistence store
-   */
-  void persist();
-
-  /**
-   * Persist the host mapping entity to the persistence store
-   */
-  void persistHostMapping();
-
-  /**
    * Delete config group and the related host and config mapping
    * entities from the persistence store
    */
@@ -116,13 +107,6 @@ public interface ConfigGroup {
   public void addHost(Host host) throws AmbariException;
 
   /**
-   * Add config to the config group
-   * @param config
-   * @throws AmbariException
-   */
-  public void addConfiguration(Config config) throws AmbariException;
-
-  /**
    * Return @ConfigGroupResponse for the config group
    *
    * @return @ConfigGroupResponse
@@ -131,11 +115,6 @@ public interface ConfigGroup {
   public ConfigGroupResponse convertToResponse() throws AmbariException;
 
   /**
-   * Refresh Config group and the host and config mappings for the group
-   */
-  public void refresh();
-
-  /**
    * Reassign the set of hosts associated with this config group
    * @param hosts
    */

http://git-wip-us.apache.org/repos/asf/ambari/blob/704170e4/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java
index 9abadf3..906d948 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java
@@ -17,22 +17,38 @@
  */
 package org.apache.ambari.server.state.configgroup;
 
-import com.google.inject.assistedinject.Assisted;
+import java.util.Map;
+
 import org.apache.ambari.server.orm.entities.ConfigGroupEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.Host;
-import org.apache.ambari.server.state.configgroup.ConfigGroup;
 
-import java.util.Map;
+import com.google.inject.assistedinject.Assisted;
 
 public interface ConfigGroupFactory {
-  ConfigGroup createNew(@Assisted("cluster") Cluster cluster,
-                       @Assisted("name") String name,
-                       @Assisted("tag") String tag,
-                       @Assisted("description") String description,
-                       @Assisted("configs") Map<String, Config> configs,
-                       @Assisted("hosts") Map<Long, Host> hosts);
+  /**
+   * Creates and saves a new {@link ConfigGroup}.
+   *
+   * @param cluster
+   * @param name
+   * @param tag
+   * @param description
+   * @param configs
+   * @param hosts
+   * @param serviceName
+   * @return
+   */
+  ConfigGroup createNew(@Assisted("cluster") Cluster cluster, @Assisted("name") String name,
+      @Assisted("tag") String tag, @Assisted("description") String description,
+      @Assisted("configs") Map<String, Config> configs, @Assisted("hosts") Map<Long, Host> hosts);
 
+  /**
+   * Instantiates a {@link ConfigGroup} from an existing, persisted entity.
+   *
+   * @param cluster
+   * @param entity
+   * @return
+   */
   ConfigGroup createExisting(Cluster cluster, ConfigGroupEntity entity);
 }

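With persist() and persistHostMapping() removed from the ConfigGroup interface, ConfigGroupFactory.createNew() is now expected to both create and save the group in one step. A small sketch of what a caller looks like after this change (the group name, tag and description are illustrative values):

import java.util.Map;

import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Config;
import org.apache.ambari.server.state.Host;
import org.apache.ambari.server.state.configgroup.ConfigGroup;
import org.apache.ambari.server.state.configgroup.ConfigGroupFactory;

public class ConfigGroupSketch {

  /**
   * createNew() persists the group as part of construction, so the old
   * configGroup.persist() follow-up call is gone.
   */
  static ConfigGroup createGroup(ConfigGroupFactory configGroupFactory, Cluster cluster,
      Map<String, Config> configs, Map<Long, Host> hosts) {
    return configGroupFactory.createNew(cluster, "cg1", "t1", "test config group",
        configs, hosts);
  }
}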

[25/51] [abbrv] ambari git commit: AMBARI-19137. HDP 3.0 TP - move ZK, HDFS, YARN/MR into new common-services version (alejandro)

Posted by sm...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/templates/exclude_hosts_list.j2
new file mode 100644
index 0000000..c7ce416
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/templates/exclude_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in exclude_hosts %}
+{{host}}
+{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/templates/mapreduce.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/templates/mapreduce.conf.j2 b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/templates/mapreduce.conf.j2
new file mode 100644
index 0000000..ae8e6d5
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/templates/mapreduce.conf.j2
@@ -0,0 +1,35 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{mapred_user}}   - nofile {{mapred_user_nofile_limit}}
+{{mapred_user}}   - nproc  {{mapred_user_nproc_limit}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/templates/taskcontroller.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/templates/taskcontroller.cfg.j2 b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/templates/taskcontroller.cfg.j2
new file mode 100644
index 0000000..3d5f4f2
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/templates/taskcontroller.cfg.j2
@@ -0,0 +1,38 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+mapred.local.dir={{mapred_local_dir}}
+mapreduce.tasktracker.group={{mapred_tt_group}}
+hadoop.log.dir={{hdfs_log_dir_prefix}}/{{mapred_user}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/templates/yarn.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/templates/yarn.conf.j2 b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/templates/yarn.conf.j2
new file mode 100644
index 0000000..1063099
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/package/templates/yarn.conf.j2
@@ -0,0 +1,35 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{yarn_user}}   - nofile {{yarn_user_nofile_limit}}
+{{yarn_user}}   - nproc  {{yarn_user_nproc_limit}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/quicklinks-mapred/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/quicklinks-mapred/quicklinks.json b/ambari-server/src/main/resources/common-services/YARN/3.0.0/quicklinks-mapred/quicklinks.json
new file mode 100644
index 0000000..5ffbc07
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/quicklinks-mapred/quicklinks.json
@@ -0,0 +1,80 @@
+{
+  "name": "default",
+  "description": "default quick links configuration",
+  "configuration": {
+    "protocol":
+    {
+      "type":"https",
+      "checks":[
+        {
+          "property":"mapreduce.jobhistory.http.policy",
+          "desired":"HTTPS_ONLY",
+          "site":"mapred-site"
+        }
+      ]
+    },
+
+    "links": [
+      {
+        "name": "jobhistory_ui",
+        "label": "JobHistory UI",
+        "requires_user_name": "false",
+        "component_name": "HISTORYSERVER",
+        "url": "%@://%@:%@",
+        "port":{
+          "http_property": "mapreduce.jobhistory.webapp.address",
+          "http_default_port": "19888",
+          "https_property": "mapreduce.jobhistory.webapp.https.address",
+          "https_default_port": "8090",
+          "regex": "\\w*:(\\d+)",
+          "site": "mapred-site"
+        }
+      },
+      {
+        "name": "jobhistory_logs",
+        "label": "JobHistory logs",
+        "requires_user_name": "false",
+        "component_name": "HISTORYSERVER",
+        "url": "%@://%@:%@/logs",
+        "port":{
+          "http_property": "mapreduce.jobhistory.webapp.address",
+          "http_default_port": "19888",
+          "https_property": "mapreduce.jobhistory.webapp.https.address",
+          "https_default_port": "8090",
+          "regex": "\\w*:(\\d+)",
+          "site": "mapred-site"
+        }
+      },
+      {
+        "name":"jobhistory_jmx",
+        "label":"JobHistory JMX",
+        "requires_user_name":"false",
+        "component_name": "HISTORYSERVER",
+        "url":"%@://%@:%@/jmx",
+        "port":{
+          "http_property": "mapreduce.jobhistory.webapp.address",
+          "http_default_port": "19888",
+          "https_property": "mapreduce.jobhistory.webapp.https.address",
+          "https_default_port": "8090",
+          "regex": "\\w*:(\\d+)",
+          "site": "mapred-site"
+        }
+      },
+      {
+        "name":"thread_stacks",
+        "label":"Thread Stacks",
+        "requires_user_name": "false",
+        "component_name": "HISTORYSERVER",
+        "url":"%@://%@:%@/stacks",
+        "port":{
+          "http_property": "mapreduce.jobhistory.webapp.address",
+          "http_default_port": "19888",
+          "https_property": "mapreduce.jobhistory.webapp.https.address",
+          "https_default_port": "8090",
+          "regex": "\\w*:(\\d+)",
+          "site": "mapred-site"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/quicklinks/quicklinks.json b/ambari-server/src/main/resources/common-services/YARN/3.0.0/quicklinks/quicklinks.json
new file mode 100644
index 0000000..37248d0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/quicklinks/quicklinks.json
@@ -0,0 +1,80 @@
+{
+  "name": "default",
+  "description": "default quick links configuration",
+  "configuration": {
+    "protocol":
+    {
+      "type":"https",
+      "checks":[
+        {
+          "property":"yarn.http.policy",
+          "desired":"HTTPS_ONLY",
+          "site":"yarn-site"
+        }
+      ]
+    },
+
+    "links": [
+      {
+        "name": "resourcemanager_ui",
+        "label": "ResourceManager UI",
+        "requires_user_name": "false",
+        "component_name": "RESOURCEMANAGER",
+        "url": "%@://%@:%@",
+        "port":{
+          "http_property": "yarn.resourcemanager.webapp.address",
+          "http_default_port": "8088",
+          "https_property": "yarn.resourcemanager.webapp.https.address",
+          "https_default_port": "8090",
+          "regex": "\\w*:(\\d+)",
+          "site": "yarn-site"
+        }
+      },
+      {
+        "name": "resourcemanager_logs",
+        "label": "ResourceManager logs",
+        "requires_user_name": "false",
+        "component_name": "RESOURCEMANAGER",
+        "url": "%@://%@:%@/logs",
+        "port":{
+          "http_property": "yarn.resourcemanager.webapp.address",
+          "http_default_port": "8088",
+          "https_property": "yarn.resourcemanager.webapp.https.address",
+          "https_default_port": "8090",
+          "regex": "\\w*:(\\d+)",
+          "site": "yarn-site"
+        }
+      },
+      {
+        "name": "resourcemanager_jmx",
+        "label":"ResourceManager JMX",
+        "requires_user_name": "false",
+        "component_name": "RESOURCEMANAGER",
+        "url":"%@://%@:%@/jmx",
+        "port":{
+          "http_property": "yarn.resourcemanager.webapp.address",
+          "http_default_port": "8088",
+          "https_property": "yarn.resourcemanager.webapp.https.address",
+          "https_default_port": "8090",
+          "regex": "\\w*:(\\d+)",
+          "site": "yarn-site"
+        }
+      },
+      {
+        "name": "thread_stacks",
+        "label":"Thread Stacks",
+        "requires_user_name": "false",
+        "component_name": "RESOURCEMANAGER",
+        "url":"%@://%@:%@/stacks",
+        "port":{
+          "http_property": "yarn.resourcemanager.webapp.address",
+          "http_default_port": "8088",
+          "https_property": "yarn.resourcemanager.webapp.https.address",
+          "https_default_port": "8090",
+          "regex": "\\w*:(\\d+)",
+          "site": "yarn-site"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file

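Both quicklinks.json files resolve the UI port the same way: read the site property named by http_property or https_property, apply the regex to capture the port, and fall back to the declared default when the property is absent. The sketch below is an illustrative reading of that contract, not Ambari's actual quick link resolution code:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class QuickLinkPortSketch {

  /**
   * Derives a quick link port from a *.webapp.address style value using the
   * regex declared in the quicklinks definition, with a default fallback.
   */
  static String resolvePort(String propertyValue, String regex, String defaultPort) {
    if (propertyValue != null) {
      Matcher matcher = Pattern.compile(regex).matcher(propertyValue);
      if (matcher.find()) {
        return matcher.group(1); // the (\d+) capture group is the port
      }
    }
    return defaultPort;
  }

  public static void main(String[] args) {
    // yarn.resourcemanager.webapp.address typically looks like "host:8088"
    System.out.println(resolvePort("rm-host.example.com:8088", "\\w*:(\\d+)", "8088"));
  }
}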
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/themes-mapred/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/themes-mapred/theme.json b/ambari-server/src/main/resources/common-services/YARN/3.0.0/themes-mapred/theme.json
new file mode 100644
index 0000000..5019447
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/themes-mapred/theme.json
@@ -0,0 +1,132 @@
+{
+  "name": "default",
+  "description": "Default theme for MAPREDUCE service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "settings",
+            "display-name": "Settings",
+            "layout": {
+              "tab-columns": "1",
+              "tab-rows": "1",
+              "sections": [
+                {
+                  "name": "section-mr-scheduler",
+                  "display-name": "MapReduce",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "3",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-mr-scheduler-row1-col1",
+                      "display-name": "MapReduce Framework",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-mr-scheduler-row1-col2",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-mr-scheduler-row1-col3",
+                      "row-index": "0",
+                      "column-index": "2",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-mr-scheduler-row2-col1",
+                      "display-name": "MapReduce AppMaster",
+                      "row-index": "1",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "3"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "mapred-site/mapreduce.map.memory.mb",
+          "subsection-name": "subsection-mr-scheduler-row1-col1"
+        },
+        {
+          "config": "mapred-site/mapreduce.reduce.memory.mb",
+          "subsection-name": "subsection-mr-scheduler-row1-col2"
+        },
+        {
+          "config": "mapred-site/yarn.app.mapreduce.am.resource.mb",
+          "subsection-name": "subsection-mr-scheduler-row2-col1"
+        },
+        {
+          "config": "mapred-site/mapreduce.task.io.sort.mb",
+          "subsection-name": "subsection-mr-scheduler-row1-col3"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "mapred-site/mapreduce.map.memory.mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "mapred-site/mapreduce.reduce.memory.mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "mapred-site/yarn.app.mapreduce.am.resource.mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "mapred-site/mapreduce.task.io.sort.mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      }
+    ]
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/themes/theme.json b/ambari-server/src/main/resources/common-services/YARN/3.0.0/themes/theme.json
new file mode 100644
index 0000000..758cf0c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/themes/theme.json
@@ -0,0 +1,250 @@
+{
+  "name": "default",
+  "description": "Default theme for YARN service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "settings",
+            "display-name": "Settings",
+            "layout": {
+              "tab-columns": "3",
+              "tab-rows": "2",
+              "sections": [
+                {
+                  "name": "section-nm-sizing",
+                  "display-name": "Memory",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "2",
+                  "section-columns": "2",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-nm-sizing-col1",
+                      "display-name": "Node",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-nm-sizing-col2",
+                      "display-name": "Container",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-yarn-platform-features",
+                  "display-name": "YARN Features",
+                  "row-index": "0",
+                  "column-index": "2",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-yarn-platform-features-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-container-sizing",
+                  "display-name": "CPU",
+                  "row-index": "1",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "2",
+                  "section-columns": "2",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-container-sizing-col1",
+                      "display-name": "Node",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-container-sizing-col2",
+                      "display-name": "Container",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "yarn-site/yarn.nodemanager.resource.memory-mb",
+          "subsection-name": "subsection-nm-sizing-col1"
+        },
+        {
+          "config": "yarn-site/yarn.scheduler.minimum-allocation-mb",
+          "subsection-name": "subsection-nm-sizing-col2"
+        },
+        {
+          "config": "yarn-site/yarn.scheduler.maximum-allocation-mb",
+          "subsection-name": "subsection-nm-sizing-col2"
+        },
+        {
+          "config": "yarn-site/yarn.node-labels.enabled",
+          "subsection-name": "subsection-yarn-platform-features-col1"
+        },
+        {
+          "config": "yarn-site/yarn.resourcemanager.scheduler.monitor.enable",
+          "subsection-name": "subsection-yarn-platform-features-col1"
+        },
+        {
+          "config": "capacity-scheduler/yarn.scheduler.capacity.resource-calculator",
+          "subsection-name": "subsection-container-sizing-col1"
+        },
+        {
+          "config": "yarn-env/yarn_cgroups_enabled",
+          "subsection-name": "subsection-container-sizing-col1"
+        },
+        {
+          "config": "yarn-site/yarn.nodemanager.resource.percentage-physical-cpu-limit",
+          "subsection-name": "subsection-container-sizing-col1"
+        },
+        {
+          "config": "yarn-site/yarn.nodemanager.resource.cpu-vcores",
+          "subsection-name": "subsection-container-sizing-col1"
+        },
+        {
+          "config": "yarn-site/yarn.scheduler.minimum-allocation-vcores",
+          "subsection-name": "subsection-container-sizing-col2"
+        },
+        {
+          "config": "yarn-site/yarn.scheduler.maximum-allocation-vcores",
+          "subsection-name": "subsection-container-sizing-col2"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "yarn-site/yarn.nodemanager.resource.memory-mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.nodemanager.resource.percentage-physical-cpu-limit",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "percent"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.nodemanager.resource.cpu-vcores",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.scheduler.minimum-allocation-mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.scheduler.maximum-allocation-mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.scheduler.minimum-allocation-vcores",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.scheduler.maximum-allocation-vcores",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.node-labels.enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "yarn-env/yarn_cgroups_enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "yarn-site/yarn.resourcemanager.scheduler.monitor.enable",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "capacity-scheduler/yarn.scheduler.capacity.resource-calculator",
+        "widget": {
+          "type": "toggle"
+        }
+      }
+    ]
+  }
+}
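
In this YARN theme the placement block assigns each config key to one of the subsections laid out in the Settings tab grid, and the widgets block picks the control (slider or toggle) for it. A minimal consistency check, again assuming Python 3 and a local copy saved as theme.json, that reports any config placed without a widget or given a widget without a placement:

import json

# Compare the set of configs placed into subsections with the set that
# have widget definitions; both live under the "configuration" key.
with open("theme.json") as fh:
    cfg = json.load(fh)["configuration"]

placed = {c["config"] for c in cfg["placement"]["configs"]}
widgets = {w["config"] for w in cfg["widgets"]}
for key in sorted(placed - widgets):
    print("placed but no widget:", key)
for key in sorted(widgets - placed):
    print("widget but not placed:", key)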

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.9/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.9/metainfo.xml b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.9/metainfo.xml
new file mode 100644
index 0000000..79bf5f1
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.9/metainfo.xml
@@ -0,0 +1,51 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ZOOKEEPER</name>
+      <extends>common-services/ZOOKEEPER/3.4.6</extends>
+      <version>3.4.9</version>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>amazon2015,redhat6,redhat7,suse11,suse12</osFamily>
+          <packages>
+            <package>
+              <name>zookeeper_${stack_version}</name>
+            </package>
+            <package>
+              <name>zookeeper_${stack_version}-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu12,ubuntu14,ubuntu16</osFamily>
+          <packages>
+            <package>
+              <name>zookeeper-${stack_version}</name>
+            </package>
+            <package>
+              <name>zookeeper-${stack_version}-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+    </service>
+  </services>
+</metainfo>
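
The metainfo above defines ZooKeeper 3.4.9 by extending the common-services ZOOKEEPER/3.4.6 definition, overriding only the version and the per-OS-family package names, where ${stack_version} is filled in at install time. A small sketch, assuming Python 3, a local copy saved as metainfo.xml, and an illustrative stack version string, that prints the declared packages per OS family:

import xml.etree.ElementTree as ET

# Illustrative stack version token; the real value comes from the stack.
STACK_VERSION = "3_0_0_0_1"

root = ET.parse("metainfo.xml").getroot()
for os_specific in root.iter("osSpecific"):
    families = os_specific.findtext("osFamily")
    names = [p.findtext("name").replace("${stack_version}", STACK_VERSION)
             for p in os_specific.iter("package")]
    print(families, "->", ", ".join(names))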

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/core-site.xml
deleted file mode 100644
index 9dcf561..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/core-site.xml
+++ /dev/null
@@ -1,56 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
- 
-        http://www.apache.org/licenses/LICENSE-2.0
- 
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
- -->
-<!-- Put site-specific property overrides in this file. -->
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
-  <!-- These configs were inherited from HDP 2.2 -->
-  <property>
-    <name>hadoop.http.authentication.simple.anonymous.allowed</name>
-    <value>true</value>
-    <description>
-      Indicates if anonymous requests are allowed when using &apos;simple&apos; authentication.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>hadoop.security.key.provider.path</name>
-    <value/>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>hadoop-env</type>
-        <name>keyserver_host</name>
-      </property>
-      <property>
-        <type>hadoop-env</type>
-        <name>keyserver_port</name>
-      </property>
-      <property>
-        <type>kms-env</type>
-        <name>kms_port</name>
-      </property>
-      <property>
-        <type>ranger-kms-site</type>
-        <name>ranger.service.https.attrib.ssl.enabled</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hadoop-env.xml
index 8697740..e680c1b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hadoop-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hadoop-env.xml
@@ -20,29 +20,6 @@
  */
 -->
 <configuration supports_adding_forbidden="true">
-  <!-- These configs were inherited from HDP 2.2 -->
-  <property>
-    <name>keyserver_host</name>
-    <value> </value>
-    <display-name>Key Server Host</display-name>
-    <description>Hostnames where Key Management Server is installed</description>
-    <value-attributes>
-      <type>string</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>keyserver_port</name>
-    <value/>
-    <display-name>Key Server Port</display-name>
-    <description>Port number where Key Management Server is available</description>
-    <value-attributes>
-      <type>int</type>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
   <!-- These configs were inherited from HDP 2.3 -->
   <!-- hadoop-env.sh -->
   <property>
@@ -50,151 +27,140 @@
     <display-name>hadoop-env template</display-name>
     <description>This is the jinja template for hadoop-env.sh file</description>
     <value>
-# Set Hadoop-specific environment variables here.
+      # Set Hadoop-specific environment variables here.
 
-# The only required environment variable is JAVA_HOME.  All others are
-# optional.  When running a distributed configuration it is best to
-# set JAVA_HOME in this file, so that it is correctly defined on
-# remote nodes.
+      # The only required environment variable is JAVA_HOME.  All others are
+      # optional.  When running a distributed configuration it is best to
+      # set JAVA_HOME in this file, so that it is correctly defined on
+      # remote nodes.
 
-# The java implementation to use.  Required.
-export JAVA_HOME={{java_home}}
-export HADOOP_HOME_WARN_SUPPRESS=1
+      # The java implementation to use.  Required.
+      export JAVA_HOME={{java_home}}
+      export HADOOP_HOME_WARN_SUPPRESS=1
 
-# Hadoop home directory
-export HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+      # Hadoop home directory
+      export HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
 
-# Hadoop Configuration Directory
+      # Hadoop Configuration Directory
 
-{# this is different for HDP1 #}
-# Path to jsvc required by secure HDP 2.0 datanode
-export JSVC_HOME={{jsvc_path}}
+      {# this is different for HDP1 #}
+      # Path to jsvc required by secure HDP 2.0 datanode
+      export JSVC_HOME={{jsvc_path}}
 
 
-# The maximum amount of heap to use, in MB. Default is 1000.
-export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+      # The maximum amount of heap to use, in MB. Default is 1000.
+      export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
 
-export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+      export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
 
-# Extra Java runtime options.  Empty by default.
-export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+      # Extra Java runtime options.  Empty by default.
+      export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
 
-# Command specific options appended to HADOOP_OPTS when specified
-HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+      # Command specific options appended to HADOOP_OPTS when specified
+      HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
 
-HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+      HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
 
-{% if java_version &lt; 8 %}
-SHARED_HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT"
-export HADOOP_NAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
-export HADOOP_DATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly"
+      {% if java_version &lt; 8 %}
+      SHARED_HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT"
+      export HADOOP_NAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
+      export HADOOP_DATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly"
 
-export HADOOP_SECONDARYNAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\" ${HADOOP_SECONDARYNAMENODE_OPTS}"
+      export HADOOP_SECONDARYNAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\" ${HADOOP_SECONDARYNAMENODE_OPTS}"
 
-# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS"
+      # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+      export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS"
 
-{% else %}
-SHARED_HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT"
-export HADOOP_NAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
-export HADOOP_DATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly"
+      {% else %}
+      SHARED_HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT"
+      export HADOOP_NAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
+      export HADOOP_DATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly"
 
-export HADOOP_SECONDARYNAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\" ${HADOOP_SECONDARYNAMENODE_OPTS}"
+      export HADOOP_SECONDARYNAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\" ${HADOOP_SECONDARYNAMENODE_OPTS}"
 
-# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
-{% endif %}
+      # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+      export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+      {% endif %}
 
-HADOOP_NFS3_OPTS="-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}"
-HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+      HADOOP_NFS3_OPTS="-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}"
+      HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
 
 
-# On secure datanodes, user to run the datanode as after dropping privileges
-export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}
+      # On secure datanodes, user to run the datanode as after dropping privileges
+      export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}
 
-# Extra ssh options.  Empty by default.
-export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+      # Extra ssh options.  Empty by default.
+      export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
 
-# Where log files are stored.  $HADOOP_HOME/logs by default.
-export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+      # Where log files are stored.  $HADOOP_HOME/logs by default.
+      export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
 
-# History server logs
-export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+      # History server logs
+      export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
 
-# Where log files are stored in the secure data environment.
-export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+      # Where log files are stored in the secure data environment.
+      export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
 
-# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
-# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+      # File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+      # export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
 
-# host:path where hadoop code should be rsync'd from.  Unset by default.
-# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+      # host:path where hadoop code should be rsync'd from.  Unset by default.
+      # export HADOOP_MASTER=master:/home/$USER/src/hadoop
 
-# Seconds to sleep between slave commands.  Unset by default.  This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HADOOP_SLAVE_SLEEP=0.1
+      # Seconds to sleep between slave commands.  Unset by default.  This
+      # can be useful in large clusters, where, e.g., slave rsyncs can
+      # otherwise arrive faster than the master can service them.
+      # export HADOOP_SLAVE_SLEEP=0.1
 
-# The directory where pid files are stored. /tmp by default.
-export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
-export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+      # The directory where pid files are stored. /tmp by default.
+      export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+      export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
 
-# History server pid
-export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+      # History server pid
+      export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
 
-YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+      YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
 
-# A string representing this instance of hadoop. $USER by default.
-export HADOOP_IDENT_STRING=$USER
+      # A string representing this instance of hadoop. $USER by default.
+      export HADOOP_IDENT_STRING=$USER
 
-# The scheduling priority for daemon processes.  See 'man nice'.
+      # The scheduling priority for daemon processes.  See 'man nice'.
 
-# export HADOOP_NICENESS=10
+      # export HADOOP_NICENESS=10
 
-# Add database libraries
-JAVA_JDBC_LIBS=""
-if [ -d "/usr/share/java" ]; then
-  for jarFile in `ls /usr/share/java | grep -E "(mysql|ojdbc|postgresql|sqljdbc)" 2&gt;/dev/null`
-  do
-    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
-  done
-fi
+      # Add database libraries
+      JAVA_JDBC_LIBS=""
+      if [ -d "/usr/share/java" ]; then
+      for jarFile in `ls /usr/share/java | grep -E "(mysql|ojdbc|postgresql|sqljdbc)" 2&gt;/dev/null`
+      do
+      JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+      done
+      fi
 
-# Add libraries to the hadoop classpath - some may not need a colon as they already include it
-export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}
+      # Add libraries to the hadoop classpath - some may not need a colon as they already include it
+      export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}
 
-# Setting path to hdfs command line
-export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+      # Setting path to hdfs command line
+      export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
 
-# Mostly required for hadoop 2.0
-export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}
+      # Mostly required for hadoop 2.0
+      export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}
 
-export HADOOP_OPTS="-Dhdp.version=$HDP_VERSION $HADOOP_OPTS"
+      export HADOOP_OPTS="-Dhdp.version=$HDP_VERSION $HADOOP_OPTS"
 
 
-# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. 
-# Makes sense to fix only when runing DN as root 
-if [ "$command" == "datanode" ] &amp;&amp; [ "$EUID" -eq 0 ] &amp;&amp; [ -n "$HADOOP_SECURE_DN_USER" ]; then
-  {% if is_datanode_max_locked_memory_set %}
-  ulimit -l {{datanode_max_locked_memory}}
-  {% endif %}
-  ulimit -n {{hdfs_user_nofile_limit}}
-fi
+      # Fix temporary bug, when ulimit from conf files is not picked up, without full relogin.
+      # Makes sense to fix only when runing DN as root
+      if [ "$command" == "datanode" ] &amp;&amp; [ "$EUID" -eq 0 ] &amp;&amp; [ -n "$HADOOP_SECURE_DN_USER" ]; then
+      {% if is_datanode_max_locked_memory_set %}
+      ulimit -l {{datanode_max_locked_memory}}
+      {% endif %}
+      ulimit -n {{hdfs_user_nofile_limit}}
+      fi
     </value>
     <value-attributes>
       <type>content</type>
     </value-attributes>
     <on-ambari-upgrade add="true"/>
   </property>
-  <property>
-    <name>nfsgateway_heapsize</name>
-    <display-name>NFSGateway maximum Java heap size</display-name>
-    <value>1024</value>
-    <description>Maximum Java heap size for NFSGateway (Java option -Xmx)</description>
-    <value-attributes>
-      <type>int</type>
-      <unit>MB</unit>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
 </configuration>
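
The content property above is a Jinja template that ends up as hadoop-env.sh on the hosts; placeholders such as {{java_home}} and the java_version branch are resolved at deploy time. A standalone sketch of that substitution, using the third-party jinja2 package rather than Ambari's own rendering path, with sample values that are not taken from this patch:

import jinja2  # third-party: pip install jinja2

# Fragment of the template above; the values passed to render() are samples.
fragment = """
export JAVA_HOME={{ java_home }}
{% if java_version < 8 %}
export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS"
{% else %}
export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
{% endif %}
"""
print(jinja2.Template(fragment).render(java_home="/usr/jdk64/jdk1.8.0_112",
                                       java_version=8))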

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hdfs-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hdfs-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hdfs-log4j.xml
deleted file mode 100644
index 215a6ee..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hdfs-log4j.xml
+++ /dev/null
@@ -1,226 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <!-- These configs were inherited from HDP 2.2 -->
-  <property>
-    <name>content</name>
-    <display-name>hdfs-log4j template</display-name>
-    <description>Custom log4j.properties</description>
-    <value>
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-
-# Define some default values that can be overridden by system properties
-# To change daemon root logger use hadoop_root_logger in hadoop-env
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshhold=ALL
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollver at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-#Security audit appender
-#
-hadoop.security.logger=INFO,console
-hadoop.security.log.maxfilesize=256MB
-hadoop.security.log.maxbackupindex=20
-log4j.category.SecurityLogger=${hadoop.security.logger}
-hadoop.security.log.file=SecurityAuth.audit
-log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
-
-log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
-log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
-log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
-
-#
-# hdfs audit logging
-#
-hdfs.audit.logger=INFO,console
-log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
-log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
-log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
-log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
-
-#
-# NameNode metrics logging.
-# The default is to retain two namenode-metrics.log files up to 64MB each.
-#
-namenode.metrics.logger=INFO,NullAppender
-log4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}
-log4j.additivity.NameNodeMetricsLog=false
-log4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender
-log4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log
-log4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout
-log4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n
-log4j.appender.NNMETRICSRFA.MaxBackupIndex=1
-log4j.appender.NNMETRICSRFA.MaxFileSize=64MB
-
-#
-# mapred audit logging
-#
-mapred.audit.logger=INFO,console
-log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
-log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
-log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
-log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
-
-#
-# Rolling File Appender
-#
-
-log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Logfile size and and 30-day backups
-log4j.appender.RFA.MaxFileSize=256MB
-log4j.appender.RFA.MaxBackupIndex=10
-
-log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-# Custom Logging levels
-
-hadoop.metrics.log.level=INFO
-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
-
-# Jets3t library
-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
-#
-# Null Appender
-# Trap security logger on the hadoop client side
-#
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
-
-# Removes "deprecated" messages
-log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
-
-#
-# HDFS block state change log from block manager
-#
-# Uncomment the following to suppress normal block state change
-# messages from BlockManager in NameNode.
-#log4j.logger.BlockStateChange=WARN
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hdfs-site.xml
deleted file mode 100644
index ac141d1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hdfs-site.xml
+++ /dev/null
@@ -1,153 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<!-- Put site-specific property overrides in this file. -->
-<configuration supports_final="true">
-  <!-- These configs were inherited from HDP 2.1 -->
-  <property>
-    <name>dfs.namenode.audit.log.async</name>
-    <value>true</value>
-    <description>Whether to enable async auditlog</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.namenode.fslock.fair</name>
-    <value>false</value>
-    <description>Whether fsLock is fair</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <!-- These configs were inherited from HDP 2.2 -->
-  <property>
-    <name>dfs.namenode.startup.delay.block.deletion.sec</name>
-    <value>3600</value>
-    <description>
-      The delay in seconds at which we will pause the blocks deletion
-      after Namenode startup. By default it's disabled.
-      In the case a directory has large number of directories and files are
-      deleted, suggested delay is one hour to give the administrator enough time
-      to notice large number of pending deletion blocks and take corrective
-      action.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.journalnode.edits.dir</name>
-    <value>/hadoop/hdfs/journalnode</value>
-    <description>The path where the JournalNode daemon will store its local state. </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.client.retry.policy.enabled</name>
-    <value>false</value>
-    <description>Enables HDFS client retry in the event of a NameNode failure.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.content-summary.limit</name>
-    <value>5000</value>
-    <description>Dfs content summary limit.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.encryption.key.provider.uri</name>
-    <description>
-      The KeyProvider to use when interacting with encryption keys used
-      when reading and writing to an encryption zone.
-    </description>
-    <value/>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>hadoop-env</type>
-        <name>keyserver_host</name>
-      </property>
-      <property>
-        <type>hadoop-env</type>
-        <name>keyserver_port</name>
-      </property>
-      <property>
-        <type>kms-env</type>
-        <name>kms_port</name>
-      </property>
-      <property>
-        <type>ranger-kms-site</type>
-        <name>ranger.service.https.attrib.ssl.enabled</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <!-- These configs were inherited from HDP 2.3 -->
-  <property>
-    <name>nfs.file.dump.dir</name>
-    <value>/tmp/.hdfs-nfs</value>
-    <display-name>NFSGateway dump directory</display-name>
-    <description>
-      This directory is used to temporarily save out-of-order writes before
-      writing to HDFS. For each file, the out-of-order writes are dumped after
-      they are accumulated to exceed certain threshold (e.g., 1MB) in memory.
-      One needs to make sure the directory has enough space.
-    </description>
-    <value-attributes>
-      <type>directory</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>nfs.exports.allowed.hosts</name>
-    <value>* rw</value>
-    <description>
-      By default, the export can be mounted by any client. To better control the access,
-      users can update the following property. The value string contains machine name and access privilege,
-      separated by whitespace characters. Machine name format can be single host, wildcards, and IPv4
-      networks.The access privilege uses rw or ro to specify readwrite or readonly access of the machines
-      to exports. If the access privilege is not provided, the default is read-only. Entries are separated
-      by &quot;;&quot;. For example: &quot;192.168.0.0/22 rw ; host*.example.com ; host1.test.org ro;&quot;.
-    </description>
-    <display-name>Allowed hosts</display-name>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.encrypt.data.transfer.cipher.suites</name>
-    <value>AES/CTR/NoPadding</value>
-    <description>
-      This value may be either undefined or AES/CTR/NoPadding. If defined, then 
-      dfs.encrypt.data.transfer uses the specified cipher suite for data encryption. 
-      If not defined, then only the algorithm specified in dfs.encrypt.data.transfer.algorithm 
-      is used. By default, the property is not defined.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.namenode.inode.attributes.provider.class</name>
-    <description>Enable ranger hdfs plugin</description>
-    <depends-on>
-      <property>
-        <type>ranger-hdfs-plugin-properties</type>
-        <name>ranger-hdfs-plugin-enabled</name>
-      </property>
-    </depends-on>
-    <value-attributes>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-audit.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-audit.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-audit.xml
deleted file mode 100644
index fd41817..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-audit.xml
+++ /dev/null
@@ -1,217 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <!-- These configs were inherited from HDP 2.3 -->
-  <property>
-    <name>xasecure.audit.is.enabled</name>
-    <value>true</value>
-    <description>Is Audit enabled?</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db</name>
-    <value>false</value>
-    <display-name>Audit to DB</display-name>
-    <description>Is Audit to DB enabled?</description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>ranger-env</type>
-        <name>xasecure.audit.destination.db</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.jdbc.url</name>
-    <value>{{audit_jdbc_url}}</value>
-    <description>Audit DB JDBC URL</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.user</name>
-    <value>{{xa_audit_db_user}}</value>
-    <description>Audit DB JDBC User</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.password</name>
-    <value>crypted</value>
-    <property-type>PASSWORD</property-type>
-    <description>Audit DB JDBC Password</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.jdbc.driver</name>
-    <value>{{jdbc_driver}}</value>
-    <description>Audit DB JDBC Driver</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.credential.provider.file</name>
-    <value>jceks://file{{credential_file}}</value>
-    <description>Credential file store</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.batch.filespool.dir</name>
-    <value>/var/log/hadoop/hdfs/audit/db/spool</value>
-    <description>/var/log/hadoop/hdfs/audit/db/spool</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.hdfs</name>
-    <value>true</value>
-    <display-name>Audit to HDFS</display-name>
-    <description>Is Audit to HDFS enabled?</description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>ranger-env</type>
-        <name>xasecure.audit.destination.hdfs</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.hdfs.dir</name>
-    <value>hdfs://NAMENODE_HOSTNAME:8020/ranger/audit</value>
-    <description>HDFS folder to write audit to, make sure the service user has requried permissions</description>
-    <depends-on>
-      <property>
-        <type>ranger-env</type>
-        <name>xasecure.audit.destination.hdfs.dir</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
-    <value>/var/log/hadoop/hdfs/audit/hdfs/spool</value>
-    <description>/var/log/hadoop/hdfs/audit/hdfs/spool</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.solr</name>
-    <value>false</value>
-    <display-name>Audit to SOLR</display-name>
-    <description>Is Solr audit enabled?</description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>ranger-env</type>
-        <name>xasecure.audit.destination.solr</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.solr.urls</name>
-    <value/>
-    <description>Solr URL</description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>ranger-admin-site</type>
-        <name>ranger.audit.solr.urls</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.solr.zookeepers</name>
-    <value>NONE</value>
-    <description>Solr Zookeeper string</description>
-    <depends-on>
-      <property>
-        <type>ranger-admin-site</type>
-        <name>ranger.audit.solr.zookeepers</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
-    <value>/var/log/hadoop/hdfs/audit/solr/spool</value>
-    <description>/var/log/hadoop/hdfs/audit/solr/spool</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.provider.summary.enabled</name>
-    <value>false</value>
-    <display-name>Audit provider summary enabled</display-name>
-    <description>Enable Summary audit?</description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <!-- These configs are deleted in HDP 2.5. -->
-  <property>
-    <name>xasecure.audit.destination.db</name>
-    <deleted>true</deleted>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.jdbc.url</name>
-    <deleted>true</deleted>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.user</name>
-    <deleted>true</deleted>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.password</name>
-    <deleted>true</deleted>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.jdbc.driver</name>
-    <deleted>true</deleted>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.credential.provider.file</name>
-    <deleted>true</deleted>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.batch.filespool.dir</name>
-    <deleted>true</deleted>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
deleted file mode 100644
index b31742c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
+++ /dev/null
@@ -1,98 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="true">
-  <!-- These configs were inherited from HDP 2.2 -->
-  <property>
-    <name>policy_user</name>
-    <value>ambari-qa</value>
-    <display-name>Policy user for HDFS</display-name>
-    <description>This user must be system user and also present at Ranger
-      admin portal</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>hadoop.rpc.protection</name>
-    <value/>
-    <description>Used for repository creation on ranger admin
-    </description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>common.name.for.certificate</name>
-    <value/>
-    <description>Common name for certificate, this value should match what is specified in repo within ranger admin</description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger-hdfs-plugin-enabled</name>
-    <value>No</value>
-    <display-name>Enable Ranger for HDFS</display-name>
-    <description>Enable ranger hdfs plugin</description>
-    <depends-on>
-      <property>
-        <type>ranger-env</type>
-        <name>ranger-hdfs-plugin-enabled</name>
-      </property>
-    </depends-on>
-    <value-attributes>
-      <type>boolean</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>REPOSITORY_CONFIG_USERNAME</name>
-    <value>hadoop</value>
-    <display-name>Ranger repository config user</display-name>
-    <description>Used for repository creation on ranger admin
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>REPOSITORY_CONFIG_PASSWORD</name>
-    <value>hadoop</value>
-    <display-name>Ranger repository config password</display-name>
-    <property-type>PASSWORD</property-type>
-    <description>Used for repository creation on ranger admin
-    </description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <!-- These configs were inherited from HDP 2.5 -->
-  <property>
-    <name>hadoop.rpc.protection</name>
-    <value>authentication</value>
-    <description>Used for repository creation on ranger admin</description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="false" />
-  </property>
-</configuration>
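
For context on the layout removed above: a stack configuration file of this kind is a flat <configuration> of <property> entries, each carrying a name, a value, a description, and optional metadata such as <value-attributes>/<empty-value-valid> and the PASSWORD property type. The following is a minimal sketch of reading such a file into Python dictionaries; load_stack_config and the file name passed to it are illustrative assumptions, not Ambari's actual configuration loader, and in this sketch a later duplicate of a name (such as hadoop.rpc.protection above) simply overwrites the earlier one.

# Minimal sketch (not Ambari's loader): parse a stack configuration XML such as
# ranger-hdfs-plugin-properties.xml into {name: value} plus per-property metadata.
import xml.etree.ElementTree as ET

def load_stack_config(path):
    """Return ({name: value}, {name: metadata}) from a <configuration> file."""
    values, metadata = {}, {}
    root = ET.parse(path).getroot()                 # the <configuration> element
    for prop in root.findall("property"):
        name = prop.findtext("name")
        if not name:
            continue
        # Later duplicates of the same name overwrite earlier ones here.
        values[name] = prop.findtext("value", default="")
        metadata[name] = {
            "description": (prop.findtext("description") or "").strip(),
            "password": prop.findtext("property-type") == "PASSWORD",
            # <empty-value-valid> lives under <value-attributes>
            "empty_ok": prop.find("value-attributes/empty-value-valid") is not None,
        }
    return values, metadata

values, meta = load_stack_config("ranger-hdfs-plugin-properties.xml")
print(values.get("ranger-hdfs-plugin-enabled"), meta.get("REPOSITORY_CONFIG_PASSWORD"))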

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-policymgr-ssl.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-policymgr-ssl.xml
deleted file mode 100644
index 1bc83df..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-policymgr-ssl.xml
+++ /dev/null
@@ -1,67 +0,0 @@
-<?xml version="1.0"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <!-- These configs were inherited from HDP 2.3 -->
-  <property>
-    <name>xasecure.policymgr.clientssl.keystore</name>
-    <value>/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks</value>
-    <description>Java Keystore files</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.keystore.password</name>
-    <value>myKeyFilePassword</value>
-    <property-type>PASSWORD</property-type>
-    <description>password for keystore</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.truststore</name>
-    <value>/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks</value>
-    <description>java truststore file</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.truststore.password</name>
-    <value>changeit</value>
-    <property-type>PASSWORD</property-type>
-    <description>java truststore password</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
-    <value>jceks://file{{credential_file}}</value>
-    <description>java keystore credential file</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
-    <value>jceks://file{{credential_file}}</value>
-    <description>java truststore credential file</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
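
Several defaults above, such as the keystore and truststore credential-file entries, keep {{...}} placeholders that are rendered with cluster-specific values at deploy time. Below is a minimal sketch of that substitution step, assuming only a plain {{name}} -> value mapping rather than Ambari's actual template engine; the render helper and the credential path used in the example are illustrative.

# Minimal sketch (not Ambari's templating): resolve "{{var}}" placeholders such as
# {{credential_file}} in the xasecure.policymgr.clientssl.*.credential.file defaults.
import re

PLACEHOLDER = re.compile(r"\{\{\s*([A-Za-z0-9_]+)\s*\}\}")

def render(value, params):
    """Replace each {{name}} with params[name]; unknown names are left untouched."""
    return PLACEHOLDER.sub(lambda m: str(params.get(m.group(1), m.group(0))), value)

# Example with a made-up credential path:
print(render("jceks://file{{credential_file}}",
             {"credential_file": "/etc/ranger/hdfs/cred.jceks"}))
# -> jceks://file/etc/ranger/hdfs/cred.jceks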

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-security.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-security.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-security.xml
deleted file mode 100644
index 1b0a821..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-security.xml
+++ /dev/null
@@ -1,65 +0,0 @@
-<?xml version="1.0"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <!-- These configs were inherited from HDP 2.3 -->
-  <property>
-    <name>ranger.plugin.hdfs.service.name</name>
-    <value>{{repo_name}}</value>
-    <description>Name of the Ranger service containing Hdfs policies</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.hdfs.policy.source.impl</name>
-    <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
-    <description>Class to retrieve policies from the source</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.hdfs.policy.rest.url</name>
-    <value>{{policymgr_mgr_url}}</value>
-    <description>URL to Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.hdfs.policy.rest.ssl.config.file</name>
-    <value>/etc/hadoop/conf/ranger-policymgr-ssl.xml</value>
-    <description>Path to the file containing SSL details to contact Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.hdfs.policy.pollIntervalMs</name>
-    <value>30000</value>
-    <description>How often (in milliseconds) to poll for changes in policies</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.hdfs.policy.cache.dir</name>
-    <value>/etc/ranger/{{repo_name}}/policycache</value>
-    <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.add-hadoop-authorization</name>
-    <value>true</value>
-    <description>Enable or disable the default Hadoop authorization (based on the rwxrwxrwx permissions on the resource) when Ranger authorization fails.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
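
The rendered values for properties like ranger.plugin.hdfs.service.name ({{repo_name}}) and ranger.plugin.hdfs.policy.rest.url ({{policymgr_mgr_url}}) ultimately land in a Hadoop-style XML file on the hosts. A minimal sketch of serializing such a mapping back into the <configuration>/<property> layout shown above follows; write_hadoop_xml, the service name cl1_hadoop, and the Ranger Admin URL are illustrative assumptions, not Ambari's writer.

# Minimal sketch (illustrative only): write a {name: value} mapping out in the
# <configuration>/<property> layout used by ranger-hdfs-security.xml above.
import xml.etree.ElementTree as ET

def write_hadoop_xml(path, properties):
    """Serialize a flat property mapping into a <configuration> XML file."""
    root = ET.Element("configuration")
    for name, value in sorted(properties.items()):
        prop = ET.SubElement(root, "property")
        ET.SubElement(prop, "name").text = name
        ET.SubElement(prop, "value").text = str(value)
    ET.ElementTree(root).write(path, encoding="utf-8", xml_declaration=True)

write_hadoop_xml("ranger-hdfs-security.xml", {
    "ranger.plugin.hdfs.service.name": "cl1_hadoop",                  # rendered {{repo_name}}; made-up value
    "ranger.plugin.hdfs.policy.rest.url": "http://ranger-admin:6080", # made-up Ranger Admin URL
    "ranger.plugin.hdfs.policy.pollIntervalMs": 30000,
    "xasecure.add-hadoop-authorization": "true",
})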


[30/51] [abbrv] ambari git commit: AMBARI-19137. HDP 3.0 TP - move ZK, HDFS, YARN/MR into new common-services version (alejandro)

Posted by sm...@apache.org.
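
The YARN_metrics.json added below maps API metric paths to backing metric names plus pointInTime/temporal flags, grouped by source type ("ganglia", "jmx") under Component and HostComponent scopes for each component such as NODEMANAGER. A minimal sketch of querying that structure, assuming only the layout visible in the diff; temporal_metrics is an illustrative helper, not part of Ambari's metrics providers.

# Minimal sketch: list the NODEMANAGER metrics flagged as temporal in YARN_metrics.json.
import json

def temporal_metrics(path, component="NODEMANAGER", scope="Component"):
    """Return (source type, metric key, backing metric) for entries flagged temporal."""
    with open(path) as fh:
        definition = json.load(fh)
    rows = []
    for source in definition[component][scope]:          # one entry per source type
        for key, spec in source["metrics"]["default"].items():
            if spec.get("temporal"):
                rows.append((source["type"], key, spec["metric"]))
    return rows

for source_type, key, metric in temporal_metrics("YARN_metrics.json")[:5]:
    print(source_type, key, "->", metric)
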
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/YARN/3.0.0/YARN_metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/YARN_metrics.json b/ambari-server/src/main/resources/common-services/YARN/3.0.0/YARN_metrics.json
new file mode 100644
index 0000000..a66bb34
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/YARN_metrics.json
@@ -0,0 +1,3486 @@
+{
+  "NODEMANAGER": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/cpu/cpu_idle": {
+              "metric": "cpu_idle",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/cpu/cpu_nice": {
+              "metric": "cpu_nice",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/cpu/cpu_system": {
+              "metric": "cpu_system",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/cpu/cpu_user": {
+              "metric": "cpu_user",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/cpu/cpu_wio": {
+              "metric": "cpu_wio",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/disk_free": {
+              "metric": "disk_free",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/disk_total": {
+              "metric": "disk_total",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/load/load_fifteen": {
+              "metric": "load_fifteen",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/load/load_five": {
+              "metric": "load_five",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/load/load_one": {
+              "metric": "load_one",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/memory/mem_buffered": {
+              "metric": "mem_buffered",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/memory/mem_cached": {
+              "metric": "mem_cached",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/memory/mem_free": {
+              "metric": "mem_free",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/memory/mem_shared": {
+              "metric": "mem_shared",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/memory/mem_total": {
+              "metric": "mem_total",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/memory/swap_free": {
+              "metric": "swap_free",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/network/bytes_in": {
+              "metric": "bytes_in",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/network/bytes_out": {
+              "metric": "bytes_out",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/network/pkts_in": {
+              "metric": "pkts_in",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/network/pkts_out": {
+              "metric": "pkts_out",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/process/proc_run": {
+              "metric": "proc_run",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/process/proc_total": {
+              "metric": "proc_total",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/read_count": {
+              "metric": "read_count",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/write_count": {
+              "metric": "write_count",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/read_bytes": {
+              "metric": "read_bytes",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/write_bytes": {
+              "metric": "write_bytes",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/disk/read_time": {
+              "metric": "read_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/write_time": {
+              "metric": "write_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/read_bps":{
+              "metric":"read_bps",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/write_bps":{
+              "metric":"write_bps",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/jvm/memHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/mapred/ShuffleOutputsFailed": {
+              "metric": "mapred.ShuffleMetrics.ShuffleOutputsFailed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/threadsRunnable": {
+              "metric": "jvm.JvmMetrics.ThreadsRunnable",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsNew": {
+              "metric": "jvm.JvmMetrics.ThreadsNew",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationFailures": {
+              "metric": "rpc.rpc.RpcAuthorizationFailures",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_avg_time": {
+              "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_avg_time": {
+              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "rpc.rpc.SentBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logWarn": {
+              "metric": "jvm.JvmMetrics.LogWarn",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTimedWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/gcCount": {
+              "metric": "jvm.JvmMetrics.GcCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "rpc.rpc.ReceivedBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsBlocked": {
+              "metric": "jvm.JvmMetrics.ThreadsBlocked",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_num_ops": {
+              "metric": "rpc.rpc.RpcQueueTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/AllocatedContainers": {
+              "metric": "yarn.NodeManagerMetrics.AllocatedContainers",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/AllocatedGB": {
+              "metric": "yarn.NodeManagerMetrics.AllocatedGB",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/AvailableGB": {
+              "metric": "yarn.NodeManagerMetrics.AvailableGB",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/AllocatedVCores": {
+              "metric": "yarn.NodeManagerMetrics.AllocatedVCores",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/AvailableVCores": {
+              "metric": "yarn.NodeManagerMetrics.AvailableVCores",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/BadLocalDirs": {
+              "metric": "yarn.NodeManagerMetrics.BadLocalDirs",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/BadLogDirs": {
+              "metric": "yarn.NodeManagerMetrics.BadLogDirs",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainerLaunchDurationAvgTime": {
+              "metric": "yarn.NodeManagerMetrics.ContainerLaunchDurationAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainerLaunchDurationNumOps": {
+              "metric": "yarn.NodeManagerMetrics.ContainerLaunchDurationNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersCompleted": {
+              "metric": "yarn.NodeManagerMetrics.ContainersCompleted",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersFailed": {
+              "metric": "yarn.NodeManagerMetrics.ContainersFailed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersIniting": {
+              "metric": "yarn.NodeManagerMetrics.ContainersIniting",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersKilled": {
+              "metric": "yarn.NodeManagerMetrics.ContainersKilled",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersLaunched": {
+              "metric": "yarn.NodeManagerMetrics.ContainersLaunched",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersRunning": {
+              "metric": "yarn.NodeManagerMetrics.ContainersRunning",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/GoodLocalDirsDiskUtilizationPerc": {
+              "metric": "yarn.NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/GoodLogDirsDiskUtilizationPerc": {
+              "metric": "yarn.NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "rpc.rpc.NumOpenConnections",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemHeapUsedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/mapred/ShuffleOutputsOK": {
+              "metric": "mapred.ShuffleMetrics.ShuffleOutputsOK",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_num_ops": {
+              "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/gcTimeMillis": {
+              "metric": "jvm.JvmMetrics.GcTimeMillis",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTerminated": {
+              "metric": "jvm.JvmMetrics.ThreadsTerminated",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/callQueueLen": {
+              "metric": "rpc.rpc.CallQueueLength",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logInfo": {
+              "metric": "jvm.JvmMetrics.LogInfo",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_num_ops": {
+              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_num_ops": {
+              "metric": "ugi.UgiMetrics.LoginFailureNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/mapred/ShuffleConnections": {
+              "metric": "mapred.ShuffleMetrics.ShuffleConnections",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/logError": {
+              "metric": "jvm.JvmMetrics.LogError",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_avg_time": {
+              "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationSuccesses": {
+              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logFatal": {
+              "metric": "jvm.JvmMetrics.LogFatal",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_avg_time": {
+              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/mapred/ShuffleOutputBytes": {
+              "metric": "mapred.ShuffleMetrics.ShuffleOutputBytes",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationSuccesses": {
+              "metric": "rpc.rpc.RpcAuthenticationSuccesses",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationFailures": {
+              "metric": "rpc.rpc.RpcAuthenticationFailures",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcCountCopy": {
+              "metric": "jvm.JvmMetrics.GcCountCopy",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcCountMarkSweepCompact": {
+              "metric": "jvm.JvmMetrics.GcCountMarkSweepCompact",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcTimeMillisCopy": {
+              "metric": "jvm.JvmMetrics.GcTimeMillisCopy",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcTimeMillisMarkSweepCompact": {
+              "metric": "jvm.JvmMetrics.GcTimeMillisMarkSweepCompact",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemHeapMaxM": {
+              "metric": "jvm.JvmMetrics.MemHeapMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemMaxM": {
+              "metric": "jvm.JvmMetrics.MemMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemNonHeapMaxM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/DroppedPubAll": {
+              "metric": "metricssystem.MetricsSystem.DroppedPubAll",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumActiveSinks": {
+              "metric": "metricssystem.MetricsSystem.NumActiveSinks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumActiveSources": {
+              "metric": "metricssystem.MetricsSystem.NumActiveSources",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumAllSinks": {
+              "metric": "metricssystem.MetricsSystem.NumAllSinks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumAllSources": {
+              "metric": "metricssystem.MetricsSystem.NumAllSources",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/PublishAvgTime": {
+              "metric": "metricssystem.MetricsSystem.PublishAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/PublishNumOps": {
+              "metric": "metricssystem.MetricsSystem.PublishNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineAvgTime": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineDropped": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineDropped",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineNumOps": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineQsize": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineQsize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/SnapshotAvgTime": {
+              "metric": "metricssystem.MetricsSystem.SnapshotAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/SnapshotNumOps": {
+              "metric": "metricssystem.MetricsSystem.SnapshotNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpc/RpcClientBackoff": {
+              "metric": "rpc.rpc.RpcClientBackoff",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetContainerStatusesAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.GetContainerStatusesAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetContainerStatusesNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.GetContainerStatusesNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/HeartbeatAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.HeartbeatAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/HeartbeatNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.HeartbeatNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/StartContainersAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.StartContainersAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/StartContainersNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.StartContainersNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/StopContainersAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.StopContainersAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/StopContainersNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.StopContainersNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/UgiMetrics/GetGroupsAvgTime": {
+              "metric": "ugi.UgiMetrics.GetGroupsAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/UgiMetrics/GetGroupsNumOps": {
+              "metric": "ugi.UgiMetrics.GetGroupsNumOps",
+              "pointInTime": true,
+              "temporal": true
+            }
+          }
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/cpu/cpu_idle": {
+              "metric": "cpu_idle",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/cpu/cpu_nice": {
+              "metric": "cpu_nice",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/cpu/cpu_system": {
+              "metric": "cpu_system",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/cpu/cpu_user": {
+              "metric": "cpu_user",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/cpu/cpu_wio": {
+              "metric": "cpu_wio",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/disk/disk_free": {
+              "metric": "disk_free",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/disk/disk_total": {
+              "metric": "disk_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/load/load_fifteen": {
+              "metric": "load_fifteen",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/load/load_five": {
+              "metric": "load_five",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/load/load_one": {
+              "metric": "load_one",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/memory/mem_buffered": {
+              "metric": "mem_buffered",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/memory/mem_cached": {
+              "metric": "mem_cached",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/memory/mem_free": {
+              "metric": "mem_free",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/memory/mem_shared": {
+              "metric": "mem_shared",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/memory/mem_total": {
+              "metric": "mem_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/memory/swap_free": {
+              "metric": "swap_free",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/network/bytes_in": {
+              "metric": "bytes_in",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/network/bytes_out": {
+              "metric": "bytes_out",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/network/pkts_in": {
+              "metric": "pkts_in",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/network/pkts_out": {
+              "metric": "pkts_out",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/process/proc_run": {
+              "metric": "proc_run",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/process/proc_total": {
+              "metric": "proc_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/disk/read_count": {
+              "metric": "read_count",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/disk/write_count": {
+              "metric": "write_count",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/disk/read_bytes": {
+              "metric": "read_bytes",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/disk/write_bytes": {
+              "metric": "write_bytes",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/disk/read_time": {
+              "metric": "read_time",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/disk/write_time": {
+              "metric": "write_time",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/jvm/memHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/mapred/ShuffleOutputsFailed": {
+              "metric": "mapred.ShuffleMetrics.ShuffleOutputsFailed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/threadsRunnable": {
+              "metric": "jvm.JvmMetrics.ThreadsRunnable",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsNew": {
+              "metric": "jvm.JvmMetrics.ThreadsNew",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationFailures": {
+              "metric": "rpc.rpc.RpcAuthorizationFailures",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_avg_time": {
+              "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_avg_time": {
+              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "rpc.rpc.SentBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logWarn": {
+              "metric": "jvm.JvmMetrics.LogWarn",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTimedWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/gcCount": {
+              "metric": "jvm.JvmMetrics.GcCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "rpc.rpc.ReceivedBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsBlocked": {
+              "metric": "jvm.JvmMetrics.ThreadsBlocked",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_num_ops": {
+              "metric": "rpc.rpc.RpcQueueTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/AllocatedContainers": {
+              "metric": "yarn.NodeManagerMetrics.AllocatedContainers",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/AllocatedGB": {
+              "metric": "yarn.NodeManagerMetrics.AllocatedGB",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/AvailableGB": {
+              "metric": "yarn.NodeManagerMetrics.AvailableGB",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/AllocatedVCores": {
+              "metric": "yarn.NodeManagerMetrics.AllocatedVCores",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/AvailableVCores": {
+              "metric": "yarn.NodeManagerMetrics.AvailableVCores",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/BadLocalDirs": {
+              "metric": "yarn.NodeManagerMetrics.BadLocalDirs",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/BadLogDirs": {
+              "metric": "yarn.NodeManagerMetrics.BadLogDirs",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainerLaunchDurationAvgTime": {
+              "metric": "yarn.NodeManagerMetrics.ContainerLaunchDurationAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainerLaunchDurationNumOps": {
+              "metric": "yarn.NodeManagerMetrics.ContainerLaunchDurationNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersCompleted": {
+              "metric": "yarn.NodeManagerMetrics.ContainersCompleted",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersFailed": {
+              "metric": "yarn.NodeManagerMetrics.ContainersFailed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersIniting": {
+              "metric": "yarn.NodeManagerMetrics.ContainersIniting",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersKilled": {
+              "metric": "yarn.NodeManagerMetrics.ContainersKilled",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersLaunched": {
+              "metric": "yarn.NodeManagerMetrics.ContainersLaunched",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersRunning": {
+              "metric": "yarn.NodeManagerMetrics.ContainersRunning",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/GoodLocalDirsDiskUtilizationPerc": {
+              "metric": "yarn.NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/GoodLogDirsDiskUtilizationPerc": {
+              "metric": "yarn.NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "rpc.rpc.NumOpenConnections",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemHeapUsedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/mapred/ShuffleOutputsOK": {
+              "metric": "mapred.ShuffleMetrics.ShuffleOutputsOK",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_num_ops": {
+              "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/gcTimeMillis": {
+              "metric": "jvm.JvmMetrics.GcTimeMillis",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTerminated": {
+              "metric": "jvm.JvmMetrics.ThreadsTerminated",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/callQueueLen": {
+              "metric": "rpc.rpc.CallQueueLength",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logInfo": {
+              "metric": "jvm.JvmMetrics.LogInfo",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_num_ops": {
+              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_num_ops": {
+              "metric": "ugi.UgiMetrics.LoginFailureNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/mapred/ShuffleConnections": {
+              "metric": "mapred.ShuffleMetrics.ShuffleConnections",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/logError": {
+              "metric": "jvm.JvmMetrics.LogError",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_avg_time": {
+              "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationSuccesses": {
+              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logFatal": {
+              "metric": "jvm.JvmMetrics.LogFatal",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_avg_time": {
+              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/mapred/ShuffleOutputBytes": {
+              "metric": "mapred.ShuffleMetrics.ShuffleOutputBytes",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationSuccesses": {
+              "metric": "rpc.rpc.RpcAuthenticationSuccesses",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationFailures": {
+              "metric": "rpc.rpc.RpcAuthenticationFailures",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcCountCopy": {
+              "metric": "jvm.JvmMetrics.GcCountCopy",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcCountMarkSweepCompact": {
+              "metric": "jvm.JvmMetrics.GcCountMarkSweepCompact",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcTimeMillisCopy": {
+              "metric": "jvm.JvmMetrics.GcTimeMillisCopy",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcTimeMillisMarkSweepCompact": {
+              "metric": "jvm.JvmMetrics.GcTimeMillisMarkSweepCompact",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemHeapMaxM": {
+              "metric": "jvm.JvmMetrics.MemHeapMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemMaxM": {
+              "metric": "jvm.JvmMetrics.MemMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemNonHeapMaxM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/DroppedPubAll": {
+              "metric": "metricssystem.MetricsSystem.DroppedPubAll",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumActiveSinks": {
+              "metric": "metricssystem.MetricsSystem.NumActiveSinks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumActiveSources": {
+              "metric": "metricssystem.MetricsSystem.NumActiveSources",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumAllSinks": {
+              "metric": "metricssystem.MetricsSystem.NumAllSinks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumAllSources": {
+              "metric": "metricssystem.MetricsSystem.NumAllSources",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/PublishAvgTime": {
+              "metric": "metricssystem.MetricsSystem.PublishAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/PublishNumOps": {
+              "metric": "metricssystem.MetricsSystem.PublishNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineAvgTime": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineDropped": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineDropped",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineNumOps": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineQsize": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineQsize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/SnapshotAvgTime": {
+              "metric": "metricssystem.MetricsSystem.SnapshotAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/SnapshotNumOps": {
+              "metric": "metricssystem.MetricsSystem.SnapshotNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpc/RpcClientBackoff": {
+              "metric": "rpc.rpc.RpcClientBackoff",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetContainerStatusesAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.GetContainerStatusesAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetContainerStatusesNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.GetContainerStatusesNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/HeartbeatAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.HeartbeatAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/HeartbeatNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.HeartbeatNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/StartContainersAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.StartContainersAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/StartContainersNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.StartContainersNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/StopContainersAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.StopContainersAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/StopContainersNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.StopContainersNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/UgiMetrics/GetGroupsAvgTime": {
+              "metric": "ugi.UgiMetrics.GetGroupsAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/UgiMetrics/GetGroupsNumOps": {
+              "metric": "ugi.UgiMetrics.GetGroupsNumOps",
+              "pointInTime": true,
+              "temporal": true
+            }
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "metrics": {
+          "default": {
+            "metrics/jvm/memHeapCommittedM": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapCommittedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsRunnable": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsRunnable",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsNew": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsNew",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/memNonHeapCommittedM": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/callQueueLen": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.CallQueueLength",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthorizationFailures": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthorizationFailures",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTime_avg_time": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcQueueTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ugi/loginSuccess_avg_time": {
+              "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginSuccessAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.SentBytes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/logInfo": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogInfo",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/memNonHeapUsedM": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/logWarn": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogWarn",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ugi/loginFailure_num_ops": {
+              "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginFailureNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsTimedWaiting": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTime_num_ops": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcProcessingTimeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/gcCount": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.GcCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.ReceivedBytes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsBlocked": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsBlocked",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTime_num_ops": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcQueueTimeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/logError": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogError",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ugi/loginFailure_avg_time": {
+              "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginFailureAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthorizationSuccesses": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthorizationSuccesses",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.NumOpenConnections",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/memHeapUsedM": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/logFatal": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogFatal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTime_avg_time": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcProcessingTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsWaiting": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsWaiting",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/gcTimeMillis": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ugi/loginSuccess_num_ops": {
+              "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginSuccessNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthenticationSuccesses": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthenticationSuccesses",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthenticationFailures": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthenticationFailures",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsTerminated": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsTerminated",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/nodemanager/GoodLocalDirsDiskUtilizationPerc": {
+              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/nodemanager/GoodLogDirsDiskUtilizationPerc": {
+              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/nodemanager/AllocatedGB": {
+              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.AllocatedGB",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/nodemanager/AllocatedVCores": {
+              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.AllocatedVCores",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/nodemanager/BadLocalDirs": {
+              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.BadLocalDirs",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/nodemanager/BadLogDirs": {
+              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.BadLogDirs",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/nodemanager/ContainersFailed": {
+              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.ContainersFailed",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/nodemanager/AllocatedContainers": {
+              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.AllocatedContainers",
+              "pointInTime": true,
+              "temporal": false
+            }
+          }
+        }
+      }
+    ]
+  },
+  "RESOURCEMANAGER": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/rpcdetailed/FinishApplicationMasterNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsCompleted": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AppsCompleted",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
+              "metric": "yarn.ClusterMetrics.NumUnhealthyNMs",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsRunnable": {
+              "metric": "jvm.JvmMetrics.ThreadsRunnable",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
+              "metric": "yarn.ClusterMetrics.NumRebootedNMs",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsNew": {
+              "metric": "jvm.JvmMetrics.ThreadsNew",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsSubmitted": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AppsSubmitted",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/ClusterMetrics/NumLostNMs": {
+              "metric": "yarn.ClusterMetrics.NumLostNMs",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_avg_time": {
+              "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_avg_time": {
+              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedContainers": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedContainers",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "rpc.rpc.SentBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsKilled": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AppsKilled",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/ClusterMetrics/NumActiveNMs": {
+              "metric": "yarn.ClusterMetrics.NumActiveNMs",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logWarn": {
+              "metric": "jvm.JvmMetrics.LogWarn",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTimedWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsFailed": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AppsFailed",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/gcCount": {
+              "metric": "jvm.JvmMetrics.GcCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "rpc.rpc.ReceivedBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/AllocateNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.AllocateNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/gcCountMarkSweepCompact": {
+              "metric": "jvm.JvmMetrics.GcCountMarkSweepCompact",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/threadsBlocked": {
+              "metric": "jvm.JvmMetrics.ThreadsBlocked",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsRunning": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AppsRunning",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ActiveApplications": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).ActiveApplications",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AMResourceLimitMB": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AMResourceLimitMB",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AMResourceLimitVCores": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AMResourceLimitVCores",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ActiveUsers": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).ActiveUsers",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AggregateContainersAllocated": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AggregateContainersAllocated",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AggregateContainersReleased": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AggregateContainersReleased",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedVCores": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedVCores",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppAttemptFirstContainerAllocationDelayAvgTime": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AppAttemptFirstContainerAllocationDelayAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppAttemptFirstContainerAllocationDelayNumOps": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AppAttemptFirstContainerAllocationDelayNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AvailableVCores": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AvailableVCores",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingVCores": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).PendingVCores",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ReservedMB": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).ReservedMB",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ReservedVCores": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).ReservedVCores",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/UsedAMResourceMB": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).UsedAMResourceMB",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/UsedAMResourceVCores": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).UsedAMResourceVCores",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/running_0": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).running_0",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/running_1440": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).running_1440",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/running_300": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).running_300",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/running_60": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).running_60",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/ClusterMetrics/NumDecommissionedNMs": {
+              "metric": "yarn.ClusterMetrics.NumDecommissionedNMs",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_num_ops": {
+              "metric": "rpc.rpc.RpcQueueTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "rpc.rpc.NumOpenConnections",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemHeapUsedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_num_ops": {
+              "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/gcTimeMillisCopy": {
+              "metric": "jvm.JvmMetrics.GcTimeMillisCopy",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/gcTimeMillis": {
+              "metric": "jvm.JvmMetrics.GcTimeMillis",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingContainers": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).PendingContainers",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memMaxM": {
+              "metric": "jvm.JvmMetrics.MemMaxM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTerminated": {
+              "metric": "jvm.JvmMetrics.ThreadsTerminated",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/AllocateAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.AllocateAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/GetApplicationReportNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.GetApplicationReportNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/FinishApplicationMasterAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/callQueueLen": {
+              "metric": "rpc.rpc.CallQueueLength",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/RegisterApplicationMasterNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AvailableMB": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AvailableMB",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingMB": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).PendingMB",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logInfo": {
+              "metric": "jvm.JvmMetrics.LogInfo",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_num_ops": {
+              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_num_ops": {
+              "metric": "ugi.UgiMetrics.LoginFailureNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/SubmitApplicationAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.SubmitApplicationAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/GetNewApplicationNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.GetNewApplicationNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsPending": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AppsPending",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/gcCountCopy": {
+              "metric": "jvm.JvmMetrics.GcCountCopy",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/logError": {
+              "metric": "jvm.JvmMetrics.LogError",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_avg_time": {
+              "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/SubmitApplicationNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.SubmitApplicationNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/gcTimeMillisMarkSweepCompact": {
+              "metric": "jvm.JvmMetrics.GcTimeMillisMarkSweepCompact",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationSuccesses": {
+              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedMB": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedMB",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logFatal": {
+              "metric": "jvm.JvmMetrics.LogFatal",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_avg_time": {
+              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/GetApplicationReportAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.GetApplicationReportAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/NodeHeartbeatAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.NodeHeartbeatAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/GetNewApplicationAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.GetNewApplicationAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/RegisterApplicationMasterAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ReservedContainers": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).ReservedContainers",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcCountMarkSweepCompact": {
+              "metric": "jvm.JvmMetrics.GcCountMarkSweepCompact",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemHeapMaxM": {
+              "metric": "jvm.JvmMetrics.MemHeapMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemNonHeapMaxM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/ThreadsBlocked": {
+              "metric": "jvm.JvmMetrics.ThreadsBlocked",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/DroppedPubAll": {
+              "metric": "metricssystem.MetricsSystem.DroppedPubAll",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumActiveSinks": {
+              "metric": "metricssystem.MetricsSystem.NumActiveSinks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumActiveSources": {
+              "metric": "metricssystem.MetricsSystem.NumActiveSources",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumAllSinks": {
+              "metric": "metricssystem.MetricsSystem.NumAllSinks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumAllSources": {
+              "metric": "metricssystem.MetricsSystem.NumAllSources",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/PublishAvgTime": {
+              "metric": "metricssystem.MetricsSystem.PublishAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/PublishNumOps": {
+              "metric": "metricssystem.MetricsSystem.PublishNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineAvgTime": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineDropped": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineDropped",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineNumOps": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineQsize": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineQsize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/SnapshotAvgTime": {
+              "metric": "metricssystem.MetricsSystem.SnapshotAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/SnapshotNumOps": {
+              "metric": "metricssystem.MetricsSystem.SnapshotNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpc/RpcAuthenticationFailures": {
+              "metric": "rpc.rpc.RpcAuthenticationFailures",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpc/RpcAuthenticationSuccesses": {
+              "metric": "rpc.rpc.RpcAuthenticationSuccesses",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpc/RpcAuthorizationFailures": {
+              "metric": "rpc.rpc.RpcAuthorizationFailures",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpc/RpcClientBackoff": {
+              "metric": "rpc.rpc.RpcClientBackoff",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/AllocateNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.AllocateNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetClusterMetricsAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.GetClusterMetricsAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetClusterMetricsNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.GetClusterMetricsNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetClusterNodesAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.GetClusterNodesAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetClusterNodesNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.GetClusterNodesNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetQueueInfoAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.GetQueueInfoAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetQueueInfoNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.GetQueueInfoNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetQueueUserAclsAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.GetQueueUserAclsAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetQueueUserAclsNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.GetQueueUserAclsNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/NodeHeartbeatNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.NodeHeartbeatNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/RegisterNodeManagerAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.RegisterNodeManagerAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/RegisterNodeManagerNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.RegisterNodeManagerNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/UgiMetrics/GetGroupsAvgTime": {
+              "metric": "ugi.UgiMetrics.GetGroupsAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/UgiMetrics/GetGroupsNumOps": {
+              "metric": "ugi.UgiMetrics.GetGroupsNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ClusterMetrics/AMLaunchDelayAvgTime": {
+              "metric": "yarn.ClusterMetrics.AMLaunchDelayAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ClusterMetrics/AMLaunchDelayNumOps": {
+              "metric": "yarn.ClusterMetrics.AMLaunchDelayNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ClusterMetrics/AMRegisterDelayAvgTime": {
+              "metric": "yarn.ClusterMetrics.AMRegisterDelayAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ClusterMetrics/AMRegisterDelayNumOps": {
+              "metric": "yarn.ClusterMetrics.AMRegisterDelayNumOps",
+              "pointInTime": true,
+              "temporal": true
+            }
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "metrics": {
+          "default": {
+            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsFailed": {
+              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsFailed",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/rm_metrics/cluster/rebootedNMcount": {
+              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumRebootedNMs",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/memHeapCommittedM": {
+              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemHeapCommittedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
+              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumUnhealthyNMs",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/HeapMemoryMax": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/HeapMemoryUsed": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/NonHeapMemoryMax": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/NonHeapMemoryUsed": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsRunnable": {
+              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsRunnable",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsNew": {
+              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsNew",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
+              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumRebootedNMs",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/runtime/StartTime": {
+              "metric": "java.lang:type=Runtime.StartTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsKilled": {
+              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsKilled",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthorizationFailures": {
+              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthorizationFailures",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AggregateContainersAllocated": {
+              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AggregateContainersAllocated",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/yarn/ClusterMetrics/NumLostNMs": {
+              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumLostNMs",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ugi/loginSuccess_avg_time": {
+              "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginSuccessAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/StartTime": {
+              "metric": "java.lang:type=Runtime.StartTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTime_avg_time": {
+              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcQueueTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ReservedContainers": {
+              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ReservedContainers",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsSubmitted": {
+              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsSubmitted",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.SentBytes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/yarn/ClusterMetrics/NumActiveNMs": {
+              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumActiveNMs",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_300": {
+              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_300",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/memNonHeapUsedM": {
+              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/logWarn": {
+              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogWarn",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsTimedWaiting": {
+              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/gcCount": {
+              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.GcCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.ReceivedBytes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsBlocked": {
+              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsBlocked",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_60": {
+              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_60",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTime_num_ops": {
+              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcQueueTimeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/yarn/ClusterMetrics/NumDecommissionedNMs": {

<TRUNCATED>
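
The QueueMetrics entries in the metrics definition above use property-id templates of the form metrics/yarn/Queue/$1.replaceAll("([.])","/")/<Metric>, where $1 is the queue path captured by the regex in the corresponding "metric" field (e.g. yarn.QueueMetrics.Queue=(.+).AppsCompleted). Below is a minimal, hypothetical Java sketch of how such a template could be resolved; the class name, control flow, and example metric value are illustrative assumptions, not Ambari's actual property-provider code — only the regex and the template come from the definitions above.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Hypothetical illustration only: resolve a QueueMetrics property-id template
// against a reported metric name. Only the regex and the template string are
// taken from the stack definition above; everything else is an assumption.
public class QueueMetricTemplateDemo {
  public static void main(String[] args) {
    // Example of a metric name a metrics sink might report for queue root.default.
    String reported = "yarn.QueueMetrics.Queue=root.default.AppsCompleted";

    // Regex from the "metric" field (dots are unescaped in the definition, but
    // the greedy group still captures the full dotted queue path).
    Matcher m = Pattern.compile("yarn.QueueMetrics.Queue=(.+).AppsCompleted")
        .matcher(reported);

    if (m.matches()) {
      String queue = m.group(1);                        // "root.default"
      String segment = queue.replaceAll("([.])", "/");  // "root/default"
      // Substituting into metrics/yarn/Queue/$1.replaceAll("([.])","/")/AppsCompleted:
      System.out.println("metrics/yarn/Queue/" + segment + "/AppsCompleted");
      // -> metrics/yarn/Queue/root/default/AppsCompleted
    }
  }
}

In other words, a per-queue Ganglia metric is mapped to a per-queue Ambari property path by turning the captured queue name's dots into path separators, which is why the same template pattern repeats for every Queue=(.+) metric in the block above.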

[36/51] [abbrv] ambari git commit: AMBARI-19137. HDP 3.0 TP - move ZK, HDFS, YARN/MR into new common-services version (alejandro)

Posted by sm...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/balancer-emulator/balancer-err.log
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/balancer-emulator/balancer-err.log b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/balancer-emulator/balancer-err.log
new file mode 100644
index 0000000..d7c6704
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/package/scripts/balancer-emulator/balancer-err.log
@@ -0,0 +1,1032 @@
+14/07/28 17:01:48 INFO balancer.Balancer: Using a threshold of 5.0
+14/07/28 17:01:48 INFO balancer.Balancer: namenodes = [hdfs://evhubudsd1aae.budapest.epam.com:8020]
+14/07/28 17:01:48 INFO balancer.Balancer: p         = Balancer.Parameters[BalancingPolicy.Node, threshold=5.0]
+14/07/28 17:01:49 INFO balancer.Balancer: Block token params received from NN: keyUpdateInterval=600 min(s), tokenLifetime=600 min(s)
+14/07/28 17:01:49 INFO block.BlockTokenSecretManager: Setting block keys
+14/07/28 17:01:49 INFO balancer.Balancer: Balancer will update its block keys every 150 minute(s)
+14/07/28 17:01:49 INFO block.BlockTokenSecretManager: Setting block keys
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:01:49 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.887235026238486]]
+14/07/28 17:01:49 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.178140109955496]]
+14/07/28 17:01:49 INFO balancer.Balancer: Need to move 5.74 GB to make the cluster balanced.
+14/07/28 17:01:49 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:01:49 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:01:57 INFO balancer.Balancer: Moving block 1073950748 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:01:58 INFO balancer.Balancer: Moving block 1073939272 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:02:06 INFO balancer.Balancer: Moving block 1073863504 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:02:13 INFO balancer.Balancer: Moving block 1073863516 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:02:31 INFO balancer.Balancer: Moving block 1073743089 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:03:00 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.803451571241915]]
+14/07/28 17:03:00 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.262867215362437]]
+14/07/28 17:03:00 INFO balancer.Balancer: Need to move 5.58 GB to make the cluster balanced.
+14/07/28 17:03:00 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:03:00 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:03:00 INFO balancer.Balancer: Moving block 1073937443 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:03:00 INFO balancer.Balancer: Moving block 1073926003 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:03:00 INFO balancer.Balancer: Moving block 1073916372 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:03:00 INFO balancer.Balancer: Moving block 1073926002 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:03:00 INFO balancer.Balancer: Moving block 1073920016 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:03:05 INFO balancer.Balancer: Moving block 1073937461 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:03:11 INFO balancer.Balancer: Moving block 1073743437 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:03:20 INFO balancer.Balancer: Moving block 1073743443 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:03:31 INFO balancer.Balancer: Moving block 1073743449 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:03:34 INFO balancer.Balancer: Moving block 1073743440 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:04:07 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.70875539052811]]
+14/07/28 17:04:07 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.35756339607624]]
+14/07/28 17:04:07 INFO balancer.Balancer: Need to move 5.40 GB to make the cluster balanced.
+14/07/28 17:04:07 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:04:07 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:04:07 INFO balancer.Balancer: Moving block 1073743776 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:04:08 INFO balancer.Balancer: Moving block 1073915941 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:04:08 INFO balancer.Balancer: Moving block 1073930160 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:04:08 INFO balancer.Balancer: Moving block 1073930161 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:04:08 INFO balancer.Balancer: Moving block 1073908316 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:04:09 INFO balancer.Balancer: Moving block 1073930163 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:04:51 INFO balancer.Balancer: Moving block 1073947549 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:05:04 INFO balancer.Balancer: Moving block 1073863141 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:05:06 INFO balancer.Balancer: Moving block 1073863139 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:05:14 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.53815392807349]]
+14/07/28 17:05:14 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.528164858530864]]
+14/07/28 17:05:14 INFO balancer.Balancer: Need to move 5.06 GB to make the cluster balanced.
+14/07/28 17:05:14 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:05:14 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073945158 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073918874 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073918873 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073945162 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073918867 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073945160 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073914540 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073918868 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073931861 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:05:50 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.538117645568114]]
+14/07/28 17:05:50 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.52820114103624]]
+14/07/28 17:05:50 INFO balancer.Balancer: Need to move 5.06 GB to make the cluster balanced.
+14/07/28 17:05:50 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:05:50 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073916888 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073925481 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073920767 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073908143 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073911961 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073929306 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:06:09 INFO balancer.Balancer: Moving block 1073863170 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:06:33 INFO balancer.Balancer: Moving block 1073929250 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:06:35 INFO balancer.Balancer: Moving block 1073863186 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:06:56 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.407811418798076]]
+14/07/28 17:06:56 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.658507367806276]]
+14/07/28 17:06:56 INFO balancer.Balancer: Need to move 4.81 GB to make the cluster balanced.
+14/07/28 17:06:56 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:06:56 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:06:56 INFO balancer.Balancer: Moving block 1073919724 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:06:56 INFO balancer.Balancer: Moving block 1073915864 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073910902 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073949844 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073926217 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073919721 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073926320 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073946575 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073949843 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:07:33 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.4068167244793]]
+14/07/28 17:07:33 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.659502062125057]]
+14/07/28 17:07:33 INFO balancer.Balancer: Need to move 4.80 GB to make the cluster balanced.
+14/07/28 17:07:33 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:07:33 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:07:33 INFO balancer.Balancer: Moving block 1073948620 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:07:33 INFO balancer.Balancer: Moving block 1073917051 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:07:34 INFO balancer.Balancer: Moving block 1073924651 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:07:40 INFO balancer.Balancer: Moving block 1073742834 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:08:55 INFO balancer.Balancer: Moving block 1073894040 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:08:56 INFO balancer.Balancer: Moving block 1073932476 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:08:59 INFO balancer.Balancer: Moving block 1073742598 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:09:00 INFO balancer.Balancer: Moving block 1073893997 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:09:11 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.144332676814294]]
+14/07/28 17:09:11 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.92198610979006]]
+14/07/28 17:09:11 INFO balancer.Balancer: Need to move 4.29 GB to make the cluster balanced.
+14/07/28 17:09:11 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:09:11 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073920127 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073743556 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073743557 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073929950 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073942945 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073920115 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073743559 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073947343 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073920075 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:09:47 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.14396676101451]]
+14/07/28 17:09:47 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.92215625345692]]
+14/07/28 17:09:47 INFO balancer.Balancer: Need to move 4.29 GB to make the cluster balanced.
+14/07/28 17:09:47 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:09:47 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:09:47 INFO balancer.Balancer: Moving block 1073951772 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:09:47 INFO balancer.Balancer: Moving block 1073951752 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:09:47 INFO balancer.Balancer: Moving block 1073951754 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:09:47 INFO balancer.Balancer: Moving block 1073951766 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:09:52 INFO balancer.Balancer: Moving block 1073951747 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:09:56 INFO balancer.Balancer: Moving block 1073951765 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:10:53 INFO balancer.Balancer: Moving block 1073951746 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:10:54 INFO balancer.Balancer: Moving block 1073951745 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:10:54 INFO balancer.Balancer: Moving block 1073951744 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:11:24 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.9413931647133]]
+14/07/28 17:11:24 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:11:24 INFO balancer.Balancer: Need to move 3.89 GB to make the cluster balanced.
+14/07/28 17:11:24 INFO balancer.Balancer: Decided to move 5.84 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
+14/07/28 17:11:24 INFO balancer.Balancer: Decided to move 2.64 GB bytes from 10.253.130.9:50010 to 10.253.130.8:50010
+14/07/28 17:11:24 INFO balancer.Balancer: Decided to move 1.31 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
+14/07/28 17:11:24 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073940539 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073940537 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073927798 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073935420 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073927775 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073923954 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073918163 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073949253 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.3:50010 is succeeded.
+14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073931581 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:11:25 INFO balancer.Balancer: Moving block 1073923922 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.3:50010 is succeeded.
+14/07/28 17:11:25 INFO balancer.Balancer: Moving block 1073931532 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:11:25 INFO balancer.Balancer: Moving block 1073949248 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:11:29 INFO balancer.Balancer: Moving block 1073923928 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.4:50010 is succeeded.
+14/07/28 17:11:29 INFO balancer.Balancer: Moving block 1073927787 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:11:29 INFO balancer.Balancer: Moving block 1073949252 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:11:29 INFO balancer.Balancer: Moving block 1073906578 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:11:29 INFO balancer.Balancer: Moving block 1073914353 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.6:50010 is succeeded.
+14/07/28 17:11:30 INFO balancer.Balancer: Moving block 1073931557 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:11:30 INFO balancer.Balancer: Moving block 1073910459 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:12:00 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.923538618186065]]
+14/07/28 17:12:00 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:12:00 INFO balancer.Balancer: Need to move 3.86 GB to make the cluster balanced.
+14/07/28 17:12:00 INFO balancer.Balancer: Decided to move 2.61 GB bytes from 10.253.130.9:50010 to 10.253.130.8:50010
+14/07/28 17:12:00 INFO balancer.Balancer: Decided to move 7.18 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:12:00 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073949133 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.7:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073945194 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073927453 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073923118 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073905689 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073914494 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073905688 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073923119 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073914488 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073905681 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073905677 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073927648 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073945235 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073945226 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073910053 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073927664 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.6:50010 is succeeded.
+14/07/28 17:12:29 INFO balancer.Balancer: Moving block 1073905173 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:13:19 INFO balancer.Balancer: Moving block 1073905177 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:19 INFO balancer.Balancer: Moving block 1073905171 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:21 INFO balancer.Balancer: Moving block 1073905175 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:27 INFO balancer.Balancer: Moving block 1073905172 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:13:37 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.60177342833359]]
+14/07/28 17:13:37 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:13:37 INFO balancer.Balancer: Need to move 3.23 GB to make the cluster balanced.
+14/07/28 17:13:37 INFO balancer.Balancer: Decided to move 1.73 GB bytes from 10.253.130.9:50010 to 10.253.129.223:50010
+14/07/28 17:13:37 INFO balancer.Balancer: Decided to move 375.17 MB bytes from 10.253.130.9:50010 to 10.253.130.7:50010
+14/07/28 17:13:37 INFO balancer.Balancer: Decided to move 1.00 GB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
+14/07/28 17:13:37 INFO balancer.Balancer: Decided to move 3.66 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
+14/07/28 17:13:37 INFO balancer.Balancer: Decided to move 3.03 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
+14/07/28 17:13:37 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073914692 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073927391 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073927383 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073923582 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073905952 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073914693 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073923467 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073918495 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.10:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073923466 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073948829 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073945548 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073948902 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.10:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073945546 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073905987 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073945549 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073918570 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073945542 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:13:38 INFO balancer.Balancer: Moving block 1073927370 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.3:50010 is succeeded.
+14/07/28 17:13:38 INFO balancer.Balancer: Moving block 1073914708 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.8:50010 is succeeded.
+14/07/28 17:13:38 INFO balancer.Balancer: Moving block 1073948908 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.1:50010 is succeeded.
+14/07/28 17:13:38 INFO balancer.Balancer: Moving block 1073918565 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:13:38 INFO balancer.Balancer: Moving block 1073923572 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:13:46 INFO balancer.Balancer: Moving block 1073936056 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:49 INFO balancer.Balancer: Moving block 1073936057 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:52 INFO balancer.Balancer: Moving block 1073936063 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:14:09 INFO balancer.Balancer: Moving block 1073936045 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:14:09 INFO balancer.Balancer: Moving block 1073936034 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:14:40 INFO balancer.Balancer: Moving block 1073936032 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.10:50010 is succeeded.
+14/07/28 17:14:40 INFO balancer.Balancer: Moving block 1073936033 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:14:41 INFO balancer.Balancer: Moving block 1073936036 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.6:50010 is succeeded.
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:15:13 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.2458785989085]]
+14/07/28 17:15:13 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:15:13 INFO balancer.Balancer: Need to move 2.53 GB to make the cluster balanced.
+14/07/28 17:15:13 INFO balancer.Balancer: Decided to move 5.46 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
+14/07/28 17:15:13 INFO balancer.Balancer: Decided to move 3.66 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
+14/07/28 17:15:13 INFO balancer.Balancer: Decided to move 683.02 MB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
+14/07/28 17:15:13 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073934407 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073926699 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073907624 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.4:50010 is succeeded.
+14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073930612 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.6:50010 is succeeded.
+14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073950332 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073934387 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073930508 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073934414 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073945924 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073922816 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:15:14 INFO balancer.Balancer: Moving block 1073934411 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:14 INFO balancer.Balancer: Moving block 1073926698 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:15:14 INFO balancer.Balancer: Moving block 1073922838 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:15:15 INFO balancer.Balancer: Moving block 1073919113 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:15 INFO balancer.Balancer: Moving block 1073922843 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:15:15 INFO balancer.Balancer: Moving block 1073907649 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:15 INFO balancer.Balancer: Moving block 1073950223 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:15:49 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.23893576243181]]
+14/07/28 17:15:49 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:15:49 INFO balancer.Balancer: Need to move 2.52 GB to make the cluster balanced.
+14/07/28 17:15:49 INFO balancer.Balancer: Decided to move 375.06 MB bytes from 10.253.130.9:50010 to 10.253.130.7:50010
+14/07/28 17:15:49 INFO balancer.Balancer: Decided to move 3.66 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
+14/07/28 17:15:49 INFO balancer.Balancer: Decided to move 4.44 GB bytes from 10.253.130.9:50010 to 10.253.130.10:50010
+14/07/28 17:15:49 INFO balancer.Balancer: Decided to move 1.33 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
+14/07/28 17:15:49 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073931740 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073927810 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073923141 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073910191 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073905793 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.3:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073940704 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073949348 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073936134 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073914594 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073949356 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.8:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073936148 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073936164 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073936158 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073949359 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073918912 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073914616 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073936151 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073923999 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:15:50 INFO balancer.Balancer: Moving block 1073940722 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:15:51 INFO balancer.Balancer: Moving block 1073927855 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:51 INFO balancer.Balancer: Moving block 1073906497 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:51 INFO balancer.Balancer: Moving block 1073949350 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.129.224:50010 is succeeded.
+14/07/28 17:15:51 INFO balancer.Balancer: Moving block 1073945051 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:16:25 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.236639727566796]]
+14/07/28 17:16:25 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:16:25 INFO balancer.Balancer: Need to move 2.51 GB to make the cluster balanced.
+14/07/28 17:16:25 INFO balancer.Balancer: Decided to move 2.36 GB bytes from 10.253.130.9:50010 to 10.253.130.8:50010
+14/07/28 17:16:25 INFO balancer.Balancer: Decided to move 1.53 GB bytes from 10.253.130.9:50010 to 10.253.129.223:50010
+14/07/28 17:16:25 INFO balancer.Balancer: Decided to move 5.45 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
+14/07/28 17:16:25 INFO balancer.Balancer: Decided to move 463.99 MB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
+14/07/28 17:16:25 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073942946 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073947339 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073912361 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073926131 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073947341 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073929961 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743570 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073916254 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743604 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743581 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073926130 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073920078 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073916287 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073933727 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073908503 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743586 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743580 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073937539 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073908497 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073942916 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743590 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073947329 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743599 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.6:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743600 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.6:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073895265 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073937542 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073916258 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073916286 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.1:50010 is succeeded.
+14/07/28 17:16:47 INFO balancer.Balancer: Moving block 1073862841 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:17:01 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.1720712908457]]
+14/07/28 17:17:01 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:17:01 INFO balancer.Balancer: Need to move 2.39 GB to make the cluster balanced.
+14/07/28 17:17:01 INFO balancer.Balancer: Decided to move 3.66 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
+14/07/28 17:17:01 INFO balancer.Balancer: Decided to move 5.45 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
+14/07/28 17:17:01 INFO balancer.Balancer: Decided to move 698.32 MB bytes from 10.253.130.9:50010 to 10.253.130.8:50010
+14/07/28 17:17:01 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073915689 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073946573 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073915690 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073915841 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073919491 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073915694 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.10:50010 is succeeded.
+14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073915842 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073949829 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.3:50010 is succeeded.
+14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073895888 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073949830 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073922418 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073931011 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073949848 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073904475 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073946583 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.4:50010 is succeeded.
+14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073904561 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073949813 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.4:50010 is succeeded.
+14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073915703 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073926226 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:17:37 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.17123487505752]]
+14/07/28 17:17:37 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:17:37 INFO balancer.Balancer: Need to move 2.38 GB to make the cluster balanced.
+14/07/28 17:17:37 INFO balancer.Balancer: Decided to move 2.23 GB bytes from 10.253.130.9:50010 to 10.253.130.8:50010
+14/07/28 17:17:37 INFO balancer.Balancer: Decided to move 373.37 MB bytes from 10.253.130.9:50010 to 10.253.130.7:50010
+14/07/28 17:17:37 INFO balancer.Balancer: Decided to move 4.43 GB bytes from 10.253.130.9:50010 to 10.253.130.10:50010
+14/07/28 17:17:37 INFO balancer.Balancer: Decided to move 2.76 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
+14/07/28 17:17:37 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951505 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951406 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951465 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951428 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951479 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951294 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951363 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.4:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951445 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951368 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.4:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951466 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.4:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951325 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.129.224:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951296 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951333 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.4:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951315 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951502 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951383 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.4:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951489 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951504 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.3:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951313 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951326 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951310 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:44 INFO balancer.Balancer: Moving block 1073951520 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.1:50010 is succeeded.
+14/07/28 17:17:44 INFO balancer.Balancer: Moving block 1073864141 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:18:14 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.13074467796647]]
+14/07/28 17:18:14 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:18:14 INFO balancer.Balancer: Need to move 2.31 GB to make the cluster balanced.
+14/07/28 17:18:14 INFO balancer.Balancer: Decided to move 9.08 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:18:14 INFO balancer.Balancer: Decided to move 729.65 MB bytes from 10.253.130.9:50010 to 10.253.129.223:50010
+14/07/28 17:18:14 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073935830 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073931492 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073931497 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073913899 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073910416 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073928121 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073931496 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073927763 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073935825 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073935414 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073928117 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073928114 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073935419 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073927766 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073935418 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073910423 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073949598 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:18:50 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.1305062958578]]
+14/07/28 17:18:50 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:18:50 INFO balancer.Balancer: Need to move 2.30 GB to make the cluster balanced.
+14/07/28 17:18:50 INFO balancer.Balancer: Decided to move 895.07 MB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
+14/07/28 17:18:50 INFO balancer.Balancer: Decided to move 1.53 GB bytes from 10.253.130.9:50010 to 10.253.129.223:50010
+14/07/28 17:18:50 INFO balancer.Balancer: Decided to move 7.38 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:18:50 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073930642 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073950456 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.10:50010 is succeeded.
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073934505 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073950457 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.8:50010 is succeeded.
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073934524 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073930646 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073915219 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073934502 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073930640 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073926854 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073934510 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073934503 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:18:51 INFO balancer.Balancer: Moving block 1073926851 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:18:51 INFO balancer.Balancer: Moving block 1073926857 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.6:50010 is succeeded.
+14/07/28 17:18:51 INFO balancer.Balancer: Moving block 1073930652 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.3:50010 is succeeded.
+14/07/28 17:18:52 INFO balancer.Balancer: Moving block 1073930651 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.6:50010 is succeeded.
+14/07/28 17:19:02 INFO balancer.Balancer: Moving block 1073934496 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:19:03 INFO balancer.Balancer: Moving block 1073934497 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:19:26 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.07965400229293]]
+14/07/28 17:19:26 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:19:26 INFO balancer.Balancer: Need to move 2.21 GB to make the cluster balanced.
+14/07/28 17:19:26 INFO balancer.Balancer: Decided to move 333.25 MB bytes from 10.253.130.9:50010 to 10.253.130.7:50010
+14/07/28 17:19:26 INFO balancer.Balancer: Decided to move 4.43 GB bytes from 10.253.130.9:50010 to 10.253.130.10:50010
+14/07/28 17:19:26 INFO balancer.Balancer: Decided to move 881.78 MB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
+14/07/28 17:19:26 INFO balancer.Balancer: Decided to move 4.17 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
+14/07/28 17:19:26 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:19:26 INFO balancer.Balancer: Moving block 1073931910 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:19:26 INFO balancer.Balancer: Moving block 1073905704 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.0:50

<TRUNCATED>
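
The truncated diff above is a balancer log sample carried in this commit; each iteration reports the over-utilized datanodes, how much data the balancer decided to move and to which targets, and one "Moving block ... is succeeded." line per completed transfer. As an illustrative aside (not part of the commit), the sketch below shows one way such a line could be picked apart; the regular expression and the names used are assumptions based only on the format visible above, not an Ambari or Hadoop API.

// Illustrative only: parse a "Moving block ..." line from the balancer log above.
// The pattern is inferred from the sample lines shown in the diff.
var MOVE_LINE = /Moving block (\d+) from (\S+) to (\S+) through (\S+) is succeeded\./;

function parseMove(line) {
  var m = MOVE_LINE.exec(line);
  return m ? { blockId: m[1], source: m[2], target: m[3], proxy: m[4] } : null;
}

var sample = '14/07/28 17:17:38 INFO balancer.Balancer: ' +
  'Moving block 1073951505 from 10.253.130.9:50010 to 10.253.130.8:50010 ' +
  'through 10.253.130.9:50010 is succeeded.';
console.log(parseMove(sample));
// -> { blockId: '1073951505', source: '10.253.130.9:50010',
//      target: '10.253.130.8:50010', proxy: '10.253.130.9:50010' }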

[44/51] [abbrv] ambari git commit: AMBARI-19148 Wrong filter groups names "Missing translation" on "Upgrade history" tab of Stack and Versions. (atkach)

Posted by sm...@apache.org.
AMBARI-19148 Wrong filter groups names "Missing translation" on "Upgrade history" tab of Stack and Versions. (atkach)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/54da8c27
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/54da8c27
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/54da8c27

Branch: refs/heads/branch-feature-AMBARI-18901
Commit: 54da8c27c97fd1f9ff2c3bc8af5c9b882608d5aa
Parents: 6150f95
Author: Andrii Tkach <at...@apache.org>
Authored: Fri Dec 9 14:24:38 2016 +0200
Committer: Andrii Tkach <at...@apache.org>
Committed: Fri Dec 9 14:24:38 2016 +0200

----------------------------------------------------------------------
 ambari-web/app/messages.js | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/54da8c27/ambari-web/app/messages.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index 945acfb..974b543 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -1756,6 +1756,8 @@ Em.I18n.translations = {
   'admin.stackVersions.upgradeHistory.filter.successful.downgrade': 'Successful Downgrade ({0})',
   'admin.stackVersions.upgradeHistory.filter.aborted.upgrade': 'Aborted Upgrade ({0})',
   'admin.stackVersions.upgradeHistory.filter.aborted.downgrade': 'Aborted Downgrade ({0})',
+  'admin.stackVersions.upgradeHistory.filter.failed.upgrade': 'Failed Upgrade ({0})',
+  'admin.stackVersions.upgradeHistory.filter.failed.downgrade': 'Failed Downgrade ({0})',
   'admin.stackVersions.upgradeHistory.no.history': 'No upgrade/downgrade history available',
   'admin.stackVersions.upgradeHistory.record.title': '{0} {1} to {2}',
 
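
For context on the two keys added above: they follow the same "{0}" placeholder convention as the neighbouring upgrade-history filter labels, with the count of matching records substituted in when the label is rendered. A minimal, self-contained sketch of that substitution (illustrative only; the format() helper below is a stand-in, not the actual ambari-web implementation):

// Illustrative sketch of how the new "{0}"-style messages could be rendered.
var translations = {
  'admin.stackVersions.upgradeHistory.filter.failed.upgrade': 'Failed Upgrade ({0})',
  'admin.stackVersions.upgradeHistory.filter.failed.downgrade': 'Failed Downgrade ({0})'
};

// Stand-in placeholder substitution: replaces {0}, {1}, ... with the extra arguments.
function format(template) {
  var args = Array.prototype.slice.call(arguments, 1);
  return template.replace(/\{(\d+)\}/g, function (match, index) {
    return args[index] !== undefined ? args[index] : match;
  });
}

console.log(format(translations['admin.stackVersions.upgradeHistory.filter.failed.upgrade'], 3));
// -> "Failed Upgrade (3)"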


[38/51] [abbrv] ambari git commit: AMBARI-19137. HDP 3.0 TP - move ZK, HDFS, YARN/MR into new common-services version (alejandro)

Posted by sm...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/metrics.json b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/metrics.json
new file mode 100644
index 0000000..c66387d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/metrics.json
@@ -0,0 +1,7905 @@
+{
+  "NAMENODE": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/cpu/cpu_idle":{
+              "metric":"cpu_idle",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_nice":{
+              "metric":"cpu_nice",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_system":{
+              "metric":"cpu_system",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_user":{
+              "metric":"cpu_user",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_wio":{
+              "metric":"cpu_wio",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/disk_free":{
+              "metric":"disk_free",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/disk_total":{
+              "metric":"disk_total",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/load/load_fifteen":{
+              "metric":"load_fifteen",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/load/load_five":{
+              "metric":"load_five",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/load/load_one":{
+              "metric":"load_one",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_buffers":{
+              "metric":"mem_buffers",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_cached":{
+              "metric":"mem_cached",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_free":{
+              "metric":"mem_free",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_shared":{
+              "metric":"mem_shared",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_total":{
+              "metric":"mem_total",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/swap_free":{
+              "metric":"swap_free",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/swap_total":{
+              "metric":"swap_total",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/network/bytes_in":{
+              "metric":"bytes_in",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/network/bytes_out":{
+              "metric":"bytes_out",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/network/pkts_in":{
+              "metric":"pkts_in",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/network/pkts_out":{
+              "metric":"pkts_out",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/process/proc_run":{
+              "metric":"proc_run",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/process/proc_total":{
+              "metric":"proc_total",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/read_count":{
+              "metric":"read_count",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/write_count":{
+              "metric":"write_count",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/read_bytes":{
+              "metric":"read_bytes",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/write_bytes":{
+              "metric":"write_bytes",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/read_time":{
+              "metric":"read_time",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/write_time":{
+              "metric":"write_time",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/dfs/FSNamesystem/TotalLoad": {
+              "metric": "dfs.FSNamesystem.TotalLoad",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/CapacityTotal": {
+              "metric": "dfs.FSNamesystem.CapacityTotal",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/CapacityUsed": {
+              "metric": "dfs.FSNamesystem.CapacityUsed",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/CapacityRemaining": {
+              "metric": "dfs.FSNamesystem.CapacityRemaining",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/CapacityNonDFSUsed": {
+              "metric": "dfs.FSNamesystem.CapacityUsedNonDFS",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/BlockCapacity": {
+              "metric": "dfs.FSNamesystem.BlockCapacity",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/GetListingOps": {
+              "metric": "dfs.namenode.GetListingOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/FilesAppended": {
+              "metric": "dfs.namenode.FilesAppended",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/TotalFileOps": {
+              "metric": "dfs.namenode.TotalFileOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/fsync_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.FsyncAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_avg_time": {
+              "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/renewLease_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.RenewLeaseNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/getFileInfo_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.GetFileInfoAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+              "unit": "MB",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/complete_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.CompleteAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/setPermission_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.SetPermissionNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/CapacityTotalGB": {
+              "metric": "dfs.FSNamesystem.CapacityTotalGB",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/setOwner_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.SetOwnerNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/getBlockLocations_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/CapacityUsedGB": {
+              "metric": "dfs.FSNamesystem.CapacityUsedGB",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/AddBlockOps": {
+              "metric": "dfs.namenode.AddBlockOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/FilesDeleted": {
+              "metric": "dfs.namenode.FilesDeleted",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/Syncs_avg_time": {
+              "metric": "dfs.namenode.SyncsAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsBlocked": {
+              "metric": "jvm.JvmMetrics.ThreadsBlocked",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_num_ops": {
+              "metric": "rpc.rpc.RpcQueueTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/blockReport_avg_time": {
+              "metric": "dfs.namenode.BlockReportAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/disk/part_max_used": {
+              "metric": "part_max_used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/getFileInfo_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.GetFileInfoNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/getEditLogSize_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_num_ops": {
+              "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/blockReceived_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/versionRequest_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.VersionRequestAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/versionRequest_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.VersionRequestNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/addBlock_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.AddBlockNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/FilesCreated": {
+              "metric": "dfs.namenode.FilesCreated",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rename_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.RenameAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/setSafeMode_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.SetSafeModeNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/setPermission_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.SetPermissionAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/FilesRenamed": {
+              "metric": "dfs.namenode.FilesRenamed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/register_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/setReplication_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.SetReplicationNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_num_ops": {
+              "metric": "ugi.UgiMetrics.LoginFailureNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/GetBlockLocations": {
+              "metric": "dfs.namenode.GetBlockLocations",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/fsync_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.FsyncNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/create_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.CreateAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
+              "metric": "dfs.FSNamesystem.PendingReplicationBlocks",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/delete_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.DeleteAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/FileInfoOps": {
+              "metric": "dfs.namenode.FileInfoOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/sendHeartbeat_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.SendHeartbeatNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/DeleteFileOps": {
+              "metric": "dfs.namenode.DeleteFileOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_avg_time": {
+              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/blockReport_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.BlockReportNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/setSafeMode_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.SetSafeModeAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationSuccesses": {
+              "metric": "rpc.rpc.RpcAuthenticationSuccesses",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
+              "metric": "dfs.FSNamesystem.PendingDeletionBlocks",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationFailures": {
+              "metric": "rpc.rpc.RpcAuthenticationFailures",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/getEditLogSize_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/FilesInGetListingOps": {
+              "metric": "dfs.namenode.FilesInGetListingOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsRunnable": {
+              "metric": "jvm.JvmMetrics.ThreadsRunnable",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/complete_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.CompleteNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsNew": {
+              "metric": "jvm.JvmMetrics.ThreadsNew",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationFailures": {
+              "metric": "rpc.rpc.RpcAuthorizationFailures",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/Syncs_num_ops": {
+              "metric": "dfs.namenode.SyncsNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_avg_time": {
+              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/blockReceived_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/setReplication_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.SetReplicationAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rollEditLog_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.RollEditLogAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "rpc.rpc.SentBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/FilesTotal": {
+              "metric": "dfs.FSNamesystem.FilesTotal",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logWarn": {
+              "metric": "jvm.JvmMetrics.LogWarn",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/ExcessBlocks": {
+              "metric": "dfs.FSNamesystem.ExcessBlocks",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTimedWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/gcCount": {
+              "metric": "jvm.JvmMetrics.GcCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "rpc.rpc.ReceivedBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/blockReport_num_ops": {
+              "metric": "dfs.namenode.BlockReportNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/SafemodeTime": {
+              "metric": "dfs.namenode.SafemodeTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/mkdirs_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.MkdirsAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "rpc.rpc.NumOpenConnections",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemHeapUsedM",
+              "unit": "MB",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
+              "metric": "dfs.FSNamesystem.ScheduledReplicationBlocks",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/BlocksTotal": {
+              "metric": "dfs.FSNamesystem.BlocksTotal",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/gcTimeMillis": {
+              "metric": "jvm.JvmMetrics.GcTimeMillis",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/getBlockLocations_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/Transactions_num_ops": {
+              "metric": "dfs.namenode.TransactionsNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/create_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.CreateNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTerminated": {
+              "metric": "jvm.JvmMetrics.ThreadsTerminated",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
+              "metric": "dfs.FSNamesystem.CapacityRemainingGB",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/Transactions_avg_time": {
+              "metric": "dfs.namenode.TransactionsAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/boottime": {
+              "metric": "boottime",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/dfs/FSNamesystem/MissingBlocks": {
+              "metric": "dfs.FSNamesystem.MissingBlocks",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/callQueueLen": {
+              "metric": "rpc.rpc.CallQueueLength",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/delete_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.DeleteNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/CorruptBlocks": {
+              "metric": "dfs.FSNamesystem.CorruptBlocks",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rename_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.RenameNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/blockReport_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.BlockReportAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/mkdirs_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.MkdirsNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/logInfo": {
+              "metric": "jvm.JvmMetrics.LogInfo",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/fsImageLoadTime": {
+              "metric": "dfs.namenode.FsImageLoadTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/getListing_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.GetListingNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rollEditLog_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.RollEditLogNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/addBlock_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.AddBlockAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/setOwner_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.SetOwnerAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_num_ops": {
+              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
+              "metric": "dfs.FSNamesystem.UnderReplicatedBlocks",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/sendHeartbeat_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.SendHeartbeatAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/CreateFileOps": {
+              "metric": "dfs.namenode.CreateFileOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logError": {
+              "metric": "jvm.JvmMetrics.LogError",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_avg_time": {
+              "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/register_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationSuccesses": {
+              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/getListing_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.GetListingAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logFatal": {
+              "metric": "jvm.JvmMetrics.LogFatal",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/renewLease_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.RenewLeaseAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/ElapsedTime": {
+              "metric": "default.StartupProgress.ElapsedTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/LoadingEditsCount": {
+              "metric": "default.StartupProgress.LoadingEditsCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/LoadingEditsElapsedTime": {
+              "metric": "default.StartupProgress.LoadingEditsElapsedTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/LoadingEditsPercentComplete": {
+              "metric": "default.StartupProgress.LoadingEditsPercentComplete",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/LoadingEditsTotal": {
+              "metric": "default.StartupProgress.LoadingEditsTotal",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/LoadingFsImageCount": {
+              "metric": "default.StartupProgress.LoadingFsImageCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/LoadingFsImageElapsedTime": {
+              "metric": "default.StartupProgress.LoadingFsImageElapsedTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/LoadingFsImagePercentComplete": {
+              "metric": "default.StartupProgress.LoadingFsImagePercentComplete",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/LoadingFsImageTotal": {
+              "metric": "default.StartupProgress.LoadingFsImageTotal",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/PercentComplete": {
+              "metric": "default.StartupProgress.PercentComplete",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/SafeModeCount": {
+              "metric": "default.StartupProgress.SafeModeCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/SafeModeElapsedTime": {
+              "metric": "default.StartupProgress.SafeModeElapsedTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/SafeModePercentComplete": {
+              "metric": "default.StartupProgress.SafeModePercentComplete",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/SafeModeTotal": {
+              "metric": "default.StartupProgress.SafeModeTotal",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/SavingCheckpointCount": {
+              "metric": "default.StartupProgress.SavingCheckpointCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/SavingCheckpointElapsedTime": {
+              "metric": "default.StartupProgress.SavingCheckpointElapsedTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/SavingCheckpointPercentComplete": {
+              "metric": "default.StartupProgress.SavingCheckpointPercentComplete",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/SavingCheckpointTotal": {
+              "metric": "default.StartupProgress.SavingCheckpointTotal",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/ExpiredHeartbeats": {
+              "metric": "dfs.FSNamesystem.ExpiredHeartbeats",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/LastCheckpointTime": {
+              "metric": "dfs.FSNamesystem.LastCheckpointTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/LastWrittenTransactionId": {
+              "metric": "dfs.FSNamesystem.LastWrittenTransactionId",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/MillisSinceLastLoadedEdits": {
+              "metric": "dfs.FSNamesystem.MillisSinceLastLoadedEdits",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/MissingReplOneBlocks": {
+              "metric": "dfs.FSNamesystem.MissingReplOneBlocks",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/PendingDataNodeMessageCount": {
+              "metric": "dfs.FSNamesystem.PendingDataNodeMessageCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/PostponedMisreplicatedBlocks": {
+              "metric": "dfs.FSNamesystem.PostponedMisreplicatedBlocks",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/Snapshots": {
+              "metric": "dfs.FSNamesystem.Snapshots",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/SnapshottableDirectories": {
+              "metric": "dfs.FSNamesystem.SnapshottableDirectories",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/StaleDataNodes": {
+              "metric": "dfs.FSNamesystem.StaleDataNodes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/TotalFiles": {
+              "metric": "dfs.FSNamesystem.TotalFiles",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/TransactionsSinceLastCheckpoint": {
+              "metric": "dfs.FSNamesystem.TransactionsSinceLastCheckpoint",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/TransactionsSinceLastLogRoll": {
+              "metric": "dfs.FSNamesystem.TransactionsSinceLastLogRoll",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/AllowSnapshotOps": {
+              "metric": "dfs.namenode.AllowSnapshotOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/BlockReceivedAndDeletedOps": {
+              "metric": "dfs.namenode.BlockReceivedAndDeletedOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/CacheReportAvgTime": {
+              "metric": "dfs.namenode.CacheReportAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/CacheReportNumOps": {
+              "metric": "dfs.namenode.CacheReportNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/CreateSnapshotOps": {
+              "metric": "dfs.namenode.CreateSnapshotOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/CreateSymlinkOps": {
+              "metric": "dfs.namenode.CreateSymlinkOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/DeleteSnapshotOps": {
+              "metric": "dfs.namenode.DeleteSnapshotOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/DisallowSnapshotOps": {
+              "metric": "dfs.namenode.DisallowSnapshotOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/FilesTruncated": {
+              "metric": "dfs.namenode.FilesTruncated",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/GetAdditionalDatanodeOps": {
+              "metric": "dfs.namenode.GetAdditionalDatanodeOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/GetEditAvgTime": {
+              "metric": "dfs.namenode.GetEditAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/GetEditNumOps": {
+              "metric": "dfs.namenode.GetEditNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/GetImageAvgTime": {
+              "metric": "dfs.namenode.GetImageAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/GetImageNumOps": {
+              "metric": "dfs.namenode.GetImageNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/GetLinkTargetOps": {
+              "metric": "dfs.namenode.GetLinkTargetOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/ListSnapshottableDirOps": {
+              "metric": "dfs.namenode.ListSnapshottableDirOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/PutImageAvgTime": {
+              "metric": "dfs.namenode.PutImageAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/PutImageNumOps": {
+              "metric": "dfs.namenode.PutImageNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/RenameSnapshotOps": {
+              "metric": "dfs.namenode.RenameSnapshotOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/SnapshotDiffReportOps": {
+              "metric": "dfs.namenode.SnapshotDiffReportOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/StorageBlockReportOps": {
+              "metric": "dfs.namenode.StorageBlockReportOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/TransactionsBatchedInSync": {
+              "metric": "dfs.namenode.TransactionsBatchedInSync",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcCountConcurrentMarkSweep": {
+              "metric": "jvm.JvmMetrics.GcCountConcurrentMarkSweep",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcCountParNew": {
+              "metric": "jvm.JvmMetrics.GcCountParNew",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcNumInfoThresholdExceeded": {
+              "metric": "jvm.JvmMetrics.GcNumInfoThresholdExceeded",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcNumWarnThresholdExceeded": {
+              "metric": "jvm.JvmMetrics.GcNumWarnThresholdExceeded",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcTimeMillisConcurrentMarkSweep": {
+              "metric": "jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcTimeMillisParNew": {
+              "metric": "jvm.JvmMetrics.GcTimeMillisParNew",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcTotalExtraSleepTime": {
+              "metric": "jvm.JvmMetrics.GcTotalExtraSleepTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemHeapMaxM": {
+              "metric": "jvm.JvmMetrics.MemHeapMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemMaxM": {
+              "metric": "jvm.JvmMetrics.MemMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemNonHeapMaxM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/DroppedPubAll": {
+              "metric": "metricssystem.MetricsSystem.DroppedPubAll",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumActiveSinks": {
+              "metric": "metricssystem.MetricsSystem.NumActiveSinks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumActiveSources": {
+              "metric": "metricssystem.MetricsSystem.NumActiveSources",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumAllSinks": {
+              "metric": "metricssystem.MetricsSystem.NumAllSinks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumAllSources": {
+              "metric": "metricssystem.MetricsSystem.NumAllSources",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/PublishAvgTime": {
+              "metric": "metricssystem.MetricsSystem.PublishAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/PublishNumOps": {
+              "metric": "metricssystem.MetricsSystem.PublishNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineAvgTime": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineDropped": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineDropped",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineNumOps": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineQsize": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineQsize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/SnapshotAvgTime": {
+              "metric": "metricssystem.MetricsSystem.SnapshotAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/SnapshotNumOps": {
+              "metric": "metricssystem.MetricsSystem.SnapshotNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/RetryCache/NameNodeRetryCache/CacheCleared": {
+              "metric": "rpc.RetryCache.NameNodeRetryCache.CacheCleared",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/RetryCache/NameNodeRetryCache/CacheHit": {
+              "metric": "rpc.RetryCache.NameNodeRetryCache.CacheHit",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/RetryCache/NameNodeRetryCache/CacheUpdated": {
+              "metric": "rpc.RetryCache.NameNodeRetryCache.CacheUpdated",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetServerDefaultsAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.GetServerDefaultsAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetServerDefaultsNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.GetServerDefaultsNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetTransactionIdAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.GetTransactionIdAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetTransactionIdNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.GetTransactionIdNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/IOExceptionAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.IOExceptionAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/IOExceptionNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.IOExceptionNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/PathIsNotEmptyDirectoryExceptionAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.PathIsNotEmptyDirectoryExceptionAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/PathIsNotEmptyDirectoryExceptionNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.PathIsNotEmptyDirectoryExceptionNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/RecoverLeaseAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.RecoverLeaseAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/RecoverLeaseNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.RecoverLeaseNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/Rename2AvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.Rename2AvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/Rename2NumOps": {
+              "metric": "rpcdetailed.rpcdetailed.Rename2NumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/SetTimesAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.SetTimesAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/SetTimesNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.SetTimesNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/UgiMetrics/GetGroupsAvgTime": {
+              "metric": "ugi.UgiMetrics.GetGroupsAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/UgiMetrics/GetGroupsNumOps": {
+              "metric": "ugi.UgiMetrics.GetGroupsNumOps",
+              "pointInTime": true,
+              "temporal": true
+            }
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "metrics": {
+          "default": {
+            "metrics/dfs/namenode/Used": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/TotalLoad": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.TotalLoad",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/TransactionsSinceLastCheckpoint": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.TransactionsSinceLastCheckpoint",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/TransactionsSinceLastLogRoll": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.TransactionsSinceLastLogRoll",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/ExpiredHeartbeats": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.ExpiredHeartbeats",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/LastCheckpointTime": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.LastCheckpointTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/LastWrittenTransactionId": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.LastWrittenTransactionId",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/MillisSinceLastLoadedEdits": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.MillisSinceLastLoadedEdits",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/MissingReplOneBlocks": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.MissingReplOneBlocks",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/PendingDataNodeMessageCount": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.PendingDataNodeMessageCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/PostponedMisreplicatedBlocks": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.PostponedMisreplicatedBlocks",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/Snapshots": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.Snapshots",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/SnapshottableDirectories": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.SnapshottableDirectories",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/StaleDataNodes": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.StaleDataNodes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/TotalFiles": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.TotalFiles",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/memMaxM": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemMaxM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/BlockCapacity": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.BlockCapacity",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/TotalFiles": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/HostName": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.HostName",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/GetListingOps": {
+              "metric": "Hadoop:service=NameNode,name=NameNode.GetListingOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/UpgradeFinalized": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/getProtocolVersion_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getProtocolVersion_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/fsync_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.fsync_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ugi/loginSuccess_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=ugi.loginSuccess_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/Safemode": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Safemode",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/CorruptBlocks": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CorruptBlocks",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/LiveNodes": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.LiveNodes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/renewLease_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/getFileInfo_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getFileInfo_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/CapacityRemaining": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityRemaining",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/PercentRemaining": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/memNonHeapUsedM": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapUsedM",
+              "unit": "MB",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/complete_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.complete_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/CapacityTotalGB": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityTotalGB",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/getBlockLocations_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getBlockLocations_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/AddBlockOps": {
+              "metric": "Hadoop:service=NameNode,name=NameNode.AddBlockOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/CapacityUsedGB": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityUsedGB",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/Syncs_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=NameNode.Syncs_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsBlocked": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsBlocked",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTime_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTime_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/PercentUsed": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/DecomNodes": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DecomNodes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/blockReport_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=NameNode.blockReport_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/NonDfsUsedSpace": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NonDfsUsedSpace",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/UpgradeFinalized": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/getFileInfo_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getFileInfo_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/getEditLogSize_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getEditLogSize_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ugi/loginSuccess_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=ugi.loginSuccess_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/blockReceived_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReceived_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/Safemode": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Safemode",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/FilesCreated": {
+              "metric": "Hadoop:service=NameNode,name=NameNode.FilesCreated",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/addBlock_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.addBlock_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/DecomNodes": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DecomNodes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/CapacityUsed": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityUsed",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/NonHeapMemoryUsed": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/memNonHeapCommittedM": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/DeadNodes": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DeadNodes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/PercentUsed": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/Free": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Free",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/Total": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Total",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/GetBlockLocations": {
+              "metric": "Hadoop:service=NameNode,name=NameNode.GetBlockLocations",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ugi/loginFailure_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=ugi.loginFailure_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/fsync_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.fsync_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/HeapMemoryMax": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/create_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.create_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.PendingReplicationBlocks",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/UnderReplicatedBlocks": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.UnderReplicatedBlocks",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/FileInfoOps": {
+              "metric": "Hadoop:service=NameNode,name=NameNode.FileInfoOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/MissingBlocks": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.MissingBlocks",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/sendHeartbeat_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.sendHeartbeat_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTime_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTime_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/blockReport_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReport_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/CapacityRemaining": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystemState.CapacityRemaining",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthenticationSuccesses": {
+              "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthenticationSuccesses",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.PendingDeletionBlocks",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthenticationFailures": {
+              "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthenticationFailures",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/getEditLogSize_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getEditLogSize_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/memHeapCommittedM": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemHeapCommittedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/FilesInGetListingOps": {
+              "metric": "Hadoop:service=NameNode,name=NameNode.FilesInGetListingOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsRunnable": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsRunnable",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/BlocksTotal": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.BlocksTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/CapacityTotal": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/complete_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.complete_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/LiveNodes": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.LiveNodes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsNew": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsNew",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/rollFsImage_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollFsImage_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthorizationFailures": {
+              "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthorizationFailures",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/Syncs_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=NameNode.Syncs_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/StartTime": {
+              "metric": "java.lang:type=Runtime.StartTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTime_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTime_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/blockReceived_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReceived_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/rollEditLog_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollEditLog_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/DeadNodes": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DeadNodes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "Hadoop:service=NameNode,name=RpcActivity.SentBytes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/HeapMemoryUsed": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/FilesTotal": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.FilesTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/Version": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Version",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/logWarn": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogWarn",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/ExcessBlocks": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.ExcessBlocks",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsTimedWaiting": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/gcCount": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/PercentRemaining": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "Hadoop:service=NameNode,name=RpcActivity.ReceivedBytes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/blockReport_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=NameNode.blockReport_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/NonHeapMemoryMax": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/rollFsImage_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollFsImage_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "Hadoop:service=NameNode,name=RpcActivity.NumOpenConnections",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/memHeapUsedM": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemHeapUsedM",
+              "unit": "MB",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.ScheduledReplicationBlocks",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsWaiting": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsWaiting",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/BlocksTotal": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.BlocksTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/gcTimeMillis": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcTimeMillis",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/getBlockLocations_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getBlockLocations_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/Transactions_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=NameNode.Transactions_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/create_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.create_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/CapacityTotal": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Total",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsTerminated": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsTerminated",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityRemainingGB",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/Transactions_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=NameNode.Transactions_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/MissingBlocks": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.MissingBlocks",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/Threads": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Threads",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/callQueueLen": {
+              "metric": "Hadoop:service=NameNode,name=RpcActivity.callQueueLen",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/CorruptBlocks": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/blockReport_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReport_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/TotalFiles": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/logInfo": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogInfo",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/NameDirStatuses": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NameDirStatuses",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/getListing_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getListing_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/rollEditLog_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollEditLog_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/addBlock_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.addBlock_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTime_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTime_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/CapacityUsed": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/sendHeartbeat_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.sendHeartbeat_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/CreateFileOps": {
+              "metric": "Hadoop:service=NameNode,name=NameNode.CreateFileOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/logError": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogError",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ugi/loginFailure_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=ugi.loginFailure_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/getProtocolVersion_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getProtocolVersion_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthorizationSuccesses": {
+              "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthorizationSuccesses",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/Version": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Version",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/getListing_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getListing_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/logFatal": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogFatal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/NonDfsUsedSpace": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NonDfsUsedSpace",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/renewLease_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/TotalBlocks": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalBlocks",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/CapacityNonDFSUsed": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityNonDFSUsed",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcTimeMillisConcurrentMarkSweep": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcTimeMillisConcurrentMarkSweep",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcCountConcurrentMarkSweep": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcCountConcurrentMarkSweep",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTimeAvgTime": {
+              "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTimeAvgTime": {
+              "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/CorruptFiles": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.CorruptFiles",
+              "pointInTime": true,
+              "temporal": false
+            }
+          }
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/cpu/cpu_idle":{
+              "metric":"cpu_idle",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_nice":{
+              "metric":"cpu_nice",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_system":{
+              "metric":"cpu_system",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_user":{
+              "metric":"cpu_user",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_wio":{
+              "metric":"cpu_wio",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/disk_free":{
+              "metric":"disk_free",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/disk_total":{
+              "metric":"disk_total",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/load/load_fifteen":{
+              "metric":"load_fifteen",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/load/load_five":{
+              "metric":"load_five",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/load/load_one":{
+              "metric":"load_one",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_buffers":{
+              "metric":"mem_buffers",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_cached":{
+              "metric":"mem_cached",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_free":{
+              "metric":"mem_free",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_shared":{
+              "metric":"mem_shared",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_total":{
+              "metric":"mem_total",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/swap_free":{
+              "metric":"swap_free",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/swap_total":{
+              "metric":"swap_total",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/network/bytes_in":{
+              "metric":"bytes_in",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/network/bytes_out":{
+              "metric":"bytes_out",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/network/pkts_in":{
+              "metric":"pkts_in",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/network/pkts_out":{
+              "metric":"pkts_out",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/process/proc_run":{
+              "metric":"proc_run",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+          

<TRUNCATED>