Posted to commits@ambari.apache.org by ja...@apache.org on 2017/10/11 23:03:28 UTC

[01/50] [abbrv] ambari git commit: Revert "AMBARI-22131 Move resources/stacks/HDP/3.0/widgets.json to resources/widgets.json (dsen)"

Repository: ambari
Updated Branches:
  refs/heads/branch-feature-AMBARI-14714-ui 9306544e5 -> 5ddb07cdb


Revert "AMBARI-22131 Move resources/stacks/HDP/3.0/widgets.json to resources/widgets.json (dsen)"

This reverts commit 7172655ff269fbb7e0d29ba93197aa5a804749c0.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b358ee20
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b358ee20
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b358ee20

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: b358ee20e857d31017e54c1bde6f3c1aac084030
Parents: fab2aa3
Author: Dmytro Sen <ds...@apache.org>
Authored: Fri Oct 6 16:44:41 2017 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Fri Oct 6 16:44:59 2017 +0300

----------------------------------------------------------------------
 .../server/api/services/AmbariMetaInfo.java     |  8 --
 .../AmbariManagementControllerImpl.java         | 22 +++--
 .../internal/ServiceResourceProvider.java       |  8 ++
 .../internal/StackArtifactResourceProvider.java | 18 +++-
 .../server/orm/entities/WidgetLayoutEntity.java |  6 +-
 .../ambari/server/stack/StackDirectory.java     | 18 ++++
 .../apache/ambari/server/stack/StackModule.java |  5 ++
 .../apache/ambari/server/state/StackInfo.java   |  8 ++
 .../resources/stacks/HDP/2.0.6/widgets.json     | 95 ++++++++++++++++++++
 .../main/resources/stacks/HDP/3.0/widgets.json  | 95 ++++++++++++++++++++
 .../server/api/services/AmbariMetaInfoTest.java |  8 --
 .../AmbariManagementControllerImplTest.java     |  6 +-
 .../AmbariManagementControllerTest.java         |  6 --
 .../resources/stacks/OTHER/1.0/widgets.json     | 95 ++++++++++++++++++++
 14 files changed, 364 insertions(+), 34 deletions(-)
----------------------------------------------------------------------
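
The net effect of this revert is that cluster-level widget descriptors are once again resolved per stack, via each stack's own widgets.json, instead of from a single common resources/widgets.json. A condensed sketch of the restored lookup, assembled from the AmbariManagementControllerImpl hunk below (names are taken verbatim from the diff; the enclosing method is elided):

    // Collect the distinct desired stacks of the cluster's services, then
    // pick up each stack's widgets.json if the file exists on disk.
    Set<StackId> stackIds = new HashSet<>();
    for (Service svc : cluster.getServices().values()) {
      stackIds.add(svc.getDesiredStackId());
    }
    for (StackId stackId : stackIds) {
      StackInfo stackInfo = ambariMetaInfo.getStack(stackId);
      String location = stackInfo.getWidgetsDescriptorFileLocation();
      if (location != null && new File(location).exists()) {
        widgetDescriptorFiles.add(new File(location));
      }
    }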


http://git-wip-us.apache.org/repos/asf/ambari/blob/b358ee20/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
index 425d247..de84965 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
@@ -21,7 +21,6 @@ package org.apache.ambari.server.api.services;
 import static org.apache.ambari.server.controller.spi.Resource.InternalType.Component;
 import static org.apache.ambari.server.controller.spi.Resource.InternalType.HostComponent;
 import static org.apache.ambari.server.controller.utilities.PropertyHelper.AGGREGATE_FUNCTION_IDENTIFIERS;
-import static org.apache.ambari.server.stack.StackDirectory.WIDGETS_DESCRIPTOR_FILE_NAME;
 
 import java.io.File;
 import java.io.FileReader;
@@ -126,7 +125,6 @@ public class AmbariMetaInfo {
   private File commonServicesRoot;
   private File extensionsRoot;
   private File serverVersionFile;
-  private File commonWidgetsDescriptorFile;
   private File customActionRoot;
   private Map<String, VersionDefinitionXml> versionDefinitions = null;
 
@@ -216,8 +214,6 @@ public class AmbariMetaInfo {
     serverVersionFile = new File(serverVersionFilePath);
 
     customActionRoot = new File(conf.getCustomActionDefinitionPath());
-
-    commonWidgetsDescriptorFile = new File(conf.getResourceDirPath(), WIDGETS_DESCRIPTOR_FILE_NAME);
   }
 
   /**
@@ -1439,8 +1435,4 @@ public class AmbariMetaInfo {
 
     return null;
   }
-
-  public File getCommonWidgetsDescriptorFile() {
-    return commonWidgetsDescriptorFile;
-  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/b358ee20/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 5642575..b2993e3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -5184,12 +5184,22 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
         widgetDescriptorFiles.add(widgetDescriptorFile);
       }
     } else {
-      // common cluster level widgets
-      File commonWidgetsFile = ambariMetaInfo.getCommonWidgetsDescriptorFile();
-      if (commonWidgetsFile != null && commonWidgetsFile.exists()) {
-        widgetDescriptorFiles.add(commonWidgetsFile);
-      } else {
-        LOG.warn("Common widgets file with path {%s} doesn't exist. No cluster widgets will be created.", commonWidgetsFile);
+      Set<StackId> stackIds = new HashSet<>();
+
+      for (Service svc : cluster.getServices().values()) {
+        stackIds.add(svc.getDesiredStackId());
+      }
+
+      for (StackId stackId : stackIds) {
+        StackInfo stackInfo = ambariMetaInfo.getStack(stackId);
+
+        String widgetDescriptorFileLocation = stackInfo.getWidgetsDescriptorFileLocation();
+        if (widgetDescriptorFileLocation != null) {
+          File widgetDescriptorFile = new File(widgetDescriptorFileLocation);
+          if (widgetDescriptorFile.exists()) {
+            widgetDescriptorFiles.add(widgetDescriptorFile);
+          }
+        }
       }
     }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b358ee20/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
index e65693b..76a4547 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
@@ -423,6 +423,8 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
     // do all validation checks
     validateCreateRequests(requests, clusters);
 
+    Set<Cluster> clustersSetFromRequests = new HashSet<>();
+
     for (ServiceRequest request : requests) {
       Cluster cluster = clusters.getCluster(request.getClusterName());
 
@@ -478,6 +480,12 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
 
       // Initialize service widgets
       getManagementController().initializeWidgetsAndLayouts(cluster, s);
+      clustersSetFromRequests.add(cluster);
+    }
+
+    // Create cluster widgets and layouts
+    for (Cluster cluster : clustersSetFromRequests) {
+      getManagementController().initializeWidgetsAndLayouts(cluster, null);
     }
   }
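
A note on the set introduced above: a single create request may carry several ServiceRequest entries for the same cluster, so the loop records each Cluster in clustersSetFromRequests and the cluster-level pass (initializeWidgetsAndLayouts(cluster, null), where a null service selects cluster-level widgets) runs once per distinct cluster rather than once per service. A minimal illustration of the pattern, with hypothetical request data:

    // Hypothetical: three requests targeting two distinct clusters; the set
    // collapses the duplicate, so the second loop runs twice, not three times.
    Set<Cluster> seen = new HashSet<>();
    for (ServiceRequest request : requests) {
      seen.add(clusters.getCluster(request.getClusterName()));
    }
    for (Cluster cluster : seen) {
      getManagementController().initializeWidgetsAndLayouts(cluster, null);
    }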
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b358ee20/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
index a7f7710..2e8a32a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
@@ -426,7 +426,7 @@ public class StackArtifactResourceProvider extends AbstractControllerResourcePro
     }
 
     if (StringUtils.isEmpty(serviceName)) {
-      return null;
+      return getWidgetsDescriptorForCluster(stackInfo);
     } else {
       return getWidgetsDescriptorForService(stackInfo, serviceName);
     }
@@ -450,6 +450,22 @@ public class StackArtifactResourceProvider extends AbstractControllerResourcePro
     return widgetDescriptor;
   }
 
+  public Map<String, Object> getWidgetsDescriptorForCluster(StackInfo stackInfo)
+      throws NoSuchParentResourceException, IOException {
+
+    Map<String, Object> widgetDescriptor = null;
+
+    String widgetDescriptorFileLocation = stackInfo.getWidgetsDescriptorFileLocation();
+    if (widgetDescriptorFileLocation != null) {
+      File widgetDescriptorFile = new File(widgetDescriptorFileLocation);
+      if (widgetDescriptorFile.exists()) {
+        widgetDescriptor = gson.fromJson(new FileReader(widgetDescriptorFile), widgetLayoutType);
+      }
+    }
+
+    return widgetDescriptor;
+  }
+
   /**
    * Get a kerberos descriptor.
    *
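
One detail worth flagging in the re-added getWidgetsDescriptorForCluster(): the FileReader handed to gson.fromJson() is never closed (Gson does not close readers passed to it), so the descriptor file handle is left to the garbage collector. A behavior-preserving variant using try-with-resources, assuming the same gson and widgetLayoutType fields used in the hunk above:

    public Map<String, Object> getWidgetsDescriptorForCluster(StackInfo stackInfo)
        throws NoSuchParentResourceException, IOException {
      String location = stackInfo.getWidgetsDescriptorFileLocation();
      if (location != null) {
        File widgetDescriptorFile = new File(location);
        if (widgetDescriptorFile.exists()) {
          // The reader is closed even if JSON parsing throws.
          try (FileReader reader = new FileReader(widgetDescriptorFile)) {
            return gson.fromJson(reader, widgetLayoutType);
          }
        }
      }
      return null;
    }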

http://git-wip-us.apache.org/repos/asf/ambari/blob/b358ee20/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java
index 1fa45e9..90d98fc 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java
@@ -33,7 +33,6 @@ import javax.persistence.OneToMany;
 import javax.persistence.OrderBy;
 import javax.persistence.Table;
 import javax.persistence.TableGenerator;
-import javax.persistence.UniqueConstraint;
 
 @Entity
 @Table(name = "widget_layout")
@@ -42,8 +41,7 @@ import javax.persistence.UniqueConstraint;
         pkColumnName = "sequence_name",
         valueColumnName = "sequence_value",
         pkColumnValue = "widget_layout_id_seq",
-        initialValue = 0,
-        uniqueConstraints=@UniqueConstraint(columnNames={"layout_name", "cluster_id"})
+        initialValue = 0
 )
 @NamedQueries({
     @NamedQuery(name = "WidgetLayoutEntity.findAll", query = "SELECT widgetLayout FROM WidgetLayoutEntity widgetLayout"),
@@ -58,7 +56,7 @@ public class WidgetLayoutEntity {
   @Column(name = "id", nullable = false, updatable = false)
   private Long id;
 
-  @Column(name = "layout_name", nullable = false, length = 255)
+  @Column(name = "layout_name", nullable = false, unique = true, length = 255)
   private String layoutName;
 
   @Column(name = "section_name", nullable = false, length = 255)

http://git-wip-us.apache.org/repos/asf/ambari/blob/b358ee20/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
index e3c586b..9259466 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
@@ -94,6 +94,11 @@ public class StackDirectory extends StackDefinitionDirectory {
   private String kerberosDescriptorPreconfigureFilePath;
 
   /**
+   * widgets descriptor file path
+   */
+  private String widgetsDescriptorFilePath;
+
+  /**
    * repository file
    */
   private RepositoryXml repoFile;
@@ -228,6 +233,15 @@ public class StackDirectory extends StackDefinitionDirectory {
   }
 
   /**
+   * Obtain the path to the (stack-level) widgets descriptor file
+   *
+   * @return the path to the (stack-level) widgets descriptor file
+   */
+  public String getWidgetsDescriptorFilePath() {
+    return widgetsDescriptorFilePath;
+  }
+
+  /**
    * Obtain the repository directory path.
    *
    * @return repository directory path
@@ -310,6 +324,10 @@ public class StackDirectory extends StackDefinitionDirectory {
       kerberosDescriptorPreconfigureFilePath = getAbsolutePath() + File.separator + KERBEROS_DESCRIPTOR_PRECONFIGURE_FILE_NAME;
     }
 
+    if (subDirs.contains(WIDGETS_DESCRIPTOR_FILE_NAME)) {
+      widgetsDescriptorFilePath = getAbsolutePath() + File.separator + WIDGETS_DESCRIPTOR_FILE_NAME;
+    }
+
     parseUpgradePacks(subDirs);
     parseServiceDirectories(subDirs);
     parseRepoFile(subDirs);

http://git-wip-us.apache.org/repos/asf/ambari/blob/b358ee20/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
index 71235f3..742706d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
@@ -294,6 +294,10 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
       stackInfo.setKerberosDescriptorPreConfigurationFileLocation(parentStack.getModuleInfo().getKerberosDescriptorPreConfigurationFileLocation());
     }
 
+    if (stackInfo.getWidgetsDescriptorFileLocation() == null) {
+      stackInfo.setWidgetsDescriptorFileLocation(parentStack.getModuleInfo().getWidgetsDescriptorFileLocation());
+    }
+
     mergeServicesWithParent(parentStack, allStacks, commonServices, extensions);
   }
 
@@ -569,6 +573,7 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
       stackInfo.setRcoFileLocation(stackDirectory.getRcoFilePath());
       stackInfo.setKerberosDescriptorFileLocation(stackDirectory.getKerberosDescriptorFilePath());
       stackInfo.setKerberosDescriptorPreConfigurationFileLocation(stackDirectory.getKerberosDescriptorPreconfigureFilePath());
+      stackInfo.setWidgetsDescriptorFileLocation(stackDirectory.getWidgetsDescriptorFilePath());
       stackInfo.setUpgradesFolder(stackDirectory.getUpgradesDir());
       stackInfo.setUpgradePacks(stackDirectory.getUpgradePacks());
       stackInfo.setConfigUpgradePack(stackDirectory.getConfigUpgradePack());
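
Taken together with the StackDirectory change above, the resolution order for a stack's widgets descriptor is: use the widgets.json found in the stack's own definition directory, and otherwise inherit the parent stack's location during the merge. Condensed from the two hunks:

    // StackDirectory: record the file if the stack directory contains one.
    if (subDirs.contains(WIDGETS_DESCRIPTOR_FILE_NAME)) {
      widgetsDescriptorFilePath = getAbsolutePath() + File.separator + WIDGETS_DESCRIPTOR_FILE_NAME;
    }

    // StackModule: fall back to the parent stack while merging.
    if (stackInfo.getWidgetsDescriptorFileLocation() == null) {
      stackInfo.setWidgetsDescriptorFileLocation(
          parentStack.getModuleInfo().getWidgetsDescriptorFileLocation());
    }

So a stack version that extends HDP 2.0.6 without shipping its own widgets.json picks up the 2.0.6 descriptor added by this commit.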

http://git-wip-us.apache.org/repos/asf/ambari/blob/b358ee20/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
index 3efc997..dcf850f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
@@ -429,6 +429,14 @@ public class StackInfo implements Comparable<StackInfo>, Validable {
     this.kerberosDescriptorPreConfigurationFileLocation = kerberosDescriptorPreConfigurationFileLocation;
   }
 
+  public String getWidgetsDescriptorFileLocation() {
+    return widgetsDescriptorFileLocation;
+  }
+
+  public void setWidgetsDescriptorFileLocation(String widgetsDescriptorFileLocation) {
+    this.widgetsDescriptorFileLocation = widgetsDescriptorFileLocation;
+  }
+
   /**
    * Set the path of the stack upgrade directory.
    *

http://git-wip-us.apache.org/repos/asf/ambari/blob/b358ee20/ambari-server/src/main/resources/stacks/HDP/2.0.6/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/widgets.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/widgets.json
new file mode 100644
index 0000000..3176354
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/widgets.json
@@ -0,0 +1,95 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_system_heatmap",
+      "display_name": "Heatmaps",
+      "section_name": "SYSTEM_HEATMAPS",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "Host Disk Space Used %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "disk_free",
+              "metric_path": "metrics/disk/disk_free",
+              "service_name": "STACK"
+            },
+            {
+              "name": "disk_total",
+              "metric_path": "metrics/disk/disk_total",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Disk Space Used %",
+              "value": "${((disk_total-disk_free)/disk_total)*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Host Memory Used %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "mem_total",
+              "metric_path": "metrics/memory/mem_total",
+              "service_name": "STACK"
+            },
+            {
+              "name": "mem_free",
+              "metric_path": "metrics/memory/mem_free",
+              "service_name": "STACK"
+            },
+            {
+              "name": "mem_cached",
+              "metric_path": "metrics/memory/mem_cached",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Memory Used %",
+              "value": "${((mem_total-mem_free-mem_cached)/mem_total)*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Host CPU Wait IO %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "cpu_wio",
+              "metric_path": "metrics/cpu/cpu_wio",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Memory Used %",
+              "value": "${cpu_wio*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        }
+      ]
+    }
+  ]
+}
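
All three widgets.json files restored by this commit share the same shape: a layouts array whose entries carry a layout_name, a section_name, and a widgetLayoutInfo list; each widget declares its metrics and a ${...} value expression over those metric names. Server-side the file is parsed untyped (see getWidgetsDescriptorForCluster above); a sketch of that parse, assuming Gson on the classpath and a Map-typed type token matching the method's return type:

    // Assumed imports: com.google.gson.reflect.TypeToken, java.lang.reflect.Type
    Type widgetLayoutType = new TypeToken<Map<String, Object>>(){}.getType();
    try (FileReader reader = new FileReader(widgetsFile)) {
      Map<String, Object> descriptor = gson.fromJson(reader, widgetLayoutType);
    }

Incidentally, the third widget ("Host CPU Wait IO %") names its value "Host Memory Used %", an apparent copy-paste slip carried in all three copies of the file.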

http://git-wip-us.apache.org/repos/asf/ambari/blob/b358ee20/ambari-server/src/main/resources/stacks/HDP/3.0/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/widgets.json b/ambari-server/src/main/resources/stacks/HDP/3.0/widgets.json
new file mode 100644
index 0000000..3176354
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/widgets.json
@@ -0,0 +1,95 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_system_heatmap",
+      "display_name": "Heatmaps",
+      "section_name": "SYSTEM_HEATMAPS",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "Host Disk Space Used %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "disk_free",
+              "metric_path": "metrics/disk/disk_free",
+              "service_name": "STACK"
+            },
+            {
+              "name": "disk_total",
+              "metric_path": "metrics/disk/disk_total",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Disk Space Used %",
+              "value": "${((disk_total-disk_free)/disk_total)*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Host Memory Used %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "mem_total",
+              "metric_path": "metrics/memory/mem_total",
+              "service_name": "STACK"
+            },
+            {
+              "name": "mem_free",
+              "metric_path": "metrics/memory/mem_free",
+              "service_name": "STACK"
+            },
+            {
+              "name": "mem_cached",
+              "metric_path": "metrics/memory/mem_cached",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Memory Used %",
+              "value": "${((mem_total-mem_free-mem_cached)/mem_total)*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Host CPU Wait IO %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "cpu_wio",
+              "metric_path": "metrics/cpu/cpu_wio",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Memory Used %",
+              "value": "${cpu_wio*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b358ee20/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
index 25e8d04..4baca5c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
@@ -1894,14 +1894,6 @@ public class AmbariMetaInfoTest {
     Assert.assertTrue(descriptor.getService("NEW_SERVICE").shouldPreconfigure());
   }
 
-  @Test
-  public void testGetCommonWidgetsFile() throws AmbariException {
-    File widgetsFile = metaInfo.getCommonWidgetsDescriptorFile();
-
-    Assert.assertNotNull(widgetsFile);
-    Assert.assertEquals("/var/lib/ambari-server/resources/widgets.json", widgetsFile.getPath());
-  }
-
   private File getStackRootTmp(String buildDir) {
     return new File(buildDir + "/ambari-metaInfo");
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/b358ee20/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
index 9547271..a02690f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
@@ -2367,14 +2367,18 @@ public class AmbariManagementControllerImplTest {
     Cluster cluster = createNiceMock(Cluster.class);
     Service service = createNiceMock(Service.class);
     expect(service.getDesiredStackId()).andReturn(stackId).atLeastOnce();
+    expect(cluster.getServices()).andReturn(ImmutableMap.<String, Service>builder()
+        .put("HDFS", service)
+        .build());
 
     expect(clusters.getCluster("c1")).andReturn(cluster).atLeastOnce();
 
 
     StackInfo stackInfo = createNiceMock(StackInfo.class);
+    expect(stackInfo.getWidgetsDescriptorFileLocation()).andReturn(null).once();
 
     expect(ambariMetaInfo.getStack("HDP", "2.1")).andReturn(stackInfo).atLeastOnce();
-    expect(ambariMetaInfo.getCommonWidgetsDescriptorFile()).andReturn(null).once();
+    expect(ambariMetaInfo.getStack(stackId)).andReturn(stackInfo).atLeastOnce();
 
     replay(injector, clusters, ambariMetaInfo, stackInfo, cluster, service, repoVersionDAO, repoVersion);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b358ee20/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index 7094caa..b370829 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -34,7 +34,6 @@ import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-import java.io.File;
 import java.io.StringReader;
 import java.lang.reflect.Type;
 import java.text.MessageFormat;
@@ -10425,11 +10424,6 @@ public class AmbariManagementControllerTest {
     Assert.assertEquals("UPDATED_BLOCKED_TIME", layoutUserWidgetEntities.get(3).getWidget().getWidgetName());
     Assert.assertEquals("HBASE_SUMMARY", layoutUserWidgetEntities.get(0).getWidget().getDefaultSectionName());
 
-    File widgetsFile  = ambariMetaInfo.getCommonWidgetsDescriptorFile();
-    assertNotNull(widgetsFile);
-    assertEquals("src/test/resources/widgets.json", widgetsFile.getPath());
-    assertTrue(widgetsFile.exists());
-
     candidateLayoutEntity = null;
     for (WidgetLayoutEntity entity : layoutEntities) {
       if (entity.getLayoutName().equals("default_system_heatmap")) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/b358ee20/ambari-server/src/test/resources/stacks/OTHER/1.0/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/OTHER/1.0/widgets.json b/ambari-server/src/test/resources/stacks/OTHER/1.0/widgets.json
new file mode 100644
index 0000000..3176354
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/OTHER/1.0/widgets.json
@@ -0,0 +1,95 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_system_heatmap",
+      "display_name": "Heatmaps",
+      "section_name": "SYSTEM_HEATMAPS",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "Host Disk Space Used %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "disk_free",
+              "metric_path": "metrics/disk/disk_free",
+              "service_name": "STACK"
+            },
+            {
+              "name": "disk_total",
+              "metric_path": "metrics/disk/disk_total",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Disk Space Used %",
+              "value": "${((disk_total-disk_free)/disk_total)*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Host Memory Used %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "mem_total",
+              "metric_path": "metrics/memory/mem_total",
+              "service_name": "STACK"
+            },
+            {
+              "name": "mem_free",
+              "metric_path": "metrics/memory/mem_free",
+              "service_name": "STACK"
+            },
+            {
+              "name": "mem_cached",
+              "metric_path": "metrics/memory/mem_cached",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Memory Used %",
+              "value": "${((mem_total-mem_free-mem_cached)/mem_total)*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Host CPU Wait IO %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "cpu_wio",
+              "metric_path": "metrics/cpu/cpu_wio",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Memory Used %",
+              "value": "${cpu_wio*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        }
+      ]
+    }
+  ]
+}


[20/50] [abbrv] ambari git commit: Revert "AMBARI-22089. Hive View 2.0 - Unable to update existing saved queries, view creates new records under saved queries tab (pallavkul)"

Posted by ja...@apache.org.
Revert "AMBARI-22089. Hive View 2.0 - Unable to update existing saved queries, view creates new records under saved queries tab (pallavkul)"

This reverts commit d3b67eeab455b01f8f921039b50818a6ded32839.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e83c86dc
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e83c86dc
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e83c86dc

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: e83c86dc310e4878f57f2cec9e32e8dc83394913
Parents: ce2a0a0
Author: pallavkul <pa...@gmail.com>
Authored: Mon Oct 9 16:47:14 2017 +0530
Committer: pallavkul <pa...@gmail.com>
Committed: Mon Oct 9 16:47:14 2017 +0530

----------------------------------------------------------------------
 .../savedQueries/SavedQueryResourceManager.java | 17 ++----
 .../resources/ui/app/routes/queries/query.js    | 63 +++++---------------
 .../hive20/src/main/resources/ui/yarn.lock      |  2 +-
 3 files changed, 21 insertions(+), 61 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e83c86dc/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/savedQueries/SavedQueryResourceManager.java
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/savedQueries/SavedQueryResourceManager.java b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/savedQueries/SavedQueryResourceManager.java
index fff202c..3690683 100644
--- a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/savedQueries/SavedQueryResourceManager.java
+++ b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/savedQueries/SavedQueryResourceManager.java
@@ -115,17 +115,12 @@ public class SavedQueryResourceManager extends PersonalCRUDResourceManager<Saved
   }
 
   @Override
-  public SavedQuery update(SavedQuery object, String id) throws ItemNotFound {
-    String query = object.getShortQuery();
-    object.setShortQuery(makeShortQuery(query));
-    object = super.update(object, id);
-    try {
-      createDefaultQueryFile(object, query);
-
-    } catch (ServiceFormattedException e) {
-      cleanupAfterErrorAndThrowAgain(object, e);
-    }
-    return object;
+  public SavedQuery update(SavedQuery newObject, String id) throws ItemNotFound {
+    SavedQuery savedQuery = super.update(newObject, id);
+    // Emptying short query so that in next read, this gets updated with proper value
+    // from the queryFile
+    emptyShortQueryField(savedQuery);
+    return savedQuery;
   }
 
   @Override
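
With the revert, update() no longer re-derives the short query and rewrites the query file; it persists the entity through super.update() and then blanks the short query (emptyShortQueryField is an existing helper in this class; its body is not shown in this diff) so that the next read repopulates it from queryFile. A hypothetical caller-side illustration of that contract:

    // Hypothetical usage sketch: after update(), shortQuery is deliberately
    // stale/empty; a subsequent read is expected to rebuild it from the
    // query file. read(id) is assumed from the CRUD resource manager base.
    SavedQuery updated = savedQueryResourceManager.update(incoming, id);
    SavedQuery fresh = savedQueryResourceManager.read(id);  // shortQuery refreshed here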

http://git-wip-us.apache.org/repos/asf/ambari/blob/e83c86dc/contrib/views/hive20/src/main/resources/ui/app/routes/queries/query.js
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/routes/queries/query.js b/contrib/views/hive20/src/main/resources/ui/app/routes/queries/query.js
index 3e5adc1..9e8b6db 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/routes/queries/query.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/routes/queries/query.js
@@ -631,71 +631,36 @@ export default Ember.Route.extend(UILoggerMixin, {
       let owner = this.get('controller.model').get('owner');
       let queryFile = this.get('controller.model').get('queryFile');
       let logFile = this.get('controller.model').get('logFile');
-      let shortQuery = (currentQuery.length > 0) ? currentQuery : ";";
-      let savedQueryId = this.get('controller.model').get('id');
 
-
-      this.store.findAll('savedQuery').then(savedQueries => {
-        return savedQueries.toArray();
-      }).then((existingSavedQueries) =>{
-
-        var queryExist = existingSavedQueries.filterBy('id', savedQueryId).get('firstObject');
-
-        if(queryExist){
-          this.send('updateSavedQuery',  queryExist.get('id'));
-        } else{
-          this.send('addSavedQuery', selectedDb, newTitle, owner, shortQuery );
-        }
-
-
-      });
-
-    },
-
-    addSavedQuery(selectedDb, newTitle, owner, shortQuery){
+      let payload = {"title" : newTitle,
+        "dataBase": selectedDb,
+        "owner" : owner,
+        "shortQuery" : (currentQuery.length > 0) ? currentQuery : ";",
+        "queryFile" : queryFile,
+        "logFile" : logFile};
 
       let newSaveQuery = this.get('store').createRecord('saved-query',
         { dataBase:selectedDb,
           title:newTitle,
+          queryFile: queryFile,
           owner: owner,
-          shortQuery: shortQuery
+          shortQuery: (currentQuery.length > 0) ? currentQuery : ";"
         });
 
+
       newSaveQuery.save().then((data) => {
+        console.log('saved query saved');
+
         this.get('controller.model').set('title', newTitle);
         this.get('controller.model').set('isQueryDirty', false);
         this.get('controller').set('worksheetModalSuccess', true);
+
         Ember.run.later(() => {
           this.get('controller').set('showWorksheetModal', false);
           this.closeWorksheetAfterSave();
         }, 2 * 1000);
-      });
-
-    },
-
-    updateSavedQuery(savedQueryId){
-          let currentQuery = this.get('controller.model').get('query');
-          let selectedDb = this.get('controller.model').get('selectedDb');
-          let owner = this.get('controller.model').get('owner');
-
-          this.get('store').findRecord('saved-query', savedQueryId ).then(savedQuery => {
-            savedQuery.set('shortQuery', (currentQuery.length > 0) ? currentQuery : ";");
-            savedQuery.set('dataBase', selectedDb );
-            savedQuery.set('owner', owner );
-
-            savedQuery.save().then(savedQuery => {
-
-                this.get('controller.model').set('isQueryDirty', false);
-                this.get('controller').set('worksheetModalSuccess', true);
-
-                Ember.run.later(() => {
-                  this.get('controller').set('showWorksheetModal', false);
-                  this.closeWorksheetAfterSave();
-                }, 2 * 1000);
-            })
-
-          });
 
+      });
 
     },
 
@@ -703,7 +668,7 @@ export default Ember.Route.extend(UILoggerMixin, {
       this.get('controller').set('showWorksheetModal', false);
       this.closeWorksheetAfterSave();
       this.get('controller.model').set('tabDataToClose', null);
-    },
+  },
 
     expandQueryEdidorPanel(){
       if(!this.get('isQueryEdidorPaneExpanded')){

http://git-wip-us.apache.org/repos/asf/ambari/blob/e83c86dc/contrib/views/hive20/src/main/resources/ui/yarn.lock
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/yarn.lock b/contrib/views/hive20/src/main/resources/ui/yarn.lock
index 607cf81..477a15c 100644
--- a/contrib/views/hive20/src/main/resources/ui/yarn.lock
+++ b/contrib/views/hive20/src/main/resources/ui/yarn.lock
@@ -569,7 +569,7 @@ babel-plugin-transform-es2015-block-scoped-functions@^6.22.0:
   dependencies:
     babel-runtime "^6.22.0"
 
-babel-plugin-transform-es2015-block-scoping@^6.23.0, babel-plugin-transform-es2015-block-scoping@^6.24.1:
+babel-plugin-transform-es2015-block-scoping@^6.23.0:
   version "6.24.1"
   resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-block-scoping/-/babel-plugin-transform-es2015-block-scoping-6.24.1.tgz#76c295dc3a4741b1665adfd3167215dcff32a576"
   dependencies:


[21/50] [abbrv] ambari git commit: AMBARI-22168 Move service metrics to separate tab. (atkach)

Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/test/views/main/service/info/summary_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/service/info/summary_test.js b/ambari-web/test/views/main/service/info/summary_test.js
index 41f2992..9498230 100644
--- a/ambari-web/test/views/main/service/info/summary_test.js
+++ b/ambari-web/test/views/main/service/info/summary_test.js
@@ -30,9 +30,7 @@ describe('App.MainServiceInfoSummaryView', function() {
         id: 'HDFS',
         serviceName: 'HDFS',
         hostComponents: []
-      }),
-      getActiveWidgetLayout: Em.K,
-      loadWidgetLayouts: Em.K
+      })
     }),
     alertsController: Em.Object.create(),
     service: Em.Object.create()
@@ -523,281 +521,4 @@ describe('App.MainServiceInfoSummaryView', function() {
       )).to.be.true;
     });
   });
-
-  describe("#constructGraphObjects()", function() {
-    var mock = Em.Object.create({
-      isServiceWithWidgets: false
-    });
-
-    beforeEach(function() {
-      sinon.stub(App.StackService, 'find').returns(mock);
-      sinon.stub(view, 'getUserPref').returns({
-        complete: function(callback){callback();}
-      })
-    });
-    afterEach(function() {
-      App.StackService.find.restore();
-      view.getUserPref.restore();
-    });
-
-    it("metrics not loaded", function() {
-      mock.set('isServiceWithWidgets', false);
-      view.constructGraphObjects(null);
-      expect(view.get('isServiceMetricLoaded')).to.be.false;
-      expect(view.getUserPref.called).to.be.false;
-    });
-
-    it("metrics loaded", function() {
-      App.ChartServiceMetricsG1 = Em.Object.extend();
-      mock.set('isServiceWithWidgets', true);
-      view.constructGraphObjects(['G1']);
-      expect(view.get('isServiceMetricLoaded')).to.be.true;
-      expect(view.getUserPref.calledOnce).to.be.true;
-      expect(view.get('serviceMetricGraphs')).to.not.be.empty;
-    });
-  });
-
-  describe("#getUserPrefSuccessCallback()", function() {
-
-    it("currentTimeRangeIndex should be set", function() {
-      view.getUserPrefSuccessCallback(1);
-      expect(view.get('currentTimeRangeIndex')).to.equal(1);
-    });
-  });
-
-  describe("#getUserPrefErrorCallback()", function() {
-
-    beforeEach(function() {
-      sinon.stub(view, 'postUserPref');
-    });
-    afterEach(function() {
-      view.postUserPref.restore();
-    });
-
-    it("request.status = 404", function() {
-      view.getUserPrefErrorCallback({status: 404});
-      expect(view.get('currentTimeRangeIndex')).to.equal(0);
-      expect(view.postUserPref.calledOnce).to.be.true;
-    });
-
-    it("request.status = 403", function() {
-      view.getUserPrefErrorCallback({status: 403});
-      expect(view.postUserPref.called).to.be.false;
-    });
-  });
-
-  describe("#widgetActions", function() {
-
-    beforeEach(function() {
-      this.mock = sinon.stub(App, 'isAuthorized');
-      view.setProperties({
-        staticWidgetLayoutActions: [{id: 1}],
-        staticAdminPrivelegeWidgetActions: [{id: 2}],
-        staticGeneralWidgetActions: [{id: 3}]
-      });
-    });
-    afterEach(function() {
-      this.mock.restore();
-    });
-
-    it("not authorized", function() {
-      this.mock.returns(false);
-      view.propertyDidChange('widgetActions');
-      expect(view.get('widgetActions').mapProperty('id')).to.eql([3]);
-    });
-
-    it("is authorized", function() {
-      this.mock.returns(true);
-      App.supports.customizedWidgetLayout = true;
-      view.propertyDidChange('widgetActions');
-      expect(view.get('widgetActions').mapProperty('id')).to.eql([1, 2, 3]);
-    });
-  });
-
-  describe("#doWidgetAction()", function() {
-
-    beforeEach(function() {
-      view.set('controller.action1', Em.K);
-      sinon.stub(view.get('controller'), 'action1');
-    });
-    afterEach(function() {
-      view.get('controller').action1.restore();
-    });
-
-    it("action exist", function() {
-      view.doWidgetAction({context: 'action1'});
-      expect(view.get('controller').action1.calledOnce).to.be.true;
-    });
-  });
-
-  describe("#setTimeRange", function() {
-
-    it("range = 0", function() {
-      var widget = Em.Object.create({
-        widgetType: 'GRAPH',
-        properties: {
-          time_range: '0'
-        }
-      });
-      view.set('controller.widgets', [widget]);
-      view.setTimeRange({context: {value: '0'}});
-      expect(widget.get('properties').time_range).to.be.equal('0')
-    });
-
-    it("range = 1", function() {
-      var widget = Em.Object.create({
-        widgetType: 'GRAPH',
-        properties: {
-          time_range: 0
-        }
-      });
-      view.set('controller.widgets', [widget]);
-      view.setTimeRange({context: {value: '1'}});
-      expect(widget.get('properties').time_range).to.be.equal('1')
-    });
-  });
-
-  describe("#makeSortable()", function() {
-    var mock = {
-      on: function(arg1, arg2, callback) {
-        callback();
-      },
-      off: Em.K,
-      sortable: function() {
-        return {
-          disableSelection: Em.K
-        }
-      }
-    };
-
-    beforeEach(function() {
-      sinon.stub(window, '$').returns(mock);
-      sinon.spy(mock, 'on');
-      sinon.spy(mock, 'off');
-      sinon.spy(mock, 'sortable');
-      view.makeSortable();
-    });
-    afterEach(function() {
-      window.$.restore();
-      mock.on.restore();
-      mock.off.restore();
-      mock.sortable.restore();
-    });
-
-    it("on() should be called", function() {
-      expect(mock.on.calledWith('DOMNodeInserted', '#widget_layout')).to.be.true;
-    });
-
-    it("sortable() should be called", function() {
-      expect(mock.sortable.calledOnce).to.be.true;
-    });
-
-    it("off() should be called", function() {
-      expect(mock.off.calledWith('DOMNodeInserted', '#widget_layout')).to.be.true;
-    });
-  });
-
-  describe('#didInsertElement', function () {
-
-    beforeEach(function () {
-      sinon.stub(view, 'constructGraphObjects', Em.K);
-      this.mock = sinon.stub(App, 'get');
-      sinon.stub(view, 'getServiceModel');
-      sinon.stub(view.get('controller'), 'getActiveWidgetLayout');
-      sinon.stub(view.get('controller'), 'loadWidgetLayouts');
-      sinon.stub(view, 'makeSortable');
-      sinon.stub(view, 'addWidgetTooltip');
-
-    });
-
-    afterEach(function () {
-      view.constructGraphObjects.restore();
-      this.mock.restore();
-      view.getServiceModel.restore();
-      view.get('controller').getActiveWidgetLayout.restore();
-      view.get('controller').loadWidgetLayouts.restore();
-      view.makeSortable.restore();
-      view.addWidgetTooltip.restore();
-    });
-
-    it("getServiceModel should be called", function() {
-      view.didInsertElement();
-      expect(view.getServiceModel.calledOnce).to.be.true;
-    });
-    it("addWidgetTooltip should be called", function() {
-      view.didInsertElement();
-      expect(view.addWidgetTooltip.calledOnce).to.be.true;
-    });
-    it("makeSortable should be called", function() {
-      view.didInsertElement();
-      expect(view.makeSortable.calledOnce).to.be.true;
-    });
-    it("getActiveWidgetLayout should be called", function() {
-      view.didInsertElement();
-      expect(view.get('controller').getActiveWidgetLayout.calledOnce).to.be.true;
-    });
-
-    describe("serviceName is null, metrics not supported, widgets not supported", function() {
-      beforeEach(function () {
-        view.set('controller.content.serviceName', null);
-        this.mock.returns(false);
-        view.didInsertElement();
-      });
-
-      it("loadWidgetLayouts should not be called", function() {
-        expect(view.get('controller').loadWidgetLayouts.called).to.be.false;
-      });
-      it("constructGraphObjects should not be called", function() {
-        expect(view.constructGraphObjects.called).to.be.false;
-      });
-    });
-
-    describe("serviceName is set, metrics is supported, widgets is supported", function() {
-      beforeEach(function () {
-        view.set('controller.content.serviceName', 'S1');
-        this.mock.returns(true);
-        view.didInsertElement();
-      });
-
-      it("loadWidgetLayouts should be called", function() {
-        expect(view.get('controller').loadWidgetLayouts.calledOnce).to.be.true;
-      });
-      it("constructGraphObjects should be called", function() {
-        expect(view.constructGraphObjects.calledOnce).to.be.true;
-      });
-    });
-  });
-
-  describe("#addWidgetTooltip()", function() {
-    var mock = {
-      hoverIntent: Em.K
-    };
-
-    beforeEach(function() {
-      sinon.stub(Em.run, 'later', function(arg1, callback) {
-        callback();
-      });
-      sinon.stub(App, 'tooltip');
-      sinon.stub(window, '$').returns(mock);
-      sinon.spy(mock, 'hoverIntent');
-      view.addWidgetTooltip();
-    });
-    afterEach(function() {
-      Em.run.later.restore();
-      App.tooltip.restore();
-      window.$.restore();
-      mock.hoverIntent.restore();
-    });
-
-    it("Em.run.later should be called", function() {
-      expect(Em.run.later.calledOnce).to.be.true;
-    });
-    it("App.tooltip should be called", function() {
-      expect(App.tooltip.calledOnce).to.be.true;
-    });
-    it("hoverIntent should be called", function() {
-      expect(mock.hoverIntent.calledOnce).to.be.true;
-    });
-  });
-
 });
\ No newline at end of file


[12/50] [abbrv] ambari git commit: AMBARI-21776. Move druid version to druid 0.10.1 and drop TP flag. (Slim Bouguerra via Swapan Shridhar).

Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/params.py b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/params.py
new file mode 100644
index 0000000..fd1cde6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/params.py
@@ -0,0 +1,200 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from ambari_commons import OSCheck
+from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+from resource_management.libraries.functions.default import default
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+
+import status_params
+
+# a map of the Ambari role to the component name
+# for use with <stack-root>/current/<component>
+SERVER_ROLE_DIRECTORY_MAP = {
+  'DRUID_BROKER': 'druid-broker',
+  'DRUID_COORDINATOR': 'druid-coordinator',
+  'DRUID_HISTORICAL': 'druid-historical',
+  'DRUID_MIDDLEMANAGER': 'druid-middlemanager',
+  'DRUID_OVERLORD': 'druid-overlord',
+  'DRUID_ROUTER': 'druid-router'
+}
+
+# server configurations
+config = Script.get_config()
+stack_root = Script.get_stack_root()
+tmp_dir = Script.get_tmp_dir()
+
+stack_name = default("/hostLevelParams/stack_name", None)
+
+# stack version
+stack_version = default("/commandParams/version", None)
+
+# un-formatted stack version
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+
+# default role to coordinator needed for service checks
+component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "DRUID_COORDINATOR")
+
+hostname = config['hostname']
+sudo = AMBARI_SUDO_BINARY
+
+# default druid parameters
+druid_home = format("{stack_root}/current/{component_directory}")
+druid_conf_dir = format("{stack_root}/current/{component_directory}/conf")
+
+druid_common_conf_dir = druid_conf_dir + "/_common"
+druid_coordinator_conf_dir = druid_conf_dir + "/coordinator"
+druid_overlord_conf_dir = druid_conf_dir + "/overlord"
+druid_broker_conf_dir = druid_conf_dir + "/broker"
+druid_historical_conf_dir = druid_conf_dir + "/historical"
+druid_middlemanager_conf_dir = druid_conf_dir + "/middleManager"
+druid_router_conf_dir = druid_conf_dir + "/router"
+druid_extensions_dir = druid_home + "/extensions"
+druid_hadoop_dependencies_dir = druid_home + "/hadoop-dependencies"
+druid_segment_infoDir = config['configurations']['druid-historical']['druid.segmentCache.infoDir']
+druid_segment_cache_locations = config['configurations']['druid-historical']['druid.segmentCache.locations']
+druid_tasks_dir = config['configurations']['druid-middlemanager']['druid.indexer.task.baseTaskDir']
+druid_user = config['configurations']['druid-env']['druid_user']
+druid_log_dir = config['configurations']['druid-env']['druid_log_dir']
+druid_classpath = config['configurations']['druid-env']['druid_classpath']
+druid_extensions = config['configurations']['druid-common']['druid.extensions.pullList']
+druid_repo_list = config['configurations']['druid-common']['druid.extensions.repositoryList']
+druid_extensions_load_list = config['configurations']['druid-common']['druid.extensions.loadList']
+druid_security_extensions_load_list = config['configurations']['druid-common']['druid.security.extensions.loadList']
+
+
+# status params
+druid_pid_dir = status_params.druid_pid_dir
+user_group = config['configurations']['cluster-env']['user_group']
+java8_home = config['hostLevelParams']['java_home']
+druid_env_sh_template = config['configurations']['druid-env']['content']
+
+# log4j params
+log4j_props = config['configurations']['druid-log4j']['content']
+druid_log_level = config['configurations']['druid-log4j']['druid_log_level']
+metamx_log_level = config['configurations']['druid-log4j']['metamx_log_level']
+root_log_level = config['configurations']['druid-log4j']['root_log_level']
+
+druid_log_maxbackupindex = default('/configurations/druid-logrotate/druid_log_maxbackupindex', 7)
+druid_log_maxfilesize = default('/configurations/druid-logrotate/druid_log_maxfilesize', 256)
+logrotate_props = config['configurations']['druid-logrotate']['content']
+
+# Metadata storage
+metadata_storage_user = config['configurations']['druid-common']['druid.metadata.storage.connector.user']
+metadata_storage_password = config['configurations']['druid-common']['druid.metadata.storage.connector.password']
+metadata_storage_db_name = config['configurations']['druid-common']['database_name']
+metadata_storage_db_name = config['configurations']['druid-common']['database_name']
+metadata_storage_type = config['configurations']['druid-common']['druid.metadata.storage.type']
+metadata_storage_url = config['configurations']['druid-common']['druid.metadata.storage.connector.connectURI']
+jdk_location = config['hostLevelParams']['jdk_location']
+if 'mysql' == metadata_storage_type:
+  jdbc_driver_jar = default("/hostLevelParams/custom_mysql_jdbc_name", None)
+  connector_curl_source = format("{jdk_location}/{jdbc_driver_jar}")
+  connector_download_dir=format("{druid_extensions_dir}/mysql-metadata-storage")
+  downloaded_custom_connector = format("{tmp_dir}/{jdbc_driver_jar}")
+
+check_db_connection_jar_name = "DBConnectionVerification.jar"
+check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
+
+# HDFS
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', 'missing_principal').replace("_HOST",
+                                                                                                             hostname)
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+dfs_type = default("/commandParams/dfs_type", "")
+hdfs_tmp_dir = config['configurations']['hadoop-env']['hdfs_tmp_dir']
+
+# Kerberos
+druid_principal_name = default('/configurations/druid-common/druid.hadoop.security.kerberos.principal',
+                               'missing_principal')
+druid_user_keytab = default('/configurations/druid-common/druid.hadoop.security.kerberos.keytab', 'missing_keytab')
+
+import functools
+
+# create partial functions with common arguments for every HdfsResource call
+# to create hdfs directory we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  hdfs_resource_ignore_file="/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+  security_enabled=security_enabled,
+  keytab=hdfs_user_keytab,
+  kinit_path_local=kinit_path_local,
+  hadoop_bin_dir=hadoop_bin_dir,
+  hadoop_conf_dir=hadoop_conf_dir,
+  principal_name=hdfs_principal_name,
+  hdfs_site=hdfs_site,
+  default_fs=default_fs,
+  immutable_paths=get_not_managed_resources(),
+  dfs_type=dfs_type
+)
+
+# Ambari Metrics
+metric_emitter_type = "noop"
+metric_collector_host = ""
+metric_collector_port = ""
+metric_collector_protocol = ""
+metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
+metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
+metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
+
+ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
+has_metric_collector = not len(ams_collector_hosts) == 0
+
+if has_metric_collector:
+    metric_emitter_type = "ambari-metrics"
+    if 'cluster-env' in config['configurations'] and \
+                    'metrics_collector_vip_host' in config['configurations']['cluster-env']:
+        metric_collector_host = config['configurations']['cluster-env']['metrics_collector_vip_host']
+    else:
+        metric_collector_host = ams_collector_hosts[0]
+    if 'cluster-env' in config['configurations'] and \
+                    'metrics_collector_vip_port' in config['configurations']['cluster-env']:
+        metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
+    else:
+        metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "localhost:6188")
+        if metric_collector_web_address.find(':') != -1:
+            metric_collector_port = metric_collector_web_address.split(':')[1]
+        else:
+            metric_collector_port = '6188'
+    if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
+        metric_collector_protocol = 'https'
+    else:
+        metric_collector_protocol = 'http'
+    pass
+
+# Create current Hadoop client libs
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
+lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
+lzo_packages = get_lzo_packages(stack_version_unformatted)
+hadoop_lib_home = stack_root + '/' + stack_version + '/hadoop/lib'
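
A note on the collector resolution above: stripped of Ambari's Script/config plumbing, it reduces to a small pure function. The sketch below is illustrative only, using a plain dict in place of the command JSON; names are otherwise taken from the hunk above.

    def resolve_collector(config, collector_hosts):
        if not collector_hosts:
            return None  # emitter stays "noop"
        cluster_env = config.get('configurations', {}).get('cluster-env', {})
        ams_site = config.get('configurations', {}).get('ams-site', {})
        host = cluster_env.get('metrics_collector_vip_host') or collector_hosts[0]
        port = cluster_env.get('metrics_collector_vip_port')
        if not port:
            webapp = ams_site.get('timeline.metrics.service.webapp.address', 'localhost:6188')
            port = webapp.split(':')[1] if ':' in webapp else '6188'
        policy = ams_site.get('timeline.metrics.service.http.policy', 'HTTP_ONLY')
        protocol = 'https' if policy == 'HTTPS_ONLY' else 'http'
        return protocol, host, port

    print(resolve_collector({'configurations': {}}, ['c1.example.com']))
    # -> ('http', 'c1.example.com', '6188')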

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/router.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/router.py b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/router.py
new file mode 100644
index 0000000..1731a2a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/router.py
@@ -0,0 +1,28 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from druid_node import DruidBase
+
+
+class DruidRouter(DruidBase):
+  def __init__(self):
+    DruidBase.__init__(self, nodeType="router")
+
+
+if __name__ == "__main__":
+  DruidRouter().execute()
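
router.py leans entirely on DruidBase, which is defined in druid_node.py elsewhere in this patch. A hypothetical, much-reduced reconstruction of that pattern (the real class extends resource_management's Script and implements the full install/configure/start/stop lifecycle):

    class DruidBase(object):
        def __init__(self, nodeType=None):
            # nodeType selects the daemon: "router", "broker", "overlord", ...
            self.nodeType = nodeType

        def execute(self):
            # The real Script.execute() dispatches to the lifecycle method
            # named in the command Ambari passes on the command line.
            print("would run lifecycle commands for druid %s" % self.nodeType)

    class DruidRouter(DruidBase):
        def __init__(self):
            DruidBase.__init__(self, nodeType="router")

    DruidRouter().execute()  # -> would run lifecycle commands for druid router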

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/service_check.py
new file mode 100644
index 0000000..139b727
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/service_check.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.format import format
+from resource_management.core.resources.system import Execute
+
+
+class ServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    self.check_component(params, "druid_coordinator", "druid-coordinator")
+    self.check_component(params, "druid_overlord", "druid-overlord")
+
+  def check_component(self, params, component_name, config_name):
+    component_port = params.config['configurations'][config_name]['druid.port']
+    for component_host in params.config['clusterHostInfo'][format('{component_name}_hosts')]:
+      Execute(format(
+        "curl -s -o /dev/null -w'%{{http_code}}' --negotiate -u: -k {component_host}:{component_port}/status | grep 200"),
+        tries=10,
+        try_sleep=3,
+        logoutput=True)
+
+
+if __name__ == "__main__":
+  ServiceCheck().execute()
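
The Execute(curl ...) above polls each component's /status endpoint until it returns HTTP 200. A rough standalone equivalent using only the Python 2 standard library, for illustration; it skips the Kerberos negotiation that the real check delegates to curl --negotiate:

    import time
    import urllib2

    def wait_for_status(host, port, tries=10, try_sleep=3):
        url = "http://%s:%s/status" % (host, port)
        for _ in range(tries):
            try:
                if urllib2.urlopen(url, timeout=5).getcode() == 200:
                    return True
            except Exception:
                pass
            time.sleep(try_sleep)
        return False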

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/status_params.py
new file mode 100644
index 0000000..ee1d61c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/status_params.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.libraries.script.script import Script
+
+config = Script.get_config()
+
+druid_pid_dir = config['configurations']['druid-env']['druid_pid_dir']
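
druid_pid_dir exported here typically feeds the component status check. A minimal sketch of the usual pid-file pattern, assuming files named <nodeType>.pid; the actual naming and check live in druid_node.py, not in this hunk:

    import os

    def is_running(druid_pid_dir, node_type):
        pid_file = os.path.join(druid_pid_dir, node_type + '.pid')
        if not os.path.isfile(pid_file):
            return False
        pid = int(open(pid_file).read().strip())
        try:
            os.kill(pid, 0)  # signal 0 only tests that the process exists
            return True
        except OSError:
            return False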

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/quicklinks/quicklinks.json b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/quicklinks/quicklinks.json
new file mode 100644
index 0000000..c68b9b9
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/quicklinks/quicklinks.json
@@ -0,0 +1,37 @@
+{
+  "name": "default",
+  "description": "default quick links configuration",
+  "configuration": {
+    "protocol": {
+      "type": "HTTP_ONLY"
+    },
+    "links": [
+      {
+        "name": "coordinator_console",
+        "label": "Druid Coordinator Console",
+        "component_name": "DRUID_COORDINATOR",
+        "requires_user_name": "false",
+        "url": "%@://%@:%@",
+        "port": {
+          "http_property": "druid.port",
+          "http_default_port": "8081",
+          "regex": "^(\\d+)$",
+          "site": "druid-coordinator"
+        }
+      },
+      {
+        "name": "overlord_console",
+        "label": "Druid Overlord Console",
+        "component_name": "DRUID_OVERLORD",
+        "requires_user_name": "false",
+        "url": "%@://%@:%@",
+        "port": {
+          "http_property": "druid.port",
+          "http_default_port": "8090",
+          "regex": "^(\\d+)$",
+          "site": "druid-overlord"
+        }
+      }
+    ]
+  }
+}
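
The "%@://%@:%@" url fields above use Ambari's quick-link placeholder syntax: the three %@ slots are filled with protocol, host, and resolved port, in that order. A sketch of that expansion; the host is supplied by the caller because it comes from the component's host mapping, not from the JSON:

    def expand_quicklink(link, protocol, host, resolved_port):
        template = link["url"]  # "%@://%@:%@"
        for value in (protocol, host, resolved_port):
            template = template.replace("%@", str(value), 1)
        return template

    print(expand_quicklink({"url": "%@://%@:%@"}, "http", "c1.example.com", 8081))
    # -> http://c1.example.com:8081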

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/role_command_order.json b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/role_command_order.json
new file mode 100644
index 0000000..4d697fe
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/role_command_order.json
@@ -0,0 +1,17 @@
+{
+  "general_deps" : {
+    "_comment" : "dependencies for Druid",
+    "DRUID_HISTORICAL-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
+    "DRUID_OVERLORD-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
+    "DRUID_MIDDLEMANAGER-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
+    "DRUID_BROKER-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
+    "DRUID_ROUTER-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
+    "DRUID_COORDINATOR-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
+    "DRUID_OVERLORD-RESTART" : ["DRUID_HISTORICAL-RESTART"],
+    "DRUID_MIDDLEMANAGER-RESTART" : ["DRUID_OVERLORD-RESTART"],
+    "DRUID_BROKER-RESTART" : ["DRUID_MIDDLEMANAGER-RESTART"],
+    "DRUID_ROUTER-RESTART" : ["DRUID_BROKER-RESTART"],
+    "DRUID_COORDINATOR-RESTART" : ["DRUID_ROUTER-RESTART"],
+    "DRUID_SERVICE_CHECK-SERVICE_CHECK" : ["DRUID_HISTORICAL-START", "DRUID_COORDINATOR-START", "DRUID_OVERLORD-START", "DRUID_MIDDLEMANAGER-START", "DRUID_BROKER-START", "DRUID_ROUTER-START"]
+  }
+}
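
general_deps above is a dependency map: each command key must wait for every command it lists. A small sketch of deriving a valid execution order from such a map (Kahn-style topological ordering over the commands actually scheduled):

    from collections import deque

    def command_order(deps, scheduled):
        pending = dict((c, set(p for p in deps.get(c, []) if p in scheduled))
                       for c in scheduled)
        ready = deque(c for c in pending if not pending[c])
        order = []
        while ready:
            cmd = ready.popleft()
            order.append(cmd)
            for other in pending:
                if cmd in pending[other]:
                    pending[other].discard(cmd)
                    if not pending[other]:
                        ready.append(other)
        return order

    deps = {"DRUID_BROKER-START": ["ZOOKEEPER_SERVER-START"]}
    print(command_order(deps, set(["ZOOKEEPER_SERVER-START", "DRUID_BROKER-START"])))
    # -> ['ZOOKEEPER_SERVER-START', 'DRUID_BROKER-START']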

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/themes/theme.json b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/themes/theme.json
new file mode 100644
index 0000000..7033e19
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/themes/theme.json
@@ -0,0 +1,120 @@
+{
+  "name": "default",
+  "description": "Default theme for Druid service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "metadata_storage",
+            "display-name": "METADATA STORAGE CONFIG",
+            "layout": {
+              "tab-columns": "",
+              "tab-rows": "1",
+              "sections": [
+                {
+                  "name": "section-metadata-storage",
+                  "display-name": "",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "2",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-metadata-storage-row1-col1",
+                      "display-name": "DRUID METADATA STORAGE",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "druid-common/database_name",
+          "subsection-name": "subsection-metadata-storage-row1-col1"
+        },
+        {
+          "config": "druid-common/druid.metadata.storage.type",
+          "subsection-name": "subsection-metadata-storage-row1-col1"
+        },
+        {
+          "config": "druid-common/druid.metadata.storage.connector.user",
+          "subsection-name": "subsection-metadata-storage-row1-col1"
+        },
+        {
+          "config": "druid-common/druid.metadata.storage.connector.password",
+          "subsection-name": "subsection-metadata-storage-row1-col1"
+        },
+        {
+          "config": "druid-common/metastore_hostname",
+          "subsection-name": "subsection-metadata-storage-row1-col1"
+        },
+        {
+          "config": "druid-common/druid.metadata.storage.connector.port",
+          "subsection-name": "subsection-metadata-storage-row1-col1"
+        },
+        {
+          "config": "druid-common/druid.metadata.storage.connector.connectURI",
+          "subsection-name": "subsection-metadata-storage-row1-col1"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "druid-common/database_name",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "druid-common/druid.metadata.storage.type",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
+        "config": "druid-common/druid.metadata.storage.connector.user",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "druid-common/druid.metadata.storage.connector.password",
+        "widget": {
+          "type": "password"
+        }
+      },
+      {
+        "config": "druid-common/metastore_hostname",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "druid-common/druid.metadata.storage.connector.port",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "druid-common/druid.metadata.storage.connector.connectURI",
+        "widget": {
+          "type": "text-field"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file
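
One consistency rule that's easy to break when hand-editing a theme like the one above: every placement entry should point at a declared subsection and have a matching widget. A minimal validation sketch over the parsed JSON:

    import json

    def check_theme(theme):
        cfg = theme["configuration"]
        subsections = set(ss["name"]
                          for layout in cfg["layouts"]
                          for tab in layout["tabs"]
                          for section in tab["layout"]["sections"]
                          for ss in section["subsections"])
        widgets = set(w["config"] for w in cfg["widgets"])
        problems = []
        for placement in cfg["placement"]["configs"]:
            if placement["subsection-name"] not in subsections:
                problems.append("unknown subsection: " + placement["subsection-name"])
            if placement["config"] not in widgets:
                problems.append("no widget for: " + placement["config"])
        return problems

    # check_theme(json.load(open("theme.json")))  # -> [] when consistent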

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-broker.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-broker.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-broker.xml
deleted file mode 100644
index 6146ca3..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-broker.xml
+++ /dev/null
@@ -1,106 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>druid.service</name>
-    <value>druid/broker</value>
-    <description>The druid.service name of broker node.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.port</name>
-    <value>8082</value>
-    <description>The port on which the broker will accept connections.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.broker.http.numConnections</name>
-    <value>20</value>
-    <description>Size of connection pool for the Broker to connect to historical and real-time nodes. If there are more
-      queries than this number that all need to speak to the same node, then they will queue up.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.server.http.numThreads</name>
-    <value>50</value>
-    <description>Number of threads for HTTP requests.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.processing.buffer.sizeBytes</name>
-    <value>1073741824</value>
-    <value-attributes>
-      <type>long</type>
-      <minimum>0</minimum>
-      <unit>Bytes</unit>
-    </value-attributes>
-    <description>This specifies a buffer size for the storage of intermediate results. The computation engine in both
-      the Historical and Realtime nodes will use a scratch buffer of this size to do all of their intermediate
-      computations
-      off-heap. Larger values allow for more aggregations in a single pass over the data while smaller values can
-      require more passes depending on the query that is being executed.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.processing.numThreads</name>
-    <value>2</value>
-    <description>The number of processing threads to have available for parallel processing of segments.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.processing.numMergeBuffers</name>
-    <value>2</value>
-    <description>The number of direct memory buffers available for merging query results. The buffers are sized by druid.processing.buffer.sizeBytes.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.broker.cache.useCache</name>
-    <value>true</value>
-    <description>Enable the cache on the broker.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.broker.cache.populateCache</name>
-    <value>true</value>
-    <description>Populate the cache on the broker.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.cache.type</name>
-    <value>local</value>
-    <description>The type of cache to use for queries.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.cache.sizeInBytes</name>
-    <value>10000000</value>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <unit>Bytes</unit>
-    </value-attributes>
-    <description>Maximum cache size in bytes. Zero disables caching.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
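
The broker sizing knobs in the deleted file above interact: Druid's documented rule of thumb is that direct memory must cover (druid.processing.numMergeBuffers + druid.processing.numThreads + 1) * druid.processing.buffer.sizeBytes. The arithmetic, using the file's defaults:

    def min_direct_memory_bytes(size_bytes, num_threads, num_merge_buffers):
        # Documented lower bound for -XX:MaxDirectMemorySize
        return (num_merge_buffers + num_threads + 1) * size_bytes

    print(min_direct_memory_bytes(1073741824, 2, 2))  # -> 5368709120 (~5 GiB)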

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-common.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-common.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-common.xml
deleted file mode 100644
index d3b53cd..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-common.xml
+++ /dev/null
@@ -1,270 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>druid.extensions.pullList</name>
-    <value>[]</value>
-    <description>A comma-separated list of one or more druid extensions to download from maven.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.extensions.repositoryList</name>
-    <value>[]</value>
-    <description>A comma-separated list of maven repositories to download extensions.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.extensions.loadList</name>
-    <value>["druid-datasketches"]
-    </value>
-    <depends-on>
-      <property>
-        <type>druid-common</type>
-        <name>druid.metadata.storage.type</name>
-      </property>
-    </depends-on>
-    <description>A comma-separated list of one or more druid extensions to load.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.security.extensions.loadList</name>
-    <value>[]</value>
-    <description>A comma-separated list of one or more druid security extensions to load. This property will be set via the Kerberos wizard, and users will not be allowed to modify it when security is enabled.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.zk.service.host</name>
-    <value>localhost:2181</value>
-    <description>
-      ZooKeeper connection string.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.zk.paths.base</name>
-    <value>/druid</value>
-    <description>
-      Base Zookeeper path
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.discovery.curator.path</name>
-    <value>/druid/discovery</value>
-    <description>
-      Services announce themselves under this ZooKeeper path.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.storage.type</name>
-    <value></value>
-    <description>
-      Choices: local, noop, s3, hdfs, c*. The type of deep storage to use.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.storage.storageDirectory</name>
-    <value></value>
-    <description>
-      Directory to use as deep storage.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.metadata.storage.connector.password</name>
-    <value></value>
-    <property-type>PASSWORD</property-type>
-    <display-name>Metadata storage password</display-name>
-    <description>Password for the metadata storage data base.</description>
-    <value-attributes>
-      <type>password</type>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.metadata.storage.connector.user</name>
-    <value>druid</value>
-    <display-name>Metadata storage user</display-name>
-    <description>Metadata storage user</description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.metadata.storage.connector.port</name>
-    <value>1527</value>
-    <display-name>Metadata storage port</display-name>
-    <description>Metadata storage port</description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>druid-common</type>
-        <name>druid.metadata.storage.type</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>database_name</name>
-    <value>druid</value>
-    <display-name>Druid Metadata storage database name</display-name>
-    <description>Druid Metadata storage database name</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>metastore_hostname</name>
-    <value>localhost</value>
-    <display-name>Metadata storage hostname</display-name>
-    <description>Metadata storage hostname</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property require-input="true">
-    <name>druid.metadata.storage.type</name>
-    <display-name>Druid Metadata storage type</display-name>
-    <value>derby</value>
-    <value-attributes>
-      <overridable>false</overridable>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>mysql</value>
-          <label>MYSQL</label>
-        </entry>
-        <entry>
-          <value>derby</value>
-          <label>DERBY</label>
-        </entry>
-        <entry>
-          <value>postgresql</value>
-          <label>POSTGRESQL</label>
-        </entry>
-      </entries>
-    </value-attributes>
-    <description>Type of the metadata storage. Note that derby will work only if all the Druid nodes are located
-      on the same host. Use mysql or postgresql for distributed mode.
-      The mysql instance installed by Ambari is for development only and not suitable for production use, since it is not HA.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property require-input="true">
-    <name>druid.metadata.storage.connector.connectURI</name>
-    <value>jdbc:derby://localhost:1527/druid;create=true</value>
-    <display-name>Metadata storage connector url</display-name>
-    <description>Metadata storage connector url</description>
-    <on-ambari-upgrade add="false"/>
-    <depends-on>
-      <property>
-        <type>druid-common</type>
-        <name>database_name</name>
-      </property>
-      <property>
-        <type>druid-common</type>
-        <name>metastore_hostname</name>
-      </property>
-      <property>
-        <type>druid-common</type>
-        <name>druid.metadata.storage.type</name>
-      </property>
-      <property>
-        <type>druid-common</type>
-        <name>druid.metadata.storage.connector.port</name>
-      </property>
-    </depends-on>
-  </property>
-  <property>
-    <name>druid.hadoop.security.kerberos.principal</name>
-    <display-name>kerberos principal</display-name>
-    <description>Kerberos principal, e.g. druid@EXAMPLE.COM</description>
-    <property-type>KERBEROS_PRINCIPAL</property-type>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.hadoop.security.kerberos.keytab</name>
-    <display-name>Kerberos keytab location</display-name>
-    <description>Kerberos keytab location</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <property>
-    <name>druid.emitter</name>
-    <value>{{metric_emitter_type}}</value>
-    <description>Emitter used to emit metrics. Values - "noop", "logging", "ambari-metrics", or "http" will initialize
-      one of the emitter modules.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.emitter.ambari-metrics.hostname</name>
-    <value>{{metric_collector_host}}</value>
-    <description>Timeline host</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.emitter.ambari-metrics.port</name>
-    <value>{{metric_collector_port}}</value>
-    <description>Timeline port</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.emitter.ambari-metrics.protocol</name>
-    <value>{{metric_collector_protocol}}</value>
-    <description>Timeline protocol(http or https)</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.emitter.ambari-metrics.trustStorePath</name>
-    <value>{{metric_truststore_path}}</value>
-    <description>Location of the trust store file.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.emitter.ambari-metrics.trustStoreType</name>
-    <value>{{metric_truststore_type}}</value>
-    <description>Optional. Default value is "jks".</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.emitter.ambari-metrics.trustStorePassword</name>
-    <value>{{metric_truststore_password}}</value>
-    <description>Password to open the trust store file.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.emitter.ambari-metrics.eventConverter</name>
-    <value>{"type":"whiteList"}</value>
-    <description>Event converter used to translate Druid metric events for the ambari-metrics emitter.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.monitoring.monitors</name>
-    <value>["com.metamx.metrics.JvmMonitor"]</value>
-    <description>The list of Druid monitoring modules to load.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-</configuration>
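
druid.metadata.storage.connector.connectURI above is declared to depend on the database type, host, port, and name; the stack derives it from those four values. A sketch of that derivation under standard JDBC URL conventions (the exact mysql/postgresql forms Ambari emits may differ in options):

    def build_connect_uri(storage_type, host, port, db_name):
        if storage_type == 'mysql':
            return 'jdbc:mysql://%s:%s/%s?createDatabaseIfNotExist=true' % (host, port, db_name)
        if storage_type == 'postgresql':
            return 'jdbc:postgresql://%s:%s/%s' % (host, port, db_name)
        # derby: single-host only, as the description above warns
        return 'jdbc:derby://%s:%s/%s;create=true' % (host, port, db_name)

    print(build_connect_uri('derby', 'localhost', 1527, 'druid'))
    # -> jdbc:derby://localhost:1527/druid;create=true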

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-coordinator.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-coordinator.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-coordinator.xml
deleted file mode 100644
index 618f11d..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-coordinator.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>druid.service</name>
-    <value>druid/coordinator</value>
-    <description>The druid.service name of coordinator node.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.port</name>
-    <value>8081</value>
-    <description>The port on which the coordinator will accept connections.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.coordinator.merge.on</name>
-    <value>false</value>
-    <description>Boolean flag for whether or not the coordinator should try and merge small segments into a more optimal
-      segment size.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-env.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-env.xml
deleted file mode 100644
index 2e96f6a..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-env.xml
+++ /dev/null
@@ -1,248 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <!--Heap Settings -->
-  <property>
-    <name>druid.broker.jvm.heap.memory</name>
-    <value>2048</value>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>65536</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.coordinator.jvm.heap.memory</name>
-    <value>512</value>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>65536</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.middlemanager.jvm.heap.memory</name>
-    <value>256</value>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>65536</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.historical.jvm.heap.memory</name>
-    <value>2048</value>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>65536</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.overlord.jvm.heap.memory</name>
-    <value>512</value>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>65536</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.router.jvm.heap.memory</name>
-    <value>512</value>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>65536</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <!-- DirectMemorySettings -->
-  <property>
-    <name>druid.broker.jvm.direct.memory</name>
-    <value>1048576</value>
-    <depends-on>
-      <property>
-        <type>druid-broker</type>
-        <name>druid.processing.buffer.sizeBytes</name>
-      </property>
-      <property>
-        <type>druid-broker</type>
-        <name>druid.processing.numThreads</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.coordinator.jvm.direct.memory</name>
-    <value>1048576</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.middlemanager.jvm.direct.memory</name>
-    <value>1048576</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.historical.jvm.direct.memory</name>
-    <value>1048576</value>
-    <depends-on>
-      <property>
-        <type>druid-historical</type>
-        <name>druid.processing.buffer.sizeBytes</name>
-      </property>
-      <property>
-        <type>druid-historical</type>
-        <name>druid.processing.numThreads</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.overlord.jvm.direct.memory</name>
-    <value>1048576</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.router.jvm.direct.memory</name>
-    <value>1048576</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <!-- JavaOpts Tune GC related configs here-->
-  <property>
-    <name>druid.broker.jvm.opts</name>
-    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.coordinator.jvm.opts</name>
-    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.middlemanager.jvm.opts</name>
-    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.historical.jvm.opts</name>
-    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.overlord.jvm.opts</name>
-    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.router.jvm.opts</name>
-    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid_user</name>
-    <display-name>Druid User</display-name>
-    <value>druid</value>
-    <property-type>USER</property-type>
-    <description></description>
-    <value-attributes>
-      <type>user</type>
-      <overridable>false</overridable>
-      <user-groups>
-        <property>
-          <type>cluster-env</type>
-          <name>user_group</name>
-        </property>
-      </user-groups>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid_log_dir</name>
-    <value>/var/log/druid</value>
-    <description></description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid_pid_dir</name>
-    <value>/var/run/druid</value>
-    <display-name>Druid PID dir</display-name>
-    <description></description>
-    <value-attributes>
-      <type>directory</type>
-      <editable-only-at-install>true</editable-only-at-install>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <!-- druid-env.sh -->
-  <property>
-    <name>content</name>
-    <display-name>druid-env template</display-name>
-    <description>This is a simple template for the druid-env.sh file</description>
-    <value>
-      #!/bin/bash
-
-      # Set DRUID specific environment variables here.
-
-      # The java implementation to use.
-      export JAVA_HOME={{java8_home}}
-      export PATH=$JAVA_HOME/bin:$PATH
-      export DRUID_PID_DIR={{druid_pid_dir}}
-      export DRUID_LOG_DIR={{druid_log_dir}}
-      export DRUID_CONF_DIR={{druid_conf_dir}}
-      export DRUID_LIB_DIR={{druid_home}}/lib
-      export HADOOP_CONF_DIR={{hadoop_conf_dir}}
-
-    </value>
-    <value-attributes>
-      <type>content</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
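
The per-daemon heap, direct-memory, and jvm.opts properties above are ultimately assembled into each node's JVM arguments. A sketch of that assembly, assuming megabyte units for the heap and leaving the direct-memory unit suffix to the caller, since the XML declares none; the real rendering happens in the service's parameter scripts:

    def jvm_args(heap_mb, direct_memory, jvm_opts, direct_unit='m'):
        return (['-server',
                 '-Xms%dm' % heap_mb,
                 '-Xmx%dm' % heap_mb,
                 '-XX:MaxDirectMemorySize=%d%s' % (direct_memory, direct_unit)]
                + jvm_opts.split())

    print(' '.join(jvm_args(2048, 1048576, '-Duser.timezone=UTC -Dfile.encoding=UTF-8')))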

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-historical.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-historical.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-historical.xml
deleted file mode 100644
index 5ff30ce..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-historical.xml
+++ /dev/null
@@ -1,94 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>druid.service</name>
-    <value>druid/historical</value>
-    <description>The druid.service name of historical node.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.port</name>
-    <value>8083</value>
-    <description>The port on which the historical nodes will accept connections.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.server.http.numThreads</name>
-    <value>50</value>
-    <description>Number of threads for HTTP requests.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.processing.numMergeBuffers</name>
-    <value>2</value>
-    <description>The number of direct memory buffers available for merging query results. The buffers are sized by druid.processing.buffer.sizeBytes.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.processing.buffer.sizeBytes</name>
-    <value>1073741824</value>
-    <value-attributes>
-      <type>long</type>
-      <minimum>0</minimum>
-      <unit>Bytes</unit>
-    </value-attributes>
-    <description>This specifies a buffer size for the storage of intermediate results. The computation engine in both
-      the Historical and Realtime nodes will use a scratch buffer of this size to do all of their intermediate
-      computations off-heap. Larger values allow for more aggregations in a single pass over the data while smaller
-      values can require more passes depending on the query that is being executed.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.processing.numThreads</name>
-    <value>10</value>
-    <description>The number of processing threads to have available for parallel processing of segments.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.segmentCache.locations</name>
-    <value>[{"path":"/apps/druid/segmentCache","maxSize":300000000000}]</value>
-    <description>Segments assigned to a Historical node are first stored on the local file system (in a disk cache) and
-      then served by the Historical node. These locations define where that local cache resides.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.server.maxSize</name>
-    <value>300000000000</value>
-    <description>The maximum number of bytes-worth of segments that the node wants assigned to it. This is not a limit
-      that Historical nodes actually enforce, just a value published to the Coordinator node so it can plan
-      accordingly.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.segmentCache.infoDir</name>
-    <value>/apps/druid/segmentCache/info_dir</value>
-    <description>Historical nodes keep track of the segments they are serving so that when the process is restarted they
-      can reload the same segments without waiting for the Coordinator to reassign. This path defines where this
-      metadata is kept. Directory will be created if needed.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
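
druid.segmentCache.locations above is a JSON array embedded in an XML value, and druid.server.maxSize should not advertise more capacity than those locations provide. A sketch of that cross-check:

    import json

    def cache_capacity_ok(segment_cache_locations_json, server_max_size):
        locations = json.loads(segment_cache_locations_json)
        return server_max_size <= sum(loc['maxSize'] for loc in locations)

    print(cache_capacity_ok(
        '[{"path":"/apps/druid/segmentCache","maxSize":300000000000}]',
        300000000000))  # -> True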

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-log4j.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-log4j.xml
deleted file mode 100644
index bcb731a..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-log4j.xml
+++ /dev/null
@@ -1,84 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_adding_forbidden="true">
-  <property>
-    <name>druid_log_level</name>
-    <value>info</value>
-    <description>Log level for io.druid logging</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>root_log_level</name>
-    <value>WARN</value>
-    <description>Log level for root logging</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>metamx_log_level</name>
-    <value>info</value>
-    <description>Log level for com.metamx logging</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>druid-log4j template</display-name>
-    <description>Custom log4j2 XML configuration</description>
-    <value><![CDATA[<?xml version="1.0" encoding="UTF-8" ?>
-<!--
-  ~ Licensed to the Apache Software Foundation (ASF) under one
-  ~ or more contributor license agreements.  See the NOTICE file
-  ~ distributed with this work for additional information
-  ~ regarding copyright ownership.  The ASF licenses this file
-  ~ to you under the Apache License, Version 2.0 (the
-  ~ "License"); you may not use this file except in compliance
-  ~ with the License.  You may obtain a copy of the License at
-  ~
-  ~     http://www.apache.org/licenses/LICENSE-2.0
-  ~
-  ~ Unless required by applicable law or agreed to in writing, software
-  ~ distributed under the License is distributed on an "AS IS" BASIS,
-  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  ~ See the License for the specific language governing permissions and
-  ~ limitations under the License.
-  -->
-    <Configuration>
-        <Appenders>
-            <Console name="Console" target="SYSTEM_OUT">
-                <PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/>
-            </Console>
-        </Appenders>
-        <Loggers>
-            <Logger name="com.metamx" level="{{metamx_log_level}}"/>
-            <Logger name="io.druid" level="{{druid_log_level}}"/>
-            <Root level="{{root_log_level}}">
-                <AppenderRef ref="Console"/>
-            </Root>
-        </Loggers>
-    </Configuration>
-      ]]></value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
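
The {{name}} placeholders in the CDATA template above are filled in by the agent at deploy time; resource_management's Template machinery is Jinja2-based. A rough stand-in using plain string replacement, for illustration only:

    def render(template, values):
        for name, value in values.items():
            template = template.replace('{{%s}}' % name, str(value))
        return template

    print(render('<Logger name="io.druid" level="{{druid_log_level}}"/>',
                 {'druid_log_level': 'info'}))
    # -> <Logger name="io.druid" level="info"/>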

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-logrotate.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-logrotate.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-logrotate.xml
deleted file mode 100644
index b7308ce..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-logrotate.xml
+++ /dev/null
@@ -1,68 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_adding_forbidden="true">
-  <property>
-    <name>druid_log_maxfilesize</name>
-    <value>256</value>
-    <description>The maximum size a log file may reach before it is rotated</description>
-    <display-name>Druid Log: backup file size</display-name>
-    <value-attributes>
-      <unit>MB</unit>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid_log_maxbackupindex</name>
-    <value>7</value>
-    <description>The number of backup files</description>
-    <display-name>Druid Log: # of backup files</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>druid logrotate template</display-name>
-    <description>Custom logrotate file</description>
-    <value><![CDATA[
-    {{druid_log_dir}}/*.log {
-        copytruncate
-        rotate {{druid_log_maxbackupindex}}
-        daily
-        nocompress
-        missingok
-        notifempty
-        create 660 druid users
-        dateext
-        dateformat -%Y-%m-%d-%s
-        size {{druid_log_maxfilesize}}M
-        }
-      ]]></value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-middlemanager.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-middlemanager.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-middlemanager.xml
deleted file mode 100644
index 08280ad..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-middlemanager.xml
+++ /dev/null
@@ -1,122 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>druid.service</name>
-    <value>druid/middlemanager</value>
-    <description>The druid.service name of middlemanager node.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.port</name>
-    <value>8091</value>
-    <description>The port on which the middlemanager nodes will accept connections.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.indexer.runner.startPort</name>
-    <value>8100</value>
-    <description>The port that peons begin running on.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.worker.capacity</name>
-    <value>3</value>
-    <description>Maximum number of tasks the middle manager can accept and run concurrently.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.indexer.runner.javaOpts</name>
-    <value>-server -Xmx2g -Duser.timezone=UTC -Dfile.encoding=UTF-8 -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager -Dhdp.version={{stack_version}} -Dhadoop.mapreduce.job.classloader=true</value>
-    <description>
-      A string of -X Java options to pass to the peon's JVM.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.indexer.task.baseTaskDir</name>
-    <value>/apps/druid/tasks</value>
-    <description>
-      Base temporary working directory for druid tasks.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.server.http.numThreads</name>
-    <value>50</value>
-    <description>
-      Number of threads for HTTP requests.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.processing.buffer.sizeBytes</name>
-    <value>256000000</value>
-    <value-attributes>
-      <type>long</type>
-      <minimum>0</minimum>
-      <unit>Bytes</unit>
-    </value-attributes>
-    <description>
-      This specifies a buffer size for the storage of intermediate results. The computation engine in both the
-      Historical and Realtime nodes will use a scratch buffer of this size to do all of their intermediate computations
-      off-heap. Larger values allow for more aggregations in a single pass over the data while smaller values can
-      require more passes depending on the query that is being executed.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.processing.numThreads</name>
-    <value>2</value>
-    <description>
-      The number of processing threads to have available for parallel processing of segments.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.indexer.task.hadoopWorkingPath</name>
-    <value>/tmp/druid-indexing</value>
-    <description>
-      Temporary working directory for Hadoop tasks
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.indexer.fork.property.hadoop.mapreduce.reduce.java.opts</name>
-    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
-    <description>
-      Default Java options for reducer containers
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <property>
-    <name>druid.indexer.fork.property.hadoop.mapreduce.map.java.opts</name>
-    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
-    <description>
-      Default Java properties for Map containers
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-</configuration>
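
For context: each <property> element above is rendered by Ambari into the
MiddleManager's runtime.properties file, with {{stack_version}} substituted at
deploy time. A rough sketch of the rendered output, assuming the default values
above and a hypothetical stack version of 2.6.0.0-1234:

  druid.service=druid/middlemanager
  druid.port=8091
  druid.indexer.runner.startPort=8100
  druid.worker.capacity=3
  druid.indexer.runner.javaOpts=-server -Xmx2g -Duser.timezone=UTC -Dfile.encoding=UTF-8 -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager -Dhdp.version=2.6.0.0-1234 -Dhadoop.mapreduce.job.classloader=true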

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-overlord.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-overlord.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-overlord.xml
deleted file mode 100644
index 57d1c63..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-overlord.xml
+++ /dev/null
@@ -1,52 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>druid.service</name>
-    <value>druid/overlord</value>
-    <description>The druid.service name of overlord node.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.port</name>
-    <value>8090</value>
-    <description>The port on which the overlord will accept connections.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.indexer.runner.type</name>
-    <value>remote</value>
-    <description>Choices "local" or "remote". Indicates whether tasks should be run locally or in a distributed
-      environment.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.indexer.storage.type</name>
-    <value>metadata</value>
-    <description>Choices are "local" or "metadata". Indicates whether incoming tasks should be stored locally (in heap)
-      or in metadata storage. Storing incoming tasks in metadata storage allows for tasks to be resumed if the overlord
-      should fail.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-router.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-router.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-router.xml
deleted file mode 100644
index d544315..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-router.xml
+++ /dev/null
@@ -1,59 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>druid.service</name>
-    <value>druid/router</value>
-    <description>The druid.service name of router node.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.port</name>
-    <value>8888</value>
-    <description>The port on which the router will accept connections.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.router.http.numConnections</name>
-    <value>20</value>
-    <description>
-      Size of connection pool for the router to connect to historical and real-time nodes. If there are more
-      queries than this number that all need to speak to the same node, then they will queue up.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.server.http.numThreads</name>
-    <value>50</value>
-    <description>Number of threads for HTTP requests.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.router.tierToBrokerMap</name>
-    <value>{"_default_tier":"druid/broker"}</value>
-    <description>
-      Used to route queries for a certain tier of data to their appropriate broker. An ordered JSON map of
-      tiers to broker names. The priority of brokers is based on the ordering.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
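
The tierToBrokerMap above sends every query to the single _default_tier broker.
As an illustration of the "ordered JSON map" the description refers to, a
deployment that added a dedicated broker for a hypothetical "hot" tier would
list it first so it takes priority:

  {"hot":"druid/hotBroker","_default_tier":"druid/broker"}

Here "druid/hotBroker" is an assumed druid.service name, not something defined
by this stack.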

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/metainfo.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/metainfo.xml
deleted file mode 100644
index f9f1a35..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/metainfo.xml
+++ /dev/null
@@ -1,223 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>DRUID</name>
-      <displayName>Druid</displayName>
-      <comment>A fast column-oriented distributed data store. This service is &lt;b&gt;Technical Preview&lt;/b&gt;.</comment>
-      <version>0.9.2</version>
-      <components>
-        <component>
-          <name>DRUID_COORDINATOR</name>
-          <displayName>Druid Coordinator</displayName>
-          <category>MASTER</category>
-          <cardinality>1+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/coordinator.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <configuration-dependencies>
-            <config-type>druid-coordinator</config-type>
-          </configuration-dependencies>
-        </component>
-        <component>
-          <name>DRUID_OVERLORD</name>
-          <displayName>Druid Overlord</displayName>
-          <category>MASTER</category>
-          <cardinality>1+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/overlord.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <configuration-dependencies>
-            <config-type>druid-overlord</config-type>
-          </configuration-dependencies>
-        </component>
-        <component>
-          <name>DRUID_HISTORICAL</name>
-          <displayName>Druid Historical</displayName>
-          <category>SLAVE</category>
-          <cardinality>1+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/historical.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <configuration-dependencies>
-            <config-type>druid-historical</config-type>
-          </configuration-dependencies>
-        </component>
-        <component>
-          <name>DRUID_BROKER</name>
-          <displayName>Druid Broker</displayName>
-          <category>MASTER</category>
-          <cardinality>1+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/broker.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <configuration-dependencies>
-            <config-type>druid-broker</config-type>
-          </configuration-dependencies>
-        </component>
-        <component>
-          <name>DRUID_MIDDLEMANAGER</name>
-          <displayName>Druid MiddleManager</displayName>
-          <category>SLAVE</category>
-          <cardinality>1+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/middlemanager.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <configuration-dependencies>
-            <config-type>druid-middlemanager</config-type>
-          </configuration-dependencies>
-        </component>
-        <component>
-          <name>DRUID_ROUTER</name>
-          <displayName>Druid Router</displayName>
-          <category>MASTER</category>
-          <cardinality>1+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/router.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <configuration-dependencies>
-            <config-type>druid-router</config-type>
-          </configuration-dependencies>
-        </component>
-      </components>
-
-      <themes>
-        <theme>
-          <fileName>theme.json</fileName>
-          <default>true</default>
-        </theme>
-      </themes>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>redhat7,amazon2015,redhat6,suse11,suse12</osFamily>
-          <packages>
-            <package>
-              <name>druid_${stack_version}</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
-          <packages>
-            <package>
-              <name>druid-${stack_version}</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      <requiredServices>
-        <service>ZOOKEEPER</service>
-      </requiredServices>
-      <configuration-dependencies>
-        <config-type>druid-common</config-type>
-        <config-type>druid-env</config-type>
-        <config-type>druid-log4j</config-type>
-        <config-type>druid-logrotate</config-type>
-        <config-type>zoo.cfg</config-type>
-      </configuration-dependencies>
-
-      <quickLinksConfigurations>
-        <quickLinksConfiguration>
-          <fileName>quicklinks.json</fileName>
-          <default>true</default>
-        </quickLinksConfiguration>
-      </quickLinksConfigurations>
-
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/broker.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/broker.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/broker.py
deleted file mode 100644
index bd170cb..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/broker.py
+++ /dev/null
@@ -1,28 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from druid_node import DruidBase
-
-
-class DruidBroker(DruidBase):
-  def __init__(self):
-    DruidBase.__init__(self, nodeType="broker")
-
-
-if __name__ == "__main__":
-  DruidBroker().execute()
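
broker.py and its sibling scripts (coordinator.py, historical.py, overlord.py,
middlemanager.py, router.py) are thin wrappers: the lifecycle logic lives in
druid_node.DruidBase, which this diff does not show. A minimal sketch of that
pattern, assuming DruidBase follows the usual Ambari Script contract; the start
command and paths below are illustrative, not taken from the real module:

from resource_management.core.resources.system import Execute
from resource_management.libraries.script import Script


class DruidBase(Script):
  def __init__(self, nodeType=None):
    # nodeType ("broker", "coordinator", ...) selects which Druid role
    # this script manages.
    self.nodeType = nodeType

  def install(self, env):
    self.install_packages(env)

  def start(self, env, upgrade_type=None):
    # Hypothetical launch command; the real script builds paths from params.
    Execute("/usr/hdp/current/druid-{0}/bin/node.sh start".format(self.nodeType))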

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/coordinator.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/coordinator.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/coordinator.py
deleted file mode 100644
index a86fa40..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/coordinator.py
+++ /dev/null
@@ -1,28 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from druid_node import DruidBase
-
-
-class DruidCoordinator(DruidBase):
-  def __init__(self):
-    DruidBase.__init__(self, nodeType="coordinator")
-
-
-if __name__ == "__main__":
-  DruidCoordinator().execute()


[27/50] [abbrv] ambari git commit: Revert "AMBARI-22160. hadooplzo package installation failed on devdeploys (aonishuk)"

Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/f1c4626b/ambari-server/snippet/Snippet.java
----------------------------------------------------------------------
diff --git a/ambari-server/snippet/Snippet.java b/ambari-server/snippet/Snippet.java
deleted file mode 100644
index f13d533..0000000
--- a/ambari-server/snippet/Snippet.java
+++ /dev/null
@@ -1,8 +0,0 @@
-package snippet;
-
-public class Snippet {
-  public static void main(String[] args) {
-    /home/user/ambari/ambari-views/bin/.project
-  }
-}
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/f1c4626b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
index c32044c..2224d31 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
@@ -95,9 +95,7 @@ class TestHBaseMaster(RMFTestCase):
                          try_install=True,
                          os_type=('Redhat', '6.4', 'Final'),
                          checked_call_mocks = [(0, "OK.", "")],
-                         available_packages_in_repos = ['hbase_2_3_0_1_1234'],
                          )
-      
 
       # only assert that the correct package is trying to be installed
       self.assertResourceCalled('Package', 'hbase_2_3_0_1_1234',

http://git-wip-us.apache.org/repos/asf/ambari/blob/f1c4626b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
index ae33a2a..bff8642 100644
--- a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
+++ b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
@@ -80,8 +80,7 @@ class RMFTestCase(TestCase):
                     mocks_dict={},
                     try_install=False,
                     command_args=[],
-                    log_out_files=False,
-                    available_packages_in_repos = []):
+                    log_out_files=False):
 
     norm_path = os.path.normpath(path)
 
@@ -126,7 +125,6 @@ class RMFTestCase(TestCase):
         Script.instance = None
         script_class_inst = RMFTestCase._get_attr(script_module, classname)()
         script_class_inst.log_out_files = log_out_files
-        script_class_inst.available_packages_in_repos = available_packages_in_repos
         method = RMFTestCase._get_attr(script_class_inst, command)
     except IOError, err:
       raise RuntimeError("Cannot load class %s from %s: %s" % (classname, norm_path, err.message))


[05/50] [abbrv] ambari git commit: AMBARI-22139 - CURRENT cluster Shows Upgrade If Component Didn't Report Version (jonathanhurley)

Posted by ja...@apache.org.
AMBARI-22139 - CURRENT cluster Shows Upgrade If Component Didn't Report Version (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8b83a0a5
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8b83a0a5
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8b83a0a5

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: 8b83a0a538358e54c5aa7f3c4eccc4a458296f9a
Parents: a9d2698
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Thu Oct 5 16:06:06 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Fri Oct 6 10:38:29 2017 -0400

----------------------------------------------------------------------
 .../python/resource_management/TestScript.py    | 26 ++++++++-
 .../libraries/script/script.py                  | 22 ++++++--
 .../0.4.0/package/scripts/ranger_admin.py       | 13 ++---
 .../configs/ranger_admin_default.json           | 55 ++++++++++++++++++++
 .../src/test/python/stacks/utils/RMFTestCase.py |  9 ++--
 5 files changed, 110 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/8b83a0a5/ambari-agent/src/test/python/resource_management/TestScript.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/resource_management/TestScript.py b/ambari-agent/src/test/python/resource_management/TestScript.py
index 75726d6..79d0598 100644
--- a/ambari-agent/src/test/python/resource_management/TestScript.py
+++ b/ambari-agent/src/test/python/resource_management/TestScript.py
@@ -21,9 +21,9 @@ import StringIO
 import sys, pprint
 from resource_management.libraries.script import Script
 from resource_management.core.environment import Environment
+from resource_management.core.logger import Logger
 from mock.mock import patch, MagicMock
 from stacks.utils.RMFTestCase import *
-import logging
 
 class TestScript(RMFTestCase):
 
@@ -110,7 +110,7 @@ class TestScript(RMFTestCase):
   @patch("__builtin__.open")
   def test_status_commands_clear_structured_out(self, open_mock):
     """
-    Tests that status commands will clear and stored structured output from prior status commands.
+    Tests that status commands will clear any stored structured output from prior status commands.
     :param open_mock: 
     :return: 
     """
@@ -141,6 +141,28 @@ class TestScript(RMFTestCase):
     self.assertTrue(open_mock.called)
     self.assertEquals({}, Script.structuredOut)
 
+
+  @patch.object(Logger, "error", new = MagicMock())
+  @patch.object(Script, "put_structured_out")
+  @patch("resource_management.libraries.functions.version_select_util.get_component_version_from_symlink", new = MagicMock(return_value=None))
+  @patch("resource_management.libraries.functions.stack_select.get_package_name", new = MagicMock(return_value="foo-package"))
+  @patch("resource_management.libraries.functions.stack_select.unsafe_get_stack_versions", new = MagicMock(return_value=("",0,["2.6.0.0-1234"])))
+  def test_save_version_structured_out_stack_select(self, pso_mock):
+    """
+    Tests that when writing out the version of the component to the structure output,
+    if all else fails, we'll invoke the stack-select tool to see if there are any versions
+    reported.
+    :param pso_mock:
+    :return:
+    """
+    script = Script()
+    script.stroutfile = ''
+    script.save_component_version_to_structured_out("start")
+
+    self.assertEqual(pso_mock.call_count, 1)
+    self.assertEquals(pso_mock.call_args[0][0], {'version':'2.6.0.0-1234'})
+
+
   def tearDown(self):
     # enable stdout
     sys.stdout = sys.__stdout__

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b83a0a5/ambari-common/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index e612638..d5b4469 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -47,7 +47,7 @@ from resource_management.core.environment import Environment
 from resource_management.core.logger import Logger
 from resource_management.core.exceptions import Fail, ClientComponentHasNoStatus, ComponentIsNotRunning
 from resource_management.core.resources.packaging import Package
-from resource_management.libraries.functions.version_select_util import get_component_version_from_symlink
+from resource_management.libraries.functions import version_select_util
 from resource_management.libraries.functions.version import compare_versions
 from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions import stack_tools
@@ -212,6 +212,12 @@ class Script(object):
     Saves the version of the component for this command to the structured out file. If the
     command is an install command and the repository is trusted, then it will use the version of
     the repository. Otherwise, it will consult the stack-select tool to read the symlink version.
+
+    Under rare circumstances, a component may have a bug which prevents it from reporting a
+    version back after being installed. This is most likely due to the stack-select tool not being
+    invoked by the package's installer. In these rare cases, we check whether the component
+    should have reported a version and fall back to the "<stack-select> versions" command.
+
     :param command_name: command name
     :return: None
     """
@@ -240,7 +246,17 @@ class Script(object):
     if stack_select_package_name and stack_name:
       # only query for the component version from stack-select if we can't trust the repository yet
       if component_version is None:
-        component_version = get_component_version_from_symlink(stack_name, stack_select_package_name)
+        component_version = version_select_util.get_component_version_from_symlink(stack_name, stack_select_package_name)
+
+      # last ditch effort - should cover the edge case where the package failed to setup its
+      # link and we have to try to see if <stack-select> can help
+      if component_version is None:
+        output, code, versions = stack_select.unsafe_get_stack_versions()
+        if len(versions) == 1:
+          component_version = versions[0]
+          Logger.error("The '{0}' component did not advertise a version. This may indicate a problem with the component packaging. " \
+                         "However, the stack-select tool was able to report a single version installed ({1}). " \
+                         "This is the version that will be reported.".format(stack_select_package_name, component_version))
 
       if component_version:
         self.put_structured_out({"version": component_version})
@@ -252,7 +268,7 @@ class Script(object):
           self.put_structured_out({"repository_version_id": repo_version_id})
       else:
         if not self.is_hook():
-          Logger.error("Component '{0}' did not advertise a version. This may indicate a problem with the component packaging.".format(stack_select_package_name))
+          Logger.error("The '{0}' component did not advertise a version. This may indicate a problem with the component packaging.".format(stack_select_package_name))
 
 
   def should_expose_component_version(self, command_name):
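
Stripped of the surrounding plumbing, the lookup order this commit introduces
reduces to a three-step fallback. A standalone sketch (the function name and
signature are illustrative, not part of the commit):

def resolve_component_version(repo_version, symlink_version, stack_versions):
  """Mirrors the order used in save_component_version_to_structured_out().

  repo_version    -- version taken from a trusted repository (install commands)
  symlink_version -- version read from the stack-select symlink
  stack_versions  -- the list returned by the "<stack-select> versions" command
  """
  if repo_version is not None:
    return repo_version
  if symlink_version is not None:
    return symlink_version
  # Last-ditch effort: only trust stack-select when exactly one version
  # is installed, as the new code above does.
  if len(stack_versions) == 1:
    return stack_versions[0]
  return None

For example, resolve_component_version(None, None, ["2.6.0.0-1234"]) returns
"2.6.0.0-1234", which is exactly the case the new unit test in TestScript.py
exercises.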

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b83a0a5/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py
index f779c18..848b137 100644
--- a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py
@@ -20,7 +20,6 @@ limitations under the License.
 from resource_management.core.exceptions import Fail
 from resource_management.libraries.functions.check_process_status import check_process_status
 from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions.constants import Direction
 from resource_management.libraries.script import Script
 from resource_management.core.resources.system import Execute, File
 from resource_management.core.exceptions import ComponentIsNotRunning
@@ -28,10 +27,12 @@ from resource_management.libraries.functions.format import format
 from resource_management.core.logger import Logger
 from resource_management.core import shell
 from ranger_service import ranger_service
-from setup_ranger_xml import setup_ranger_audit_solr, setup_ranger_admin_passwd_change, update_password_configs
 from resource_management.libraries.functions import solr_cloud_util
-from ambari_commons.constants import UPGRADE_TYPE_NON_ROLLING, UPGRADE_TYPE_ROLLING
+from ambari_commons.constants import UPGRADE_TYPE_NON_ROLLING
 from resource_management.libraries.functions.constants import Direction
+
+import setup_ranger_xml
+
 import os, errno
 
 class RangerAdmin(Script):
@@ -93,9 +94,9 @@ class RangerAdmin(Script):
 
     if params.stack_supports_infra_client and params.audit_solr_enabled and params.is_solrCloud_enabled:
       solr_cloud_util.setup_solr_client(params.config, custom_log4j = params.custom_log4j)
-      setup_ranger_audit_solr()
+      setup_ranger_xml.setup_ranger_audit_solr()
 
-    update_password_configs()
+    setup_ranger_xml.update_password_configs()
     ranger_service('ranger_admin')
 
 
@@ -142,7 +143,7 @@ class RangerAdmin(Script):
         setup_java_patch()
 
       if params.stack_supports_ranger_admin_password_change:
-        setup_ranger_admin_passwd_change()
+        setup_ranger_xml.setup_ranger_admin_passwd_change()
 
   def set_ru_rangeradmin_in_progress(self, upgrade_marker_file):
     config_dir = os.path.dirname(upgrade_marker_file)
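
The import change above (from "from setup_ranger_xml import ..." to "import
setup_ranger_xml") is what makes these calls patchable from tests: a from-import
binds each function into ranger_admin's own namespace, so patching the attribute
on setup_ranger_xml would not affect the already-bound name, whereas a
module-qualified call looks the attribute up at call time. A minimal sketch of
the patchable shape, using unittest.mock rather than the mock.mock wrapper the
Ambari tests import, and assuming setup_ranger_xml is importable as it is in
the script above:

from unittest import mock

import setup_ranger_xml

with mock.patch.object(setup_ranger_xml, "update_password_configs") as fake:
  # The module-qualified call site resolves the patched attribute at call time.
  setup_ranger_xml.update_password_configs()
  fake.assert_called_once_with()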

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b83a0a5/ambari-server/src/test/python/common-services/configs/ranger_admin_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/common-services/configs/ranger_admin_default.json b/ambari-server/src/test/python/common-services/configs/ranger_admin_default.json
index b360c07..2e66c62 100644
--- a/ambari-server/src/test/python/common-services/configs/ranger_admin_default.json
+++ b/ambari-server/src/test/python/common-services/configs/ranger_admin_default.json
@@ -64,6 +64,61 @@
       "db_host": "localhost",
       "xa_ldap_groupRoleAttribute": "\"cn\""
     },
+    "ranger-ugsync-site": {
+      "ranger.usersync.ldap.binddn": "",
+      "ranger.usersync.policymgr.username": "rangerusersync",
+      "ranger.usersync.policymanager.mockrun": "false",
+      "ranger.usersync.group.searchbase": "",
+      "ranger.usersync.ldap.bindalias": "testldapalias",
+      "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks",
+      "ranger.usersync.port": "5151",
+      "ranger.usersync.pagedresultssize": "500",
+      "ranger.usersync.group.memberattributename": "",
+      "ranger.usersync.kerberos.principal": "",
+      "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
+      "ranger.usersync.ldap.referral": "ignore",
+      "ranger.usersync.group.searchfilter": "",
+      "ranger.usersync.ldap.user.objectclass": "person",
+      "ranger.usersync.logdir": "{{usersync_log_dir}}",
+      "ranger.usersync.ldap.user.searchfilter": "",
+      "ranger.usersync.ldap.groupname.caseconversion": "none",
+      "ranger.usersync.ldap.ldapbindpassword": "",
+      "ranger.usersync.unix.minUserId": "500",
+      "ranger.usersync.policymanager.maxrecordsperapicall": "1000",
+      "ranger.usersync.group.nameattribute": "",
+      "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password",
+      "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks",
+      "ranger.usersync.user.searchenabled": "false",
+      "ranger.usersync.group.usermapsyncenabled": "true",
+      "ranger.usersync.ldap.bindkeystore": "",
+      "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof",
+      "ranger.usersync.kerberos.keytab": "",
+      "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe",
+      "ranger.usersync.group.objectclass": "",
+      "ranger.usersync.ldap.user.searchscope": "sub",
+      "ranger.usersync.unix.password.file": "/etc/passwd",
+      "ranger.usersync.ldap.user.nameattribute": "",
+      "ranger.usersync.pagedresultsenabled": "true",
+      "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}",
+      "ranger.usersync.group.search.first.enabled": "false",
+      "ranger.usersync.group.searchenabled": "false",
+      "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder",
+      "ranger.usersync.ssl": "true",
+      "ranger.usersync.ldap.url": "",
+      "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org",
+      "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+      "ranger.usersync.ldap.user.searchbase": "",
+      "ranger.usersync.ldap.username.caseconversion": "none",
+      "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+      "ranger.usersync.keystore.password": "UnIx529p",
+      "ranger.usersync.unix.group.file": "/etc/group",
+      "ranger.usersync.filesource.file": "/tmp/usergroup.txt",
+      "ranger.usersync.group.searchscope": "",
+      "ranger.usersync.truststore.password": "changeit",
+      "ranger.usersync.enabled": "true",
+      "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000",
+      "ranger.usersync.filesource.text.delimiter": ","
+    },
     "ranger-site": {
       "http.enabled": "true",
       "http.service.port": "6080",

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b83a0a5/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
index d98e0b1..bff8642 100644
--- a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
+++ b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
@@ -154,11 +154,12 @@ class RMFTestCase(TestCase):
                     with patch('resource_management.libraries.functions.stack_select.is_package_supported', return_value=True):
                       with patch('resource_management.libraries.functions.stack_select.get_supported_packages', return_value=MagicMock()):
                         with patch.object(os, "environ", new=os_env) as mocks_dict['environ']:
-                          if not try_install:
-                            with patch.object(Script, 'install_packages') as install_mock_value:
+                          with patch('resource_management.libraries.functions.stack_select.unsafe_get_stack_versions', return_value = (("",0,[]))):
+                            if not try_install:
+                              with patch.object(Script, 'install_packages') as install_mock_value:
+                                method(RMFTestCase.env, *command_args)
+                            else:
                               method(RMFTestCase.env, *command_args)
-                          else:
-                            method(RMFTestCase.env, *command_args)
 
     sys.path.remove(scriptsdir)
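
Each patch that RMFTestCase applies adds one more level of "with" nesting, which
is why this hunk re-indents the whole tail of the method. For comparison, a
flatter equivalent can be written with contextlib.ExitStack; this is an
alternative shape, not what the commit does, and on the Python 2 runtime these
tests use it would need the contextlib2 backport:

from contextlib import ExitStack

from mock.mock import patch


def with_patches(patch_specs, func, *args):
  # Enter every patch in order; ExitStack unwinds them all on exit, mirroring
  # the nested "with patch(...)" blocks above.
  with ExitStack() as stack:
    for target, kwargs in patch_specs:
      stack.enter_context(patch(target, **kwargs))
    return func(*args)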
 


[22/50] [abbrv] ambari git commit: AMBARI-22168 Move service metrics to separate tab. (atkach)

Posted by ja...@apache.org.
AMBARI-22168 Move service metrics to separate tab. (atkach)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6eb273e1
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6eb273e1
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6eb273e1

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: 6eb273e19a81773c27f235631c54a3e142277f08
Parents: e83c86d
Author: Andrii Tkach <at...@apache.org>
Authored: Mon Oct 9 14:11:36 2017 +0300
Committer: Andrii Tkach <at...@apache.org>
Committed: Mon Oct 9 15:01:28 2017 +0300

----------------------------------------------------------------------
 ambari-web/app/assets/test/tests.js             |   2 +
 ambari-web/app/controllers.js                   |   1 +
 .../app/controllers/main/service/info/metric.js | 468 +++++++++++++++++++
 .../controllers/main/service/info/summary.js    | 449 +-----------------
 .../service/widgets/create/wizard_controller.js |   2 +-
 ambari-web/app/messages.js                      |   1 +
 .../app/styles/enhanced_service_dashboard.less  |  26 +-
 .../app/templates/main/service/info/metrics.hbs | 104 +++++
 .../app/templates/main/service/info/summary.hbs |  84 ----
 ambari-web/app/templates/main/service/item.hbs  |   5 +-
 ambari-web/app/views.js                         |   1 +
 ambari-web/app/views/main/service/info/menu.js  |   7 +
 .../app/views/main/service/info/metrics_view.js | 290 ++++++++++++
 .../app/views/main/service/info/summary.js      | 315 ++-----------
 ambari-web/app/views/main/service/item.js       |   6 +
 .../main/service/info/metric_test.js            | 110 +++++
 .../main/service/info/summary_test.js           |  76 ---
 .../main/service/info/metrics_view_test.js      | 334 +++++++++++++
 .../views/main/service/info/summary_test.js     | 281 +----------
 19 files changed, 1400 insertions(+), 1162 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/app/assets/test/tests.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/assets/test/tests.js b/ambari-web/app/assets/test/tests.js
index 03b4657..7c636d4 100644
--- a/ambari-web/app/assets/test/tests.js
+++ b/ambari-web/app/assets/test/tests.js
@@ -125,6 +125,7 @@ var files = [
   'test/controllers/main/service/item_test',
   'test/controllers/main/service/info/config_test',
   'test/controllers/main/service/info/summary_test',
+  'test/controllers/main/service/info/metric_test',
   'test/controllers/main/service_test',
   'test/controllers/main/admin_test',
   'test/controllers/main/views_controller_test',
@@ -340,6 +341,7 @@ var files = [
   'test/views/main/service/service_test',
   'test/views/main/service/info/config_test',
   'test/views/main/service/info/summary_test',
+  'test/views/main/service/info/metrics_view_test',
   'test/views/main/service/info/menu_test',
   'test/views/main/service/info/component_list_view_test',
   'test/views/main/service/info/metrics/ambari_metrics/regionserver_base_test',

http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/app/controllers.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers.js b/ambari-web/app/controllers.js
index 81e5eb7..f7d77be 100644
--- a/ambari-web/app/controllers.js
+++ b/ambari-web/app/controllers.js
@@ -142,6 +142,7 @@ require('controllers/main/charts');
 require('controllers/main/charts/heatmap_metrics/heatmap_metric');
 require('controllers/main/charts/heatmap');
 require('controllers/main/service/info/heatmap');
+require('controllers/main/service/info/metric');
 require('controllers/main/views_controller');
 require('controllers/main/views/details_controller');
 require('controllers/wizard/step0_controller');

http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/app/controllers/main/service/info/metric.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service/info/metric.js b/ambari-web/app/controllers/main/service/info/metric.js
new file mode 100644
index 0000000..9dfc32c
--- /dev/null
+++ b/ambari-web/app/controllers/main/service/info/metric.js
@@ -0,0 +1,468 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+var App = require('app');
+
+App.MainServiceInfoMetricsController = Em.Controller.extend(App.WidgetSectionMixin, {
+  name: 'mainServiceInfoMetricsController',
+
+  layoutNameSuffix: "_dashboard",
+
+  sectionNameSuffix: "_SUMMARY",
+
+  /**
+   * Some widget has type `GRAPH`
+   *
+   * @type {boolean}
+   */
+  someWidgetGraphExists: Em.computed.someBy('widgets', 'widgetType', 'GRAPH'),
+
+  /**
+   * @type {boolean}
+   */
+  showTimeRangeControl: Em.computed.or('!isServiceWithEnhancedWidgets', 'someWidgetGraphExists'),
+
+  /**
+   * @type {boolean}
+   */
+  isWidgetLayoutsLoaded: false,
+
+  /**
+   * @type {boolean}
+   */
+  isAllSharedWidgetsLoaded: false,
+
+  /**
+   * @type {boolean}
+   */
+  isMineWidgetsLoaded: false,
+
+  /**
+   * load widget layouts across all users in CLUSTER scope
+   * @returns {$.ajax}
+   */
+  loadWidgetLayouts: function () {
+    this.set('isWidgetLayoutsLoaded', false);
+    return App.ajax.send({
+      name: 'widgets.layouts.get',
+      sender: this,
+      data: {
+        sectionName: this.get('sectionName')
+      },
+      success: 'loadWidgetLayoutsSuccessCallback'
+    });
+  },
+
+  loadWidgetLayoutsSuccessCallback: function (data) {
+    App.widgetLayoutMapper.map(data);
+    this.set('isWidgetLayoutsLoaded', true);
+  },
+
+
+  /**
+   * load all shared widgets to show on widget browser
+   * @returns {$.ajax}
+   */
+  loadAllSharedWidgets: function () {
+    this.set('isAllSharedWidgetsLoaded', false);
+    return App.ajax.send({
+      name: 'widgets.all.shared.get',
+      sender: this,
+      success: 'loadAllSharedWidgetsSuccessCallback'
+    });
+  },
+
+  /**
+   * success callback of <code>loadAllSharedWidgets</code>
+   * @param {object|null} data
+   */
+  loadAllSharedWidgetsSuccessCallback: function (data) {
+    var widgetIds = this.get('widgets').mapProperty('id');
+    if (data.items[0] && data.items.length) {
+      this.set("allSharedWidgets",
+        data.items.filter(function (widget) {
+          return widget.WidgetInfo.widget_type != "HEATMAP";
+        }).map(function (widget) {
+          var widgetType = widget.WidgetInfo.widget_type;
+          var widgetName = widget.WidgetInfo.widget_name;
+          var widgetId =  widget.WidgetInfo.id;
+          return Em.Object.create({
+            id: widgetId,
+            widgetName: widgetName,
+            description: widget.WidgetInfo.description,
+            widgetType: widgetType,
+            iconPath: "/img/widget-" + widgetType.toLowerCase() + ".png",
+            serviceName: JSON.parse(widget.WidgetInfo.metrics).mapProperty('service_name').uniq().join('-'),
+            added: widgetIds.contains(widgetId),
+            isShared: widget.WidgetInfo.scope == "CLUSTER"
+          });
+        })
+      );
+    }
+    this.set('isAllSharedWidgetsLoaded', true);
+  },
+
+  allSharedWidgets: [],
+  mineWidgets: [],
+
+  /**
+   * load all mine widgets of current user to show on widget browser
+   * @returns {$.ajax}
+   */
+  loadMineWidgets: function () {
+    this.set('isMineWidgetsLoaded', false);
+    return App.ajax.send({
+      name: 'widgets.all.mine.get',
+      sender: this,
+      data: {
+        loginName: App.router.get('loginName')
+      },
+      success: 'loadMineWidgetsSuccessCallback'
+    });
+  },
+
+  /**
+   * success callback of <code>loadMineWidgets</code>
+   * @param {object|null} data
+   */
+  loadMineWidgetsSuccessCallback: function (data) {
+    var widgetIds = this.get('widgets').mapProperty('id');
+    if (data.items[0] && data.items.length) {
+      this.set("mineWidgets",
+        data.items.filter(function (widget) {
+          return widget.WidgetInfo.widget_type != "HEATMAP";
+        }).map(function (widget) {
+          var widgetType = widget.WidgetInfo.widget_type;
+          var widgetName = widget.WidgetInfo.widget_name;
+          var widgetId =  widget.WidgetInfo.id;
+          return Em.Object.create({
+            id: widget.WidgetInfo.id,
+            widgetName: widgetName,
+            description: widget.WidgetInfo.description,
+            widgetType: widgetType,
+            iconPath: "/img/widget-" + widgetType.toLowerCase() + ".png",
+            serviceName: JSON.parse(widget.WidgetInfo.metrics).mapProperty('service_name').uniq().join('-'),
+            added: widgetIds.contains(widgetId),
+            isShared: widget.WidgetInfo.scope == "CLUSTER"
+          });
+        })
+      );
+    } else {
+      this.set("mineWidgets", []);
+    }
+    this.set('isMineWidgetsLoaded', true);
+  },
+
+  /**
+   * add widgets, on click handler for "Add"
+   */
+  addWidget: function (event) {
+    var widgetToAdd = event.context;
+    var activeLayout = this.get('activeWidgetLayout');
+    var widgetIds = activeLayout.get('widgets').map(function(widget) {
+      return {
+        "id": widget.get("id")
+      }
+    });
+    widgetIds.pushObject({
+      "id": widgetToAdd.id
+    });
+    var data = {
+      "WidgetLayoutInfo": {
+        "display_name": activeLayout.get("displayName"),
+        "id": activeLayout.get("id"),
+        "layout_name": activeLayout.get("layoutName"),
+        "scope": activeLayout.get("scope"),
+        "section_name": activeLayout.get("sectionName"),
+        "widgets": widgetIds
+      }
+    };
+
+    widgetToAdd.set('added', !widgetToAdd.added);
+    return App.ajax.send({
+      name: 'widget.layout.edit',
+      sender: this,
+      data: {
+        layoutId: activeLayout.get("id"),
+        data: data
+      },
+      success: 'updateActiveLayout'
+    });
+  },
+
+  /**
+   * hide widgets, on click handler for "Added"
+   */
+  hideWidget: function (event) {
+    var widgetToHide = event.context;
+    var activeLayout = this.get('activeWidgetLayout');
+    var widgetIds = activeLayout.get('widgets').map(function (widget) {
+      return {
+        "id": widget.get("id")
+      }
+    });
+    var data = {
+      "WidgetLayoutInfo": {
+        "display_name": activeLayout.get("displayName"),
+        "id": activeLayout.get("id"),
+        "layout_name": activeLayout.get("layoutName"),
+        "scope": activeLayout.get("scope"),
+        "section_name": activeLayout.get("sectionName"),
+        "widgets": widgetIds.filter(function (widget) {
+          return widget.id !== widgetToHide.id;
+        })
+      }
+    };
+
+    widgetToHide.set('added', !widgetToHide.added);
+    return App.ajax.send({
+      name: 'widget.layout.edit',
+      sender: this,
+      data: {
+        layoutId: activeLayout.get("id"),
+        data: data
+      },
+      success: 'hideWidgetSuccessCallback'
+    });
+
+  },
+
+  /**
+   * @param {object|null} data
+   * @param {object} opt
+   * @param {object} params
+   */
+  hideWidgetSuccessCallback: function (data, opt, params) {
+    params.data.WidgetLayoutInfo.widgets = params.data.WidgetLayoutInfo.widgets.map(function (widget) {
+      return {
+        WidgetInfo: {
+          id: widget.id
+        }
+      }
+    });
+    App.widgetLayoutMapper.map({items: [params.data]});
+    this.propertyDidChange('widgets');
+  },
+
+  /**
+   * update current active widget layout
+   */
+  updateActiveLayout: function () {
+    this.getActiveWidgetLayout();
+  },
+
+  /**
+   * delete widgets, on click handler for "Delete"
+   */
+  deleteWidget: function (event) {
+    var widget = event.context;
+    var self = this;
+    var confirmMsg =  widget.get('isShared') ? Em.I18n.t('dashboard.widgets.browser.action.delete.shared.bodyMsg').format(widget.widgetName) :  Em.I18n.t('dashboard.widgets.browser.action.delete.mine.bodyMsg').format(widget.widgetName);
+    var bodyMessage = Em.Object.create({
+      confirmMsg: confirmMsg,
+      confirmButton: Em.I18n.t('dashboard.widgets.browser.action.delete.btnMsg')
+    });
+    return App.showConfirmationFeedBackPopup(function (query) {
+      return App.ajax.send({
+        name: 'widget.action.delete',
+        sender: self,
+        data: {
+          id: widget.id
+        },
+        success: 'updateWidgetBrowser'
+      });
+
+    }, bodyMessage);
+  },
+
+  /**
+   * update widget browser content after deleted some widget
+   */
+  updateWidgetBrowser: function () {
+    this.loadAllSharedWidgets();
+    this.loadMineWidgets();
+  },
+
+  /**
+   * Share widgets, on click handler for "Share"
+   */
+  shareWidget: function (event) {
+    var widget = event.context;
+    var self = this;
+    var bodyMessage = Em.Object.create({
+      confirmMsg: Em.I18n.t('dashboard.widgets.browser.action.share.confirmation'),
+      confirmButton: Em.I18n.t('dashboard.widgets.browser.action.share')
+    });
+    return App.showConfirmationFeedBackPopup(function (query) {
+      return App.ajax.send({
+        name: 'widgets.wizard.edit',
+        sender: self,
+        data: {
+          data: {
+            "WidgetInfo": {
+              "widget_name": widget.get("widgetName"),
+              "scope": "CLUSTER"
+            }
+          },
+          widgetId: widget.get("id")
+        },
+        success: 'updateWidgetBrowser'
+      });
+    }, bodyMessage);
+  },
+
+  /**
+   * create widget
+   */
+  createWidget: function () {
+    App.router.send('createServiceWidget', Em.Object.create({
+      layout: this.get('activeWidgetLayout'),
+      serviceName: this.get('content.serviceName')
+    }));
+  },
+
+  /**
+   * edit widget
+   * @param {App.Widget} content
+   */
+  editWidget: function (content) {
+    content.set('serviceName', this.get('content.serviceName'));
+    App.router.send('editServiceWidget', content);
+  },
+
+  /**
+   * launch Widgets Browser popup
+   * @method showPopup
+   * @return {App.ModalPopup}
+   */
+  goToWidgetsBrowser: function () {
+    var self = this;
+
+    return App.ModalPopup.show({
+      header: Em.I18n.t('dashboard.widgets.browser.header'),
+
+      classNames: ['common-modal-wrapper', 'widgets-browser-popup'],
+      modalDialogClasses: ['modal-lg'],
+      onPrimary: function () {
+        this.hide();
+        self.set('isAllSharedWidgetsLoaded', false);
+        self.set('allSharedWidgets', []);
+        self.set('isMineWidgetsLoaded', false);
+        self.set('mineWidgets', []);
+      },
+      autoHeight: false,
+      isHideBodyScroll: false,
+      footerClass: Ember.View.extend({
+        templateName: require('templates/common/modal_popups/widget_browser_footer'),
+        isShowMineOnly: false,
+        onPrimary: function() {
+          this.get('parentView').onPrimary();
+        }
+      }),
+      isShowMineOnly: false,
+      bodyClass: Ember.View.extend({
+        templateName: require('templates/common/modal_popups/widget_browser_popup'),
+        controller: self,
+        willInsertElement: function () {
+          this.get('controller').loadAllSharedWidgets();
+          this.get('controller').loadMineWidgets();
+        },
+
+        isLoaded: Em.computed.and('controller.isAllSharedWidgetsLoaded', 'controller.isMineWidgetsLoaded'),
+
+        isWidgetEmptyList: Em.computed.empty('filteredContent'),
+
+        activeService: '',
+        activeStatus: '',
+
+        content: function () {
+          if (this.get('parentView.isShowMineOnly')) {
+            return this.get('controller.mineWidgets');
+          } else {
+            // merge my widgets and all shared widgets; no duplicates are allowed
+            var content = [];
+            var widgetMap = {};
+            var allWidgets = this.get('controller.allSharedWidgets').concat(this.get('controller.mineWidgets'));
+            allWidgets.forEach(function(widget) {
+              if (!widgetMap[widget.get("id")]) {
+                content.pushObject(widget);
+                widgetMap[widget.get("id")] = true;
+              }
+            });
+            return content;
+          }
+        }.property('controller.allSharedWidgets.length', 'controller.isAllSharedWidgetsLoaded',
+          'controller.mineWidgets.length', 'controller.isMineWidgetsLoaded', 'parentView.isShowMineOnly'),
+
+        /**
+         * displaying content filtered by service name and status.
+         */
+        filteredContent: function () {
+          var activeService = this.get('activeService') ? this.get('activeService') : this.get('controller.content.serviceName');
+          var result = [];
+          this.get('content').forEach(function (widget) {
+            if (widget.get('serviceName').indexOf(activeService) >= 0) {
+              result.pushObject(widget);
+            }
+          });
+          return result;
+        }.property('content', 'activeService', 'activeStatus'),
+
+        /**
+         * service name filter
+         */
+        services: function () {
+          var view = this;
+          var services = App.Service.find().filter(function(item){
+            var stackService =  App.StackService.find().findProperty('serviceName', item.get('serviceName'));
+            return stackService.get('isServiceWithWidgets');
+          });
+          return services.map(function (service) {
+            return Em.Object.create({
+              value: service.get('serviceName'),
+              label: service.get('displayName'),
+              isActive: function () {
+                var activeService = view.get('activeService') ? view.get('activeService') : view.get('controller.content.serviceName');
+                return this.get('value') == activeService;
+              }.property('value', 'view.activeService')
+            })
+          });
+        }.property('activeService'),
+
+        filterByService: function (event) {
+          this.set('activeService', event.context);
+        },
+
+        createWidget: function () {
+          this.get('parentView').onPrimary();
+          this.get('controller').createWidget();
+        },
+
+        ensureTooltip: function () {
+          Em.run.later(this, function () {
+            App.tooltip($("[rel='shared-icon-tooltip']"));
+          }, 1000);
+        }.observes('activeService', 'parentView.isShowMineOnly'),
+
+        didInsertElement: function () {
+          this.ensureTooltip();
+        }
+      })
+    });
+  }
+
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/app/controllers/main/service/info/summary.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service/info/summary.js b/ambari-web/app/controllers/main/service/info/summary.js
index d696334..3d7483a 100644
--- a/ambari-web/app/controllers/main/service/info/summary.js
+++ b/ambari-web/app/controllers/main/service/info/summary.js
@@ -17,7 +17,7 @@
 
 var App = require('app');
 
-App.MainServiceInfoSummaryController = Em.Controller.extend(App.WidgetSectionMixin, {
+App.MainServiceInfoSummaryController = Em.Controller.extend({
   name: 'mainServiceInfoSummaryController',
 
   selectedFlumeAgent: null,
@@ -40,10 +40,6 @@ App.MainServiceInfoSummaryController = Em.Controller.extend(App.WidgetSectionMix
    */
   isPreviousRangerConfigsCallFailed: false,
 
-  layoutNameSuffix: "_dashboard",
-
-  sectionNameSuffix: "_SUMMARY",
-
   /**
    * HiveServer2 JDBC connection endpoint data
    * @type {array}
@@ -112,18 +108,6 @@ App.MainServiceInfoSummaryController = Em.Controller.extend(App.WidgetSectionMix
   ],
 
   /**
-   * Some widget has type `GRAPH`
-   *
-   * @type {boolean}
-   */
-  someWidgetGraphExists: Em.computed.someBy('widgets', 'widgetType', 'GRAPH'),
-
-  /**
-   * @type {boolean}
-   */
-  showTimeRangeControl: Em.computed.or('!isServiceWithEnhancedWidgets', 'someWidgetGraphExists'),
-
-  /**
    * Set initial Ranger plugins data
    * @method setRangerPlugins
    */
@@ -425,437 +409,6 @@ App.MainServiceInfoSummaryController = Em.Controller.extend(App.WidgetSectionMix
     });
   },
 
-
-  /**
-   * @type {boolean}
-   */
-  isWidgetLayoutsLoaded: false,
-
-  /**
-   * @type {boolean}
-   */
-  isAllSharedWidgetsLoaded: false,
-
-  /**
-   * @type {boolean}
-   */
-  isMineWidgetsLoaded: false,
-
-
-  /**
-   * load widget layouts across all users in CLUSTER scope
-   * @returns {$.ajax}
-   */
-  loadWidgetLayouts: function () {
-    this.set('isWidgetLayoutsLoaded', false);
-    return App.ajax.send({
-      name: 'widgets.layouts.get',
-      sender: this,
-      data: {
-        sectionName: this.get('sectionName')
-      },
-      success: 'loadWidgetLayoutsSuccessCallback'
-    });
-  },
-
-  loadWidgetLayoutsSuccessCallback: function (data) {
-    App.widgetLayoutMapper.map(data);
-    this.set('isWidgetLayoutsLoaded', true);
-  },
-
-
-  /**
-   * load all shared widgets to show on widget browser
-   * @returns {$.ajax}
-   */
-  loadAllSharedWidgets: function () {
-    this.set('isAllSharedWidgetsLoaded', false);
-    return App.ajax.send({
-      name: 'widgets.all.shared.get',
-      sender: this,
-      success: 'loadAllSharedWidgetsSuccessCallback'
-    });
-  },
-
-  /**
-   * success callback of <code>loadAllSharedWidgets</code>
-   * @param {object|null} data
-   */
-  loadAllSharedWidgetsSuccessCallback: function (data) {
-    var widgetIds = this.get('widgets').mapProperty('id');
-    if (data.items[0] && data.items.length) {
-      this.set("allSharedWidgets",
-        data.items.filter(function (widget) {
-          return widget.WidgetInfo.widget_type != "HEATMAP";
-        }).map(function (widget) {
-          var widgetType = widget.WidgetInfo.widget_type;
-          var widgetName = widget.WidgetInfo.widget_name;
-          var widgetId =  widget.WidgetInfo.id;
-          return Em.Object.create({
-            id: widgetId,
-            widgetName: widgetName,
-            description: widget.WidgetInfo.description,
-            widgetType: widgetType,
-            iconPath: "/img/widget-" + widgetType.toLowerCase() + ".png",
-            serviceName: JSON.parse(widget.WidgetInfo.metrics).mapProperty('service_name').uniq().join('-'),
-            added: widgetIds.contains(widgetId),
-            isShared: widget.WidgetInfo.scope == "CLUSTER"
-          });
-        })
-      );
-    }
-    this.set('isAllSharedWidgetsLoaded', true);
-  },
-
-  allSharedWidgets: [],
-  mineWidgets: [],
-
-  /**
-   * load all mine widgets of current user to show on widget browser
-   * @returns {$.ajax}
-   */
-  loadMineWidgets: function () {
-    this.set('isMineWidgetsLoaded', false);
-    return App.ajax.send({
-      name: 'widgets.all.mine.get',
-      sender: this,
-      data: {
-        loginName: App.router.get('loginName')
-      },
-      success: 'loadMineWidgetsSuccessCallback'
-    });
-  },
-
-  /**
-   * success callback of <code>loadMineWidgets</code>
-   * @param {object|null} data
-   */
-  loadMineWidgetsSuccessCallback: function (data) {
-    var widgetIds = this.get('widgets').mapProperty('id');
-    if (data.items[0] && data.items.length) {
-      this.set("mineWidgets",
-        data.items.filter(function (widget) {
-          return widget.WidgetInfo.widget_type != "HEATMAP";
-        }).map(function (widget) {
-          var widgetType = widget.WidgetInfo.widget_type;
-          var widgetName = widget.WidgetInfo.widget_name;
-          var widgetId =  widget.WidgetInfo.id;
-          return Em.Object.create({
-            id: widget.WidgetInfo.id,
-            widgetName: widgetName,
-            description: widget.WidgetInfo.description,
-            widgetType: widgetType,
-            iconPath: "/img/widget-" + widgetType.toLowerCase() + ".png",
-            serviceName: JSON.parse(widget.WidgetInfo.metrics).mapProperty('service_name').uniq().join('-'),
-            added: widgetIds.contains(widgetId),
-            isShared: widget.WidgetInfo.scope == "CLUSTER"
-          });
-        })
-      );
-    } else {
-      this.set("mineWidgets", []);
-    }
-    this.set('isMineWidgetsLoaded', true);
-  },
-
-  /**
-   * add widgets, on click handler for "Add"
-   */
-  addWidget: function (event) {
-    var widgetToAdd = event.context;
-    var activeLayout = this.get('activeWidgetLayout');
-    var widgetIds = activeLayout.get('widgets').map(function(widget) {
-      return {
-        "id": widget.get("id")
-      }
-    });
-    widgetIds.pushObject({
-      "id": widgetToAdd.id
-    });
-    var data = {
-      "WidgetLayoutInfo": {
-        "display_name": activeLayout.get("displayName"),
-        "id": activeLayout.get("id"),
-        "layout_name": activeLayout.get("layoutName"),
-        "scope": activeLayout.get("scope"),
-        "section_name": activeLayout.get("sectionName"),
-        "widgets": widgetIds
-      }
-    };
-
-    widgetToAdd.set('added', !widgetToAdd.added);
-    return App.ajax.send({
-      name: 'widget.layout.edit',
-      sender: this,
-      data: {
-        layoutId: activeLayout.get("id"),
-        data: data
-      },
-      success: 'updateActiveLayout'
-    });
-  },
-
-  /**
-   * hide widgets, on click handler for "Added"
-   */
-  hideWidget: function (event) {
-    var widgetToHide = event.context;
-    var activeLayout = this.get('activeWidgetLayout');
-    var widgetIds = activeLayout.get('widgets').map(function (widget) {
-      return {
-        "id": widget.get("id")
-      }
-    });
-    var data = {
-      "WidgetLayoutInfo": {
-        "display_name": activeLayout.get("displayName"),
-        "id": activeLayout.get("id"),
-        "layout_name": activeLayout.get("layoutName"),
-        "scope": activeLayout.get("scope"),
-        "section_name": activeLayout.get("sectionName"),
-        "widgets": widgetIds.filter(function (widget) {
-          return widget.id !== widgetToHide.id;
-        })
-      }
-    };
-
-    widgetToHide.set('added', !widgetToHide.added);
-    return App.ajax.send({
-      name: 'widget.layout.edit',
-      sender: this,
-      data: {
-        layoutId: activeLayout.get("id"),
-        data: data
-      },
-      success: 'hideWidgetSuccessCallback'
-    });
-
-  },
-
-  /**
-   * @param {object|null} data
-   * @param {object} opt
-   * @param {object} params
-   */
-  hideWidgetSuccessCallback: function (data, opt, params) {
-    params.data.WidgetLayoutInfo.widgets = params.data.WidgetLayoutInfo.widgets.map(function (widget) {
-      return {
-        WidgetInfo: {
-          id: widget.id
-        }
-      }
-    });
-    App.widgetLayoutMapper.map({items: [params.data]});
-    this.propertyDidChange('widgets');
-  },
-
-  /**
-   * update current active widget layout
-   */
-  updateActiveLayout: function () {
-    this.getActiveWidgetLayout();
-  },
-
-  /**
-   * delete widgets, on click handler for "Delete"
-   */
-  deleteWidget: function (event) {
-    var widget = event.context;
-    var self = this;
-    var confirmMsg =  widget.get('isShared') ? Em.I18n.t('dashboard.widgets.browser.action.delete.shared.bodyMsg').format(widget.widgetName) :  Em.I18n.t('dashboard.widgets.browser.action.delete.mine.bodyMsg').format(widget.widgetName);
-    var bodyMessage = Em.Object.create({
-      confirmMsg: confirmMsg,
-      confirmButton: Em.I18n.t('dashboard.widgets.browser.action.delete.btnMsg')
-    });
-    return App.showConfirmationFeedBackPopup(function (query) {
-      return App.ajax.send({
-        name: 'widget.action.delete',
-        sender: self,
-        data: {
-          id: widget.id
-        },
-        success: 'updateWidgetBrowser'
-      });
-
-    }, bodyMessage);
-  },
-
-  /**
-   * update widget browser content after deleted some widget
-   */
-  updateWidgetBrowser: function () {
-    this.loadAllSharedWidgets();
-    this.loadMineWidgets();
-  },
-
-  /**
-   * Share widgets, on click handler for "Share"
-   */
-  shareWidget: function (event) {
-    var widget = event.context;
-    var self = this;
-    var bodyMessage = Em.Object.create({
-      confirmMsg: Em.I18n.t('dashboard.widgets.browser.action.share.confirmation'),
-      confirmButton: Em.I18n.t('dashboard.widgets.browser.action.share')
-    });
-    return App.showConfirmationFeedBackPopup(function (query) {
-      return App.ajax.send({
-        name: 'widgets.wizard.edit',
-        sender: self,
-        data: {
-          data: {
-            "WidgetInfo": {
-              "widget_name": widget.get("widgetName"),
-              "scope": "CLUSTER"
-            }
-          },
-          widgetId: widget.get("id")
-        },
-        success: 'updateWidgetBrowser'
-      });
-    }, bodyMessage);
-  },
-
-  /**
-   * create widget
-   */
-  createWidget: function () {
-    App.router.send('createServiceWidget', Em.Object.create({
-      layout: this.get('activeWidgetLayout'),
-      serviceName: this.get('content.serviceName')
-    }));
-  },
-
-  /**
-   * edit widget
-   * @param {App.Widget} content
-   */
-  editWidget: function (content) {
-    content.set('serviceName', this.get('content.serviceName'));
-    App.router.send('editServiceWidget', content);
-  },
-
-  /**
-   * launch Widgets Browser popup
-   * @method showPopup
-   * @return {App.ModalPopup}
-   */
-  goToWidgetsBrowser: function () {
-    var self = this;
-
-    return App.ModalPopup.show({
-      header: Em.I18n.t('dashboard.widgets.browser.header'),
-
-      classNames: ['common-modal-wrapper', 'widgets-browser-popup'],
-      modalDialogClasses: ['modal-lg'],
-      onPrimary: function () {
-        this.hide();
-        self.set('isAllSharedWidgetsLoaded', false);
-        self.set('allSharedWidgets', []);
-        self.set('isMineWidgetsLoaded', false);
-        self.set('mineWidgets', []);
-      },
-      autoHeight: false,
-      isHideBodyScroll: false,
-      footerClass: Ember.View.extend({
-        templateName: require('templates/common/modal_popups/widget_browser_footer'),
-        isShowMineOnly: false,
-        onPrimary: function() {
-          this.get('parentView').onPrimary();
-        }
-      }),
-      isShowMineOnly: false,
-      bodyClass: Ember.View.extend({
-        templateName: require('templates/common/modal_popups/widget_browser_popup'),
-        controller: self,
-        willInsertElement: function () {
-          this.get('controller').loadAllSharedWidgets();
-          this.get('controller').loadMineWidgets();
-        },
-
-        isLoaded: Em.computed.and('controller.isAllSharedWidgetsLoaded', 'controller.isMineWidgetsLoaded'),
-
-        isWidgetEmptyList: Em.computed.empty('filteredContent'),
-
-        activeService: '',
-        activeStatus: '',
-
-        content: function () {
-          if (this.get('parentView.isShowMineOnly')) {
-            return this.get('controller.mineWidgets');
-          } else {
-            // merge my widgets and all shared widgets, no duplicated is allowed
-            var content = [];
-            var widgetMap = {};
-            var allWidgets = this.get('controller.allSharedWidgets').concat(this.get('controller.mineWidgets'));
-            allWidgets.forEach(function(widget) {
-              if (!widgetMap[widget.get("id")]) {
-                content.pushObject(widget);
-                widgetMap[widget.get("id")] = true;
-              }
-            });
-            return content;
-          }
-        }.property('controller.allSharedWidgets.length', 'controller.isAllSharedWidgetsLoaded',
-          'controller.mineWidgets.length', 'controller.isMineWidgetsLoaded', 'parentView.isShowMineOnly'),
-
-        /**
-         * displaying content filtered by service name and status.
-         */
-        filteredContent: function () {
-          var activeService = this.get('activeService') ? this.get('activeService') : this.get('controller.content.serviceName');
-          var result = [];
-          this.get('content').forEach(function (widget) {
-            if (widget.get('serviceName').indexOf(activeService) >= 0) {
-              result.pushObject(widget);
-            }
-          });
-          return result;
-        }.property('content', 'activeService', 'activeStatus'),
-
-        /**
-         * service name filter
-         */
-        services: function () {
-          var view = this;
-          var services = App.Service.find().filter(function(item){
-            var stackService =  App.StackService.find().findProperty('serviceName', item.get('serviceName'));
-            return stackService.get('isServiceWithWidgets');
-          });
-          return services.map(function (service) {
-            return Em.Object.create({
-              value: service.get('serviceName'),
-              label: service.get('displayName'),
-              isActive: function () {
-                var activeService = view.get('activeService') ? view.get('activeService') : view.get('controller.content.serviceName');
-                return this.get('value') == activeService;
-              }.property('value', 'view.activeService')
-            })
-          });
-        }.property('activeService'),
-
-        filterByService: function (event) {
-          this.set('activeService', event.context);
-        },
-
-        createWidget: function () {
-          this.get('parentView').onPrimary();
-          this.get('controller').createWidget();
-        },
-
-        ensureTooltip: function () {
-          Em.run.later(this, function () {
-            App.tooltip($("[rel='shared-icon-tooltip']"));
-          }, 1000);
-        }.observes('activeService', 'parentView.isShowMineOnly'),
-
-        didInsertElement: function () {
-          this.ensureTooltip();
-        }
-      })
-    });
-  },
-
   goToView: function(event) {
     App.router.route(event.context.get('internalAmbariUrl'));
   }

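The removed addWidget/hideWidget handlers (they move to the metrics controller in this
commit) both rebuild the full WidgetLayoutInfo payload before sending the widget.layout.edit
request. A sketch of that payload construction, factored into a hypothetical helper that is
not part of the patch:

    // Build the widget.layout.edit payload from an active layout and a list of widget ids.
    // `layout` is assumed to be an App.WidgetLayout-like Ember object, as in the code above.
    function buildLayoutPayload(layout, widgetIds) {
      return {
        WidgetLayoutInfo: {
          display_name: layout.get('displayName'),
          id: layout.get('id'),
          layout_name: layout.get('layoutName'),
          scope: layout.get('scope'),
          section_name: layout.get('sectionName'),
          widgets: widgetIds.map(function (id) {
            return {id: id};
          })
        }
      };
    }

    // Adding appends one id to the list; hiding filters one id out.
    // Both handlers then send the same request shape to the same endpoint.
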
http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/app/controllers/main/service/widgets/create/wizard_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service/widgets/create/wizard_controller.js b/ambari-web/app/controllers/main/service/widgets/create/wizard_controller.js
index e833ead..a46c5e4 100644
--- a/ambari-web/app/controllers/main/service/widgets/create/wizard_controller.js
+++ b/ambari-web/app/controllers/main/service/widgets/create/wizard_controller.js
@@ -417,7 +417,7 @@ App.WidgetWizardController = App.WizardController.extend({
     var self = this;
     var successCallBack = function() {
       self.get('popup').hide();
-      App.router.transitionTo('main.services.service.summary', service);
+      App.router.transitionTo('main.services.service.metrics', service);
       App.get('router.updateController').updateAll();
     };
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/app/messages.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index 7cde3d1..3c4f038 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -2199,6 +2199,7 @@ Em.I18n.translations = {
   'services.service.info.menu.summary':'Summary',
   'services.service.info.menu.configs':'Configs',
   'services.service.info.menu.heatmaps':'Heatmaps',
+  'services.service.info.menu.metrics':'Metrics',
   'services.service.info.summary.hostsRunningMonitor':'{0}/{1}',
   'services.service.info.summary.serversHostCount':'{0} more',
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/app/styles/enhanced_service_dashboard.less
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/enhanced_service_dashboard.less b/ambari-web/app/styles/enhanced_service_dashboard.less
index 34a4763..00b46a8 100644
--- a/ambari-web/app/styles/enhanced_service_dashboard.less
+++ b/ambari-web/app/styles/enhanced_service_dashboard.less
@@ -26,6 +26,10 @@
 
   clear: both;
 
+  .service-widgets-box {
+    padding: 10px 1.1% 10px 1.1%;
+  }
+
   #add-widget-action-box {
     background-color: @add-widget-btn-color;
     width: 97%;
@@ -69,7 +73,7 @@
     width: 93%;
   }
   .span2p4 {
-    width: 22.7%;
+    width: 24.4%;
     height: 100%;
     background-color: white;
     margin: 5px 0 5px 5px;
@@ -188,6 +192,26 @@
   }
 }
 
+@media (min-width: 1200px) {
+
+  .service-metrics-block .service-widgets-box {
+    padding: 10px 1.3% 10px 1.3%;
+  }
+
+  #widget_layout .span2p4 {
+    width: 24.5%;
+    *width: 24.5%;
+  }
+}
+
+@media (min-width: 1500px) {
+
+  #widget_layout .span2p4 {
+    width: 24.6%;
+    *width: 24.6%;
+  }
+}
+
 #widget-preview {
   max-width: 200px;
   margin: auto;

http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/app/templates/main/service/info/metrics.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/info/metrics.hbs b/ambari-web/app/templates/main/service/info/metrics.hbs
new file mode 100644
index 0000000..6834c06
--- /dev/null
+++ b/ambari-web/app/templates/main/service/info/metrics.hbs
@@ -0,0 +1,104 @@
+{{!
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+}}
+
+{{#if view.serviceHasMetrics}}
+  <div class="service-metrics-block">
+    <div class="panel panel-default">
+      <div class="panel-heading">
+        <div class="row">
+          <div class="col-md-7 col-lg-7">
+            <h4 class="panel-title">{{t services.service.metrics}}</h4>
+          </div>
+          <div class="col-md-5 col-lg-5">
+            {{#if showTimeRangeControl}}
+              {{view view.timeRangeListView}}
+            {{/if}}
+            {{#if isServiceWithEnhancedWidgets}}
+              {{#if isAmbariMetricsInstalled}}
+                <div class="btn-group pull-right actions">
+                  <button type="button" class="btn btn-default dropdown-toggle" data-toggle="dropdown">
+                    {{t common.actions}} &nbsp;<span class="caret"></span>
+                  </button>
+                  <ul class="dropdown-menu">
+                    {{#each option in view.widgetActions}}
+                      <li {{bindAttr class="option.layouts:dropdown-submenu"}}>
+                        {{#if option.isAction}}
+                          <a href="#" {{action doWidgetAction option.action target="view"}}>
+                            <i {{bindAttr class="option.class"}}></i>
+                            {{option.label}}
+                          </a>
+                          {{#if option.layouts}}
+                            <ul class="dropdown-menu">
+                              {{#each layout in option.layouts}}
+                                <li>
+                                  <a href="javascript:void(0);">
+                                    {{layout.layoutName}}
+                                  </a>
+                                </li>
+                              {{/each}}
+                            </ul>
+                          {{/if}}
+                        {{/if}}
+                      </li>
+                    {{/each}}
+                  </ul>
+                </div>
+              {{/if}}
+            {{/if}}
+          </div>
+        </div>
+      </div>
+      <div class="panel-body service-widgets-box">
+        {{#if isServiceWithEnhancedWidgets}}
+          <div id="widget_layout" class="thumbnails">
+            {{#each widget in controller.widgets}}
+              <div class="widget span2p4" {{bindAttr id="widget.id"}}>
+                {{view widget.viewClass contentBinding="widget" idBinding="widget.id"}}
+              </div>
+            {{/each}}
+            {{#if isAmbariMetricsInstalled}}
+              <div class="span2p4">
+                <button id="add-widget-action-box" {{action "goToWidgetsBrowser" controller.content
+                                                            target="controller"}}
+                        rel="add-widget-tooltip" {{translateAttr
+                  data-original-title="dashboard.widgets.addButton.tooltip"}}>
+                  <i class="glyphicon glyphicon-plus"></i></button>
+              </div>
+            {{/if}}
+          </div>
+        {{/if}}
+        <table class="graphs">
+          {{#each graphs in view.serviceMetricGraphs}}
+            <tr>
+              {{#each graph in graphs}}
+                <td>
+                  <div>
+                    {{view graph}}
+                  </div>
+                </td>
+              {{/each}}
+            </tr>
+          {{/each}}
+        </table>
+      </div>
+    </div>
+  </div>
+{{/if}}
+
+
+

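The new metrics template renders view.serviceMetricGraphs through two nested {{#each}}
blocks: the outer loop emits one <tr> per row, the inner loop one <td> per graph. A
hypothetical illustration of the data shape the template expects (the row chunking itself
is done in metrics_view.js below; string placeholders stand in for the Ember view classes
resolved via App['ChartServiceMetrics' + graphName]):

    // Each inner array is one table row of up to chunkSize (5) graph views.
    var serviceMetricGraphs = [
      ['graph1', 'graph2', 'graph3', 'graph4', 'graph5'],  // first <tr>
      ['graph6', 'graph7']                                 // second <tr>
    ];
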
http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/app/templates/main/service/info/summary.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/info/summary.hbs b/ambari-web/app/templates/main/service/info/summary.hbs
index 075cae0..b0c9e7f 100644
--- a/ambari-web/app/templates/main/service/info/summary.hbs
+++ b/ambari-web/app/templates/main/service/info/summary.hbs
@@ -134,90 +134,6 @@
       </div>
     </div>
   </div>
-  {{! widgets in the metrics panel are loaded seperately from summary page text information
-      and does not get block due to any global API poller information }}
-  {{#if view.isServiceMetricLoaded}}
-    <div class="service-metrics-block">
-      <div class="panel panel-default">
-        <div class="panel-heading">
-          <div class="row">
-            <div class="col-md-7 col-lg-7">
-              <h4 class="panel-title">{{t services.service.metrics}}</h4>
-            </div>
-            <div class="col-md-5 col-lg-5">
-              {{#if showTimeRangeControl}}
-                {{view view.timeRangeListView}}
-              {{/if}}
-              {{#if isServiceWithEnhancedWidgets}}
-                {{#if isAmbariMetricsInstalled}}
-                  <div class="btn-group pull-right actions">
-                    <button type="button" class="btn btn-default dropdown-toggle" data-toggle="dropdown">
-                      {{t common.actions}} &nbsp;<span class="caret"></span>
-                    </button>
-                    <ul class="dropdown-menu">
-                      {{#each option in view.widgetActions}}
-                        <li {{bindAttr class="option.layouts:dropdown-submenu"}}>
-                          {{#if option.isAction}}
-                            <a href="#" {{action doWidgetAction option.action target="view"}}>
-                              <i {{bindAttr class="option.class"}}></i>
-                              {{option.label}}
-                            </a>
-                            {{#if option.layouts}}
-                              <ul class="dropdown-menu">
-                                {{#each layout in option.layouts}}
-                                  <li>
-                                    <a href="javascript:void(0);">
-                                      {{layout.layoutName}}
-                                    </a>
-                                  </li>
-                                {{/each}}
-                              </ul>
-                            {{/if}}
-                          {{/if}}
-                        </li>
-                      {{/each}}
-                    </ul>
-                  </div>
-                {{/if}}
-              {{/if}}
-            </div>
-          </div>
-        </div>
-        <div class="panel-body">
-          {{#if isServiceWithEnhancedWidgets}}
-            <div id="widget_layout" class="thumbnails">
-              {{#each widget in controller.widgets}}
-                <div class="widget span2p4" {{bindAttr id="widget.id"}}>
-                  {{view widget.viewClass contentBinding="widget" idBinding="widget.id"}}
-                </div>
-              {{/each}}
-              {{#if isAmbariMetricsInstalled}}
-                <div class="span2p4">
-                  <button id="add-widget-action-box"
-                    {{action "goToWidgetsBrowser" controller.content target="controller"}}
-                          rel="add-widget-tooltip" {{translateAttr data-original-title="dashboard.widgets.addButton.tooltip"}}>
-                    <i class="glyphicon glyphicon-plus"></i></button>
-                </div>
-              {{/if}}
-            </div>
-          {{/if}}
-          <table class="graphs">
-            {{#each graphs in view.serviceMetricGraphs}}
-              <tr>
-                {{#each graph in graphs}}
-                  <td>
-                    <div>
-                      {{view graph}}
-                    </div>
-                  </td>
-                {{/each}}
-              </tr>
-            {{/each}}
-          </table>
-        </div>
-      </div>
-    </div>
-  {{/if}}
 </div>
 
 {{#if view.collapsedSections}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/app/templates/main/service/item.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/item.hbs b/ambari-web/app/templates/main/service/item.hbs
index e942eb1..df26a9d 100644
--- a/ambari-web/app/templates/main/service/item.hbs
+++ b/ambari-web/app/templates/main/service/item.hbs
@@ -16,7 +16,10 @@
 * limitations under the License.
 }}
 
-{{view App.MainServiceInfoMenuView configTabBinding="view.hasConfigTab" heatmapTabBinding="view.hasHeatmapTab"}}
+{{view App.MainServiceInfoMenuView
+    configTabBinding="view.hasConfigTab"
+    heatmapTabBinding="view.hasHeatmapTab"
+    metricTabBinding="view.hasMetricTab"}}
 
 {{#isAuthorized "SERVICE.RUN_CUSTOM_COMMAND, SERVICE.RUN_SERVICE_CHECK, SERVICE.START_STOP, SERVICE.TOGGLE_MAINTENANCE, SERVICE.ENABLE_HA"}}
 <div class="service-button">

http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/app/views.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views.js b/ambari-web/app/views.js
index 8031434..50729a7 100644
--- a/ambari-web/app/views.js
+++ b/ambari-web/app/views.js
@@ -348,6 +348,7 @@ require('views/main/charts/heatmap/heatmap_rack');
 require('views/main/charts/heatmap/heatmap_host');
 require('views/main/charts/heatmap/heatmap_host_detail');
 require('views/main/service/info/heatmap_view');
+require('views/main/service/info/metrics_view');
 
 require('views/main/service/widgets/create/wizard_view');
 require('views/main/service/widgets/create/step1_view');

http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/app/views/main/service/info/menu.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/service/info/menu.js b/ambari-web/app/views/main/service/info/menu.js
index 3533a72..89d5401 100644
--- a/ambari-web/app/views/main/service/info/menu.js
+++ b/ambari-web/app/views/main/service/info/menu.js
@@ -45,6 +45,13 @@ App.MainServiceInfoMenuView = Em.CollectionView.extend({
         routing: 'configs'
       });
     }
+    if (this.get('metricTab')) {
+      menuItems.push({
+        label: Em.I18n.t('services.service.info.menu.metrics'),
+        id: 'metrics-service-tab',
+        routing: 'metrics'
+      });
+    }
     return menuItems;
   }.property(),
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/app/views/main/service/info/metrics_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/service/info/metrics_view.js b/ambari-web/app/views/main/service/info/metrics_view.js
new file mode 100644
index 0000000..161dce1
--- /dev/null
+++ b/ambari-web/app/views/main/service/info/metrics_view.js
@@ -0,0 +1,290 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+var App = require('app');
+var misc = require('utils/misc');
+require('views/main/service/service');
+require('data/service_graph_config');
+
+App.MainServiceInfoMetricsView = Em.View.extend(App.Persist, App.TimeRangeMixin, {
+  templateName: require('templates/main/service/info/metrics'),
+  /**
+   * @property {Number} chunkSize - number of columns in Metrics section
+   */
+  chunkSize: 5,
+
+  service: null,
+
+  svc: function () {
+    var svc = this.get('controller.content');
+    var svcName = svc.get('serviceName');
+    if (svcName) {
+      switch (svcName.toLowerCase()) {
+        case 'hdfs':
+          svc = App.HDFSService.find().objectAt(0);
+          break;
+        case 'yarn':
+          svc = App.YARNService.find().objectAt(0);
+          break;
+        case 'hbase':
+          svc = App.HBaseService.find().objectAt(0);
+          break;
+        case 'flume':
+          svc = App.FlumeService.find().objectAt(0);
+          break;
+        default:
+          break;
+      }
+    }
+    return svc;
+  }.property('controller.content.serviceName').volatile(),
+
+  getServiceModel: function (serviceName) {
+    var extended = App.Service.extendedModel[serviceName];
+    if (extended) {
+      return App[extended].find().objectAt(0);
+    }
+    return App.Service.find(serviceName);
+  },
+
+  serviceName: Em.computed.alias('service.serviceName'),
+
+  /**
+   * Contains graphs for this particular service
+   */
+  serviceMetricGraphs: [],
+
+  /**
+   * @type {boolean}
+   * @default false
+   */
+  serviceHasMetrics: false,
+
+  /**
+   * Key-name to store time range in Persist
+   * @type {string}
+   */
+  persistKey: Em.computed.format('time-range-service-{0}', 'service.serviceName'),
+
+  didInsertElement: function () {
+    var svcName = this.get('controller.content.serviceName');
+    this.set('service', this.getServiceModel(svcName));
+    var isMetricsSupported = svcName !== 'STORM' || App.get('isStormMetricsSupported');
+
+    this.get('controller').getActiveWidgetLayout();
+    if (App.get('supports.customizedWidgetLayout')) {
+      this.get('controller').loadWidgetLayouts();
+    }
+
+    if (svcName && isMetricsSupported) {
+      var allServices = require('data/service_graph_config');
+      this.constructGraphObjects(allServices[svcName.toLowerCase()]);
+    }
+    this.makeSortable();
+    this.addWidgetTooltip();
+  },
+
+  addWidgetTooltip: function() {
+    Em.run.later(this, function () {
+      App.tooltip($("[rel='add-widget-tooltip']"));
+      // enable description show up on hover
+      $('.img-thumbnail').hoverIntent(function() {
+        if ($(this).is('hover')) {
+          $(this).find('.hidden-description').delay(1000).fadeIn(200).end();
+        }
+      }, function() {
+        $(this).find('.hidden-description').stop().hide().end();
+      });
+    }, 1000);
+  },
+
+  willDestroyElement: function() {
+    $("[rel='add-widget-tooltip']").tooltip('destroy');
+    $('.img-thumbnail').off();
+    $('#widget_layout').sortable('destroy');
+    $('.widget.span2p4').detach().remove();
+    this.get('serviceMetricGraphs').clear();
+    this.set('service', null);
+  },
+
+   /*
+   * Find the graph class associated with each graph name, and split
+   * the array into rows of 5 for display on the page
+   * (the last row may contain fewer than 5 graphs)
+   */
+  constructGraphObjects: function (graphNames) {
+    var self = this,
+        stackService = App.StackService.find(this.get('controller.content.serviceName'));
+
+    if (!graphNames && !stackService.get('isServiceWithWidgets')) {
+      this.get('serviceMetricGraphs').clear();
+      this.set('serviceHasMetrics', false);
+      return;
+    }
+
+    // load time range(currentTimeRangeIndex) for current service from server
+    this.getUserPref(self.get('persistKey')).complete(function () {
+      var result = [], graphObjects = [], chunkSize = self.get('chunkSize');
+      if (graphNames) {
+        graphNames.forEach(function (graphName) {
+          graphObjects.push(App["ChartServiceMetrics" + graphName].extend());
+        });
+      }
+      while (graphObjects.length) {
+        result.push(graphObjects.splice(0, chunkSize));
+      }
+      self.set('serviceMetricGraphs', result);
+      self.set('serviceHasMetrics', true);
+    });
+  },
+
+  getUserPrefSuccessCallback: function (response, request) {
+    if (response) {
+      this.set('currentTimeRangeIndex', response);
+    }
+  },
+
+  getUserPrefErrorCallback: function (request) {
+    if (request.status === 404) {
+      this.postUserPref(this.get('persistKey'), 0);
+      this.set('currentTimeRangeIndex', 0);
+    }
+  },
+
+  /**
+   * list of static widget actions
+   * @type {Array}
+   */
+  staticGeneralWidgetActions: [
+    Em.Object.create({
+      label: Em.I18n.t('dashboard.widgets.actions.browse'),
+      class: 'glyphicon glyphicon-th',
+      action: 'goToWidgetsBrowser',
+      isAction: true
+    })
+  ],
+
+  /**
+   * list of static widget actions accessible with Admin/Operator privilege
+   * @type {Array}
+   */
+
+  staticAdminPrivelegeWidgetActions: [
+    Em.Object.create({
+      label: Em.I18n.t('dashboard.widgets.create'),
+      class: 'glyphicon glyphicon-plus',
+      action: 'createWidget',
+      isAction: true
+    })
+  ],
+
+  /**
+   * List of static actions related to widget layout
+   */
+  staticWidgetLayoutActions: [
+    Em.Object.create({
+      label: Em.I18n.t('dashboard.widgets.layout.save'),
+      class: 'glyphicon glyphicon-download-alt',
+      action: 'saveLayout',
+      isAction: true
+    }),
+    Em.Object.create({
+      label: Em.I18n.t('dashboard.widgets.layout.import'),
+      class: 'glyphicon glyphicon-file',
+      isAction: true,
+      layouts: App.WidgetLayout.find()
+    })
+  ],
+
+  /**
+   * @type {Array}
+   */
+  widgetActions: function() {
+    var options = [];
+    if (App.isAuthorized('SERVICE.MODIFY_CONFIGS')) {
+      if (App.supports.customizedWidgetLayout) {
+        options.pushObjects(this.get('staticWidgetLayoutActions'));
+      }
+      options.pushObjects(this.get('staticAdminPrivelegeWidgetActions'));
+    }
+    options.pushObjects(this.get('staticGeneralWidgetActions'));
+    return options;
+  }.property(''),
+
+  /**
+   * call action function defined in controller
+   * @param event
+   */
+  doWidgetAction: function(event) {
+    if($.isFunction(this.get('controller')[event.context])) {
+      this.get('controller')[event.context].apply(this.get('controller'));
+    }
+  },
+
+  /**
+   * onclick handler for a time range option
+   * @param {object} event
+   */
+  setTimeRange: function (event) {
+    var graphs = this.get('controller.widgets').filterProperty('widgetType', 'GRAPH'),
+      callback = function () {
+        graphs.forEach(function (widget) {
+          widget.set('properties.time_range', event.context.value);
+        });
+      };
+    this._super(event, callback);
+
+    // Preset time range is specified by user
+    if (event.context.value !== '0') {
+      callback();
+    }
+  },
+
+  /**
+   * Define if some widget is currently moving
+   * @type {boolean}
+   */
+  isMoving: false,
+
+  /**
+   * Make widgets' list sortable on New Dashboard style
+   */
+  makeSortable: function () {
+    var self = this;
+    $('html').on('DOMNodeInserted', '#widget_layout', function () {
+      $(this).sortable({
+        items: "> div",
+        cursor: "move",
+        tolerance: "pointer",
+        scroll: false,
+        update: function () {
+          var widgets = misc.sortByOrder($("#widget_layout .widget").map(function () {
+            return this.id;
+          }), self.get('controller.widgets'));
+          self.get('controller').saveWidgetLayout(widgets);
+        },
+        activate: function () {
+          self.set('isMoving', true);
+        },
+        deactivate: function () {
+          self.set('isMoving', false);
+        }
+      }).disableSelection();
+      $('html').off('DOMNodeInserted', '#widget_layout');
+    });
+  }
+});

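constructGraphObjects in the new view resolves each configured graph name to its
App['ChartServiceMetrics' + graphName] class and then chunks the list into rows of
chunkSize (5) for the template's table. The chunking step in isolation, as a standalone
sketch (the function name is illustrative):

    // Split a flat list into rows of `size`; the last row may be shorter.
    // Mirrors the while/splice loop above; note that splice empties the
    // input array, exactly as the original does to graphObjects.
    function chunkRows(items, size) {
      var rows = [];
      while (items.length) {
        rows.push(items.splice(0, size));
      }
      return rows;
    }

    // Example: 7 graphs with size 5 yield one full row and one row of 2.
    chunkRows(['g1', 'g2', 'g3', 'g4', 'g5', 'g6', 'g7'], 5);
    // -> [['g1','g2','g3','g4','g5'], ['g6','g7']]
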
http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/app/views/main/service/info/summary.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/service/info/summary.js b/ambari-web/app/views/main/service/info/summary.js
index 91b2ca3..a4769e6 100644
--- a/ambari-web/app/views/main/service/info/summary.js
+++ b/ambari-web/app/views/main/service/info/summary.js
@@ -21,13 +21,10 @@ var misc = require('utils/misc');
 require('views/main/service/service');
 require('data/service_graph_config');
 
-App.MainServiceInfoSummaryView = Em.View.extend(App.Persist, App.TimeRangeMixin, {
+App.MainServiceInfoSummaryView = Em.View.extend({
   templateName: require('templates/main/service/info/summary'),
-  /**
-   * @property {Number} chunkSize - number of columns in Metrics section
-   */
-  chunkSize: 5,
-  attributes:null,
+
+  attributes: null,
 
   /**
    * Contain array with list of master components from <code>App.Service.hostComponets</code> which are
@@ -165,6 +162,47 @@ App.MainServiceInfoSummaryView = Em.View.extend(App.Persist, App.TimeRangeMixin,
     Em.run.once(self, 'setComponentsContent');
   }.observes('service.hostComponents.length', 'service.slaveComponents.@each.totalCount', 'service.clientComponents.@each.totalCount'),
 
+  loadServiceSummary: function () {
+    var serviceName = this.get('serviceName');
+    var serviceSummaryView = null;
+
+    if (!serviceName) {
+      return;
+    }
+
+    if (this.get('oldServiceName')) {
+      // do not delete it!
+      return;
+    }
+
+    var customServiceView = this.get('serviceCustomViewsMap')[serviceName];
+    if (customServiceView) {
+      serviceSummaryView = customServiceView.extend({
+        service: this.get('service')
+      });
+    } else {
+      serviceSummaryView = Em.View.extend(App.MainDashboardServiceViewWrapper, {
+        templateName: this.get('templatePathPrefix') + 'base'
+      });
+    }
+    this.set('serviceSummaryView', serviceSummaryView);
+    this.set('oldServiceName', serviceName);
+  }.observes('serviceName'),
+
+  didInsertElement: function () {
+    this._super();
+    var svcName = this.get('controller.content.serviceName');
+    this.set('service', this.getServiceModel(svcName));
+    App.loadTimer.finish('Service Summary Page');
+  },
+
+  willDestroyElement: function() {
+    this.set('service', null);
+    this.get('mastersObj').clear();
+    this.get('slavesObj').clear();
+    this.get('clientObj').clear();
+  },
+
   setComponentsContent: function() {
     Em.run.next(function() {
       if (Em.isNone(this.get('service'))) {
@@ -372,270 +410,5 @@ App.MainServiceInfoSummaryView = Em.View.extend(App.Persist, App.TimeRangeMixin,
 
   rollingRestartStaleConfigSlaveComponents: function (componentName) {
     batchUtils.launchHostComponentRollingRestart(componentName.context, this.get('service.displayName'), this.get('service.passiveState') === "ON", true);
-  },
-
-   /*
-   * Find the graph class associated with the graph name, and split
-   * the array into sections of 5 for displaying on the page
-   * (will only display rows with 5 items)
-   */
-  constructGraphObjects: function (graphNames) {
-    var self = this,
-        stackService = App.StackService.find(this.get('controller.content.serviceName'));
-
-    if (!graphNames && !stackService.get('isServiceWithWidgets')) {
-      this.get('serviceMetricGraphs').clear();
-      this.set('isServiceMetricLoaded', false);
-      return;
-    }
-
-    // load time range(currentTimeRangeIndex) for current service from server
-    this.getUserPref(self.get('persistKey')).complete(function () {
-      var result = [], graphObjects = [], chunkSize = self.get('chunkSize');
-      if (graphNames) {
-        graphNames.forEach(function (graphName) {
-          graphObjects.push(App["ChartServiceMetrics" + graphName].extend());
-        });
-      }
-      while (graphObjects.length) {
-        result.push(graphObjects.splice(0, chunkSize));
-      }
-      self.set('serviceMetricGraphs', result);
-      self.set('isServiceMetricLoaded', true);
-    });
-  },
-
-  /**
-   * Contains graphs for this particular service
-   */
-  serviceMetricGraphs: [],
-
-  /**
-   * @type {boolean}
-   * @default false
-   */
-  isServiceMetricLoaded: false,
-
-  /**
-   * Key-name to store time range in Persist
-   * @type {string}
-   */
-  persistKey: Em.computed.format('time-range-service-{0}', 'service.serviceName'),
-
-  getUserPrefSuccessCallback: function (response, request) {
-    if (response) {
-      this.set('currentTimeRangeIndex', response);
-    }
-  },
-
-  getUserPrefErrorCallback: function (request) {
-    if (request.status === 404) {
-      this.postUserPref(this.get('persistKey'), 0);
-      this.set('currentTimeRangeIndex', 0);
-    }
-  },
-
-  /**
-   * list of static actions of widget
-   * @type {Array}
-   */
-  staticGeneralWidgetActions: [
-    Em.Object.create({
-      label: Em.I18n.t('dashboard.widgets.actions.browse'),
-      class: 'glyphicon glyphicon-th',
-      action: 'goToWidgetsBrowser',
-      isAction: true
-    })
-  ],
-
-  /**
-   *list of static actions of widget accessible to Admin/Operator privelege
-   * @type {Array}
-   */
-
-  staticAdminPrivelegeWidgetActions: [
-    Em.Object.create({
-      label: Em.I18n.t('dashboard.widgets.create'),
-      class: 'glyphicon glyphicon-plus',
-      action: 'createWidget',
-      isAction: true
-    })
-  ],
-
-  /**
-   * List of static actions related to widget layout
-   */
-  staticWidgetLayoutActions: [
-    Em.Object.create({
-      label: Em.I18n.t('dashboard.widgets.layout.save'),
-      class: 'glyphicon glyphicon-download-alt',
-      action: 'saveLayout',
-      isAction: true
-    }),
-    Em.Object.create({
-      label: Em.I18n.t('dashboard.widgets.layout.import'),
-      class: 'glyphicon glyphicon-file',
-      isAction: true,
-      layouts: App.WidgetLayout.find()
-    })
-  ],
-
-  /**
-   * @type {Array}
-   */
-  widgetActions: function() {
-    var options = [];
-    if (App.isAuthorized('SERVICE.MODIFY_CONFIGS')) {
-      if (App.supports.customizedWidgetLayout) {
-        options.pushObjects(this.get('staticWidgetLayoutActions'));
-      }
-      options.pushObjects(this.get('staticAdminPrivelegeWidgetActions'));
-    }
-    options.pushObjects(this.get('staticGeneralWidgetActions'));
-    return options;
-  }.property(''),
-
-  /**
-   * call action function defined in controller
-   * @param event
-   */
-  doWidgetAction: function(event) {
-    if($.isFunction(this.get('controller')[event.context])) {
-      this.get('controller')[event.context].apply(this.get('controller'));
-    }
-  },
-
-  /**
-   * onclick handler for a time range option
-   * @param {object} event
-   */
-  setTimeRange: function (event) {
-    var graphs = this.get('controller.widgets').filterProperty('widgetType', 'GRAPH'),
-      callback = function () {
-        graphs.forEach(function (widget) {
-          widget.set('properties.time_range', event.context.value);
-        });
-      };
-    this._super(event, callback);
-
-    // Preset time range is specified by user
-    if (event.context.value !== '0') {
-      callback();
-    }
-  },
-
-  loadServiceSummary: function () {
-    var serviceName = this.get('serviceName');
-    var serviceSummaryView = null;
-
-    if (!serviceName) {
-      return;
-    }
-
-    if (this.get('oldServiceName')) {
-      // do not delete it!
-      return;
-    }
-
-    var customServiceView = this.get('serviceCustomViewsMap')[serviceName];
-    if (customServiceView) {
-      serviceSummaryView = customServiceView.extend({
-        service: this.get('service')
-      });
-    } else {
-      serviceSummaryView = Em.View.extend(App.MainDashboardServiceViewWrapper, {
-        templateName: this.get('templatePathPrefix') + 'base'
-      });
-    }
-    this.set('serviceSummaryView', serviceSummaryView);
-    this.set('oldServiceName', serviceName);
-  }.observes('serviceName'),
-
-
-  /**
-   * Service metrics panel not displayed when metrics service (ex:Ganglia) is not in stack definition.
-   *
-   * @type {boolean}
-   */
-  isNoServiceMetricsService: Em.computed.equal('App.services.serviceMetrics.length', 0),
-
-  didInsertElement: function () {
-    this._super();
-    var svcName = this.get('controller.content.serviceName');
-    this.set('service', this.getServiceModel(svcName));
-    var isMetricsSupported = svcName !== 'STORM' || App.get('isStormMetricsSupported');
-
-    this.get('controller').getActiveWidgetLayout();
-    if (App.get('supports.customizedWidgetLayout')) {
-      this.get('controller').loadWidgetLayouts();
-    }
-
-    if (svcName && isMetricsSupported) {
-      var allServices = require('data/service_graph_config');
-      this.constructGraphObjects(allServices[svcName.toLowerCase()]);
-    }
-    this.makeSortable();
-    this.addWidgetTooltip();
-    App.loadTimer.finish('Service Summary Page');
-  },
-
-  addWidgetTooltip: function() {
-    Em.run.later(this, function () {
-      App.tooltip($("[rel='add-widget-tooltip']"));
-      // enable description show up on hover
-      $('.img-thumbnail').hoverIntent(function() {
-        if ($(this).is('hover')) {
-          $(this).find('.hidden-description').delay(1000).fadeIn(200).end();
-        }
-      }, function() {
-        $(this).find('.hidden-description').stop().hide().end();
-      });
-    }, 1000);
-  },
-
-  willDestroyElement: function() {
-    $("[rel='add-widget-tooltip']").tooltip('destroy');
-    $('.img-thumbnail').off();
-    $('#widget_layout').sortable('destroy');
-    $('.widget.span2p4').detach().remove();
-    this.get('serviceMetricGraphs').clear();
-    this.set('service', null);
-    this.get('mastersObj').clear();
-    this.get('slavesObj').clear();
-    this.get('clientObj').clear();
-  },
-
-  /**
-   * Define if some widget is currently moving
-   * @type {boolean}
-   */
-  isMoving: false,
-
-  /**
-   * Make widgets' list sortable on New Dashboard style
-   */
-  makeSortable: function () {
-    var self = this;
-    $('html').on('DOMNodeInserted', '#widget_layout', function () {
-      $(this).sortable({
-        items: "> div",
-        cursor: "move",
-        tolerance: "pointer",
-        scroll: false,
-        update: function () {
-          var widgets = misc.sortByOrder($("#widget_layout .widget").map(function () {
-            return this.id;
-          }), self.get('controller.widgets'));
-          self.get('controller').saveWidgetLayout(widgets);
-        },
-        activate: function () {
-          self.set('isMoving', true);
-        },
-        deactivate: function () {
-          self.set('isMoving', false);
-        }
-      }).disableSelection();
-      $('html').off('DOMNodeInserted', '#widget_layout');
-    });
   }
 });

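The summary view diff above completes the split: App.MainServiceInfoSummaryView drops
App.Persist and App.TimeRangeMixin along with all widget and graph handling, which now
lives in App.MainServiceInfoMetricsView. The resulting division of responsibilities,
sketched with bodies elided:

    // After this commit:
    App.MainServiceInfoSummaryView = Em.View.extend({
      // summary text plus master/slave/client component lists only
    });

    App.MainServiceInfoMetricsView = Em.View.extend(App.Persist, App.TimeRangeMixin, {
      // widget layouts, metric graphs, and time-range persistence
    });
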
http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/app/views/main/service/item.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/service/item.js b/ambari-web/app/views/main/service/item.js
index 43d75e6..45c783b 100644
--- a/ambari-web/app/views/main/service/item.js
+++ b/ambari-web/app/views/main/service/item.js
@@ -326,6 +326,12 @@ App.MainServiceItemView = Em.View.extend({
     return App.get('services.servicesWithHeatmapTab').contains(this.get('controller.content.serviceName'));
   }.property('controller.content.serviceName', 'App.services.servicesWithHeatmapTab'),
 
+  hasMetricTab: function() {
+    let serviceName = this.get('controller.content.serviceName');
+    let graphs = require('data/service_graph_config')[serviceName.toLowerCase()];
+    return graphs || App.StackService.find(serviceName).get('isServiceWithWidgets');
+  }.property('controller.content.serviceName'),
+
   didInsertElement: function () {
     this.get('controller').setStartStopState();
   },

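hasMetricTab shows the new Metrics tab when the service either has preconfigured graphs in
data/service_graph_config or is a stack service with enhanced widgets. Note it returns the
graph config array itself (truthy) rather than a strict boolean; an equivalent standalone
check coerced with Boolean (the helper name is illustrative):

    // True when the service has either static metric graphs or stack widgets.
    function serviceHasMetricTab(serviceName) {
      var graphs = require('data/service_graph_config')[serviceName.toLowerCase()];
      return Boolean(graphs || App.StackService.find(serviceName).get('isServiceWithWidgets'));
    }
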
http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/test/controllers/main/service/info/metric_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/service/info/metric_test.js b/ambari-web/test/controllers/main/service/info/metric_test.js
new file mode 100644
index 0000000..5ef6279
--- /dev/null
+++ b/ambari-web/test/controllers/main/service/info/metric_test.js
@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+var App = require('app');
+require('controllers/main/service/info/metric');
+var testHelpers = require('test/helpers');
+function getController() {
+  return App.MainServiceInfoMetricsController.create();
+}
+
+describe('App.MainServiceInfoMetricsController', function () {
+
+  var controller;
+
+  beforeEach(function () {
+    controller = App.MainServiceInfoMetricsController.create();
+  });
+
+  App.TestAliases.testAsComputedOr(getController(), 'showTimeRangeControl', ['!isServiceWithEnhancedWidgets', 'someWidgetGraphExists']);
+
+
+  describe("#getActiveWidgetLayout() for Enhanced Dashboard", function () {
+
+    it("make GET call", function () {
+      controller.reopen({
+        isServiceWithEnhancedWidgets: true,
+        content: Em.Object.create({serviceName: 'HDFS'})
+      });
+      controller.getActiveWidgetLayout();
+      expect(testHelpers.findAjaxRequest('name', 'widgets.layouts.active.get')).to.exists;
+    });
+  });
+
+  describe("#getActiveWidgetLayoutSuccessCallback()", function () {
+    beforeEach(function () {
+      sinon.stub( App.widgetLayoutMapper, 'map');
+      sinon.stub( App.widgetMapper, 'map');
+    });
+    afterEach(function () {
+      App.widgetLayoutMapper.map.restore();
+      App.widgetMapper.map.restore();
+    });
+    it("isWidgetLayoutsLoaded should be set to true", function () {
+      controller.reopen({
+        isServiceWithEnhancedWidgets: true,
+        content: Em.Object.create({serviceName: 'HDFS'})
+      });
+      controller.getActiveWidgetLayoutSuccessCallback({items:[{
+        WidgetLayoutInfo: {}
+      }]});
+      expect(controller.get('isWidgetsLoaded')).to.be.true;
+    });
+
+  });
+
+  describe("#hideWidgetSuccessCallback()", function () {
+    beforeEach(function () {
+      sinon.stub(App.widgetLayoutMapper, 'map');
+      sinon.stub(controller, 'propertyDidChange');
+      var params = {
+        data: {
+          WidgetLayoutInfo: {
+            widgets: [
+              {id: 1}
+            ]
+          }
+        }
+      };
+      controller.hideWidgetSuccessCallback({}, {}, params);
+    });
+    afterEach(function () {
+      App.widgetLayoutMapper.map.restore();
+      controller.propertyDidChange.restore();
+    });
+    it("mapper is called with valid data", function () {
+      expect(App.widgetLayoutMapper.map.calledWith({
+        items: [{
+          WidgetLayoutInfo: {
+            widgets: [
+              {
+                WidgetInfo: {
+                  id: 1
+                }
+              }
+            ]
+          }
+        }]
+      })).to.be.true;
+    });
+    it('`widgets` is forced to be recalculated', function () {
+      expect(controller.propertyDidChange.calledWith('widgets')).to.be.true;
+    });
+  });
+
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/test/controllers/main/service/info/summary_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/service/info/summary_test.js b/ambari-web/test/controllers/main/service/info/summary_test.js
index 51dd595..e5cc32a 100644
--- a/ambari-web/test/controllers/main/service/info/summary_test.js
+++ b/ambari-web/test/controllers/main/service/info/summary_test.js
@@ -18,7 +18,6 @@
 
 var App = require('app');
 require('controllers/main/service/info/summary');
-var testHelpers = require('test/helpers');
 function getController() {
   return App.MainServiceInfoSummaryController.create();
 }
@@ -31,8 +30,6 @@ describe('App.MainServiceInfoSummaryController', function () {
     controller = App.MainServiceInfoSummaryController.create();
   });
 
-App.TestAliases.testAsComputedOr(getController(), 'showTimeRangeControl', ['!isServiceWithEnhancedWidgets', 'someWidgetGraphExists']);
-
   describe('#setRangerPlugins', function () {
 
     var cases = [
@@ -184,77 +181,4 @@ App.TestAliases.testAsComputedOr(getController(), 'showTimeRangeControl', ['!isS
 
   });
 
-  describe("#getActiveWidgetLayout() for Enhanced Dashboard", function () {
-
-    it("make GET call", function () {
-      var _controller = App.MainServiceInfoSummaryController.create({
-        isServiceWithEnhancedWidgets: true,
-        content: Em.Object.create({serviceName: 'HDFS'})
-      });
-      _controller.getActiveWidgetLayout();
-      expect(testHelpers.findAjaxRequest('name', 'widgets.layouts.active.get')).to.exists;
-    });
-  });
-
-  describe("#getActiveWidgetLayoutSuccessCallback()", function () {
-    beforeEach(function () {
-      sinon.stub( App.widgetLayoutMapper, 'map');
-      sinon.stub( App.widgetMapper, 'map');
-    });
-    afterEach(function () {
-      App.widgetLayoutMapper.map.restore();
-      App.widgetMapper.map.restore();
-    });
-    it("isWidgetLayoutsLoaded should be set to true", function () {
-      var _controller = App.MainServiceInfoSummaryController.create({
-        isServiceWithEnhancedWidgets: true,
-        content: Em.Object.create({serviceName: 'HDFS'})
-      });
-      _controller.getActiveWidgetLayoutSuccessCallback({items:[{
-        WidgetLayoutInfo: {}
-      }]});
-      expect(_controller.get('isWidgetsLoaded')).to.be.true;
-    });
-
-  });
-
-  describe("#hideWidgetSuccessCallback()", function () {
-    beforeEach(function () {
-      sinon.stub(App.widgetLayoutMapper, 'map');
-      sinon.stub(controller, 'propertyDidChange');
-      var params = {
-        data: {
-          WidgetLayoutInfo: {
-            widgets: [
-              {id: 1}
-            ]
-          }
-        }
-      };
-      controller.hideWidgetSuccessCallback({}, {}, params);
-    });
-    afterEach(function () {
-      App.widgetLayoutMapper.map.restore();
-      controller.propertyDidChange.restore();
-    });
-    it("mapper is called with valid data", function () {
-      expect(App.widgetLayoutMapper.map.calledWith({
-        items: [{
-          WidgetLayoutInfo: {
-            widgets: [
-              {
-                WidgetInfo: {
-                  id: 1
-                }
-              }
-            ]
-          }
-        }]
-      })).to.be.true;
-    });
-    it('`widgets` is forced to be recalculated', function () {
-      expect(controller.propertyDidChange.calledWith('widgets')).to.be.true;
-    });
-  });
-
 });
\ No newline at end of file
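
The tests deleted above asserted that a controller method fires a named AJAX request, via the project's test/helpers findAjaxRequest. A rough standalone sketch of the same kind of assertion follows, with a hypothetical `ajax.send` transport standing in for App.ajax, under the same mocha/chai/sinon assumptions as the previous sketch.

var sinon = require('sinon');
var expect = require('chai').expect;

describe('#getActiveWidgetLayout()', function () {
  var ajax = { send: function (opts) {} };   // stand-in transport (hypothetical)
  var controller = {
    getActiveWidgetLayout: function () {
      ajax.send({ name: 'widgets.layouts.active.get' });
    }
  };

  beforeEach(function () { sinon.stub(ajax, 'send'); });
  afterEach(function () { ajax.send.restore(); });

  it('makes the GET call', function () {
    controller.getActiveWidgetLayout();
    expect(ajax.send.calledWithMatch({ name: 'widgets.layouts.active.get' })).to.be.true;
  });
});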

http://git-wip-us.apache.org/repos/asf/ambari/blob/6eb273e1/ambari-web/test/views/main/service/info/metrics_view_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/service/info/metrics_view_test.js b/ambari-web/test/views/main/service/info/metrics_view_test.js
new file mode 100644
index 0000000..916d451
--- /dev/null
+++ b/ambari-web/test/views/main/service/info/metrics_view_test.js
@@ -0,0 +1,334 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+var App = require('app');
+require('views/main/service/info/metrics_view');
+
+describe('App.MainServiceInfoMetricsView', function() {
+
+  var view = App.MainServiceInfoMetricsView.create({
+    controller: Em.Object.create({
+      content: Em.Object.create({
+        id: 'HDFS',
+        serviceName: 'HDFS',
+        hostComponents: []
+      }),
+      getActiveWidgetLayout: Em.K,
+      loadWidgetLayouts: Em.K
+    }),
+    service: Em.Object.create()
+  });
+
+  describe("#getServiceModel()", function() {
+
+    beforeEach(function() {
+      sinon.stub(App.Service, 'find').returns({serviceName: 'S1'});
+      sinon.stub(App.HDFSService, 'find').returns([{serviceName: 'HDFS'}]);
+    });
+    afterEach(function() {
+      App.Service.find.restore();
+      App.HDFSService.find.restore();
+    });
+
+    it("HDFS service", function() {
+      expect(view.getServiceModel('HDFS')).to.eql({serviceName: 'HDFS'});
+    });
+
+    it("Simple model service", function() {
+      expect(view.getServiceModel('S1')).to.eql({serviceName: 'S1'});
+    });
+  });
+
+  describe("#constructGraphObjects()", function() {
+    var mock = Em.Object.create({
+      isServiceWithWidgets: false
+    });
+
+    beforeEach(function() {
+      sinon.stub(App.StackService, 'find').returns(mock);
+      sinon.stub(view, 'getUserPref').returns({
+        complete: function(callback){callback();}
+      })
+    });
+    afterEach(function() {
+      App.StackService.find.restore();
+      view.getUserPref.restore();
+    });
+
+    it("metrics not loaded", function() {
+      mock.set('isServiceWithWidgets', false);
+      view.constructGraphObjects(null);
+      expect(view.get('serviceHasMetrics')).to.be.false;
+      expect(view.getUserPref.called).to.be.false;
+    });
+
+    it("metrics loaded", function() {
+      App.ChartServiceMetricsG1 = Em.Object.extend();
+      mock.set('isServiceWithWidgets', true);
+      view.constructGraphObjects(['G1']);
+      expect(view.get('serviceHasMetrics')).to.be.true;
+      expect(view.getUserPref.calledOnce).to.be.true;
+      expect(view.get('serviceMetricGraphs')).to.not.be.empty;
+    });
+  });
+
+  describe("#getUserPrefSuccessCallback()", function() {
+
+    it("currentTimeRangeIndex should be set", function() {
+      view.getUserPrefSuccessCallback(1);
+      expect(view.get('currentTimeRangeIndex')).to.equal(1);
+    });
+  });
+
+  describe("#getUserPrefErrorCallback()", function() {
+
+    beforeEach(function() {
+      sinon.stub(view, 'postUserPref');
+    });
+    afterEach(function() {
+      view.postUserPref.restore();
+    });
+
+    it("request.status = 404", function() {
+      view.getUserPrefErrorCallback({status: 404});
+      expect(view.get('currentTimeRangeIndex')).to.equal(0);
+      expect(view.postUserPref.calledOnce).to.be.true;
+    });
+
+    it("request.status = 403", function() {
+      view.getUserPrefErrorCallback({status: 403});
+      expect(view.postUserPref.called).to.be.false;
+    });
+  });
+
+  describe("#widgetActions", function() {
+
+    beforeEach(function() {
+      this.mock = sinon.stub(App, 'isAuthorized');
+      view.setProperties({
+        staticWidgetLayoutActions: [{id: 1}],
+        staticAdminPrivelegeWidgetActions: [{id: 2}],
+        staticGeneralWidgetActions: [{id: 3}]
+      });
+    });
+    afterEach(function() {
+      this.mock.restore();
+    });
+
+    it("not authorized", function() {
+      this.mock.returns(false);
+      view.propertyDidChange('widgetActions');
+      expect(view.get('widgetActions').mapProperty('id')).to.eql([3]);
+    });
+
+    it("is authorized", function() {
+      this.mock.returns(true);
+      App.supports.customizedWidgetLayout = true;
+      view.propertyDidChange('widgetActions');
+      expect(view.get('widgetActions').mapProperty('id')).to.eql([1, 2, 3]);
+    });
+  });
+
+  describe("#doWidgetAction()", function() {
+
+    beforeEach(function() {
+      view.set('controller.action1', Em.K);
+      sinon.stub(view.get('controller'), 'action1');
+    });
+    afterEach(function() {
+      view.get('controller').action1.restore();
+    });
+
+    it("action exist", function() {
+      view.doWidgetAction({context: 'action1'});
+      expect(view.get('controller').action1.calledOnce).to.be.true;
+    });
+  });
+
+  describe("#setTimeRange", function() {
+
+    it("range = 0", function() {
+      var widget = Em.Object.create({
+        widgetType: 'GRAPH',
+        properties: {
+          time_range: '0'
+        }
+      });
+      view.set('controller.widgets', [widget]);
+      view.setTimeRange({context: {value: '0'}});
+      expect(widget.get('properties').time_range).to.be.equal('0')
+    });
+
+    it("range = 1", function() {
+      var widget = Em.Object.create({
+        widgetType: 'GRAPH',
+        properties: {
+          time_range: 0
+        }
+      });
+      view.set('controller.widgets', [widget]);
+      view.setTimeRange({context: {value: '1'}});
+      expect(widget.get('properties').time_range).to.be.equal('1')
+    });
+  });
+
+  describe("#makeSortable()", function() {
+    var mock = {
+      on: function(arg1, arg2, callback) {
+        callback();
+      },
+      off: Em.K,
+      sortable: function() {
+        return {
+          disableSelection: Em.K
+        }
+      }
+    };
+
+    beforeEach(function() {
+      sinon.stub(window, '$').returns(mock);
+      sinon.spy(mock, 'on');
+      sinon.spy(mock, 'off');
+      sinon.spy(mock, 'sortable');
+      view.makeSortable();
+    });
+    afterEach(function() {
+      window.$.restore();
+      mock.on.restore();
+      mock.off.restore();
+      mock.sortable.restore();
+    });
+
+    it("on() should be called", function() {
+      expect(mock.on.calledWith('DOMNodeInserted', '#widget_layout')).to.be.true;
+    });
+
+    it("sortable() should be called", function() {
+      expect(mock.sortable.calledOnce).to.be.true;
+    });
+
+    it("off() should be called", function() {
+      expect(mock.off.calledWith('DOMNodeInserted', '#widget_layout')).to.be.true;
+    });
+  });
+
+  describe('#didInsertElement', function () {
+
+    beforeEach(function () {
+      sinon.stub(view, 'constructGraphObjects', Em.K);
+      this.mock = sinon.stub(App, 'get');
+      sinon.stub(view, 'getServiceModel');
+      sinon.stub(view.get('controller'), 'getActiveWidgetLayout');
+      sinon.stub(view.get('controller'), 'loadWidgetLayouts');
+      sinon.stub(view, 'makeSortable');
+      sinon.stub(view, 'addWidgetTooltip');
+
+    });
+
+    afterEach(function () {
+      view.constructGraphObjects.restore();
+      this.mock.restore();
+      view.getServiceModel.restore();
+      view.get('controller').getActiveWidgetLayout.restore();
+      view.get('controller').loadWidgetLayouts.restore();
+      view.makeSortable.restore();
+      view.addWidgetTooltip.restore();
+    });
+
+    it("getServiceModel should be called", function() {
+      view.didInsertElement();
+      expect(view.getServiceModel.calledOnce).to.be.true;
+    });
+    it("addWidgetTooltip should be called", function() {
+      view.didInsertElement();
+      expect(view.addWidgetTooltip.calledOnce).to.be.true;
+    });
+    it("makeSortable should be called", function() {
+      view.didInsertElement();
+      expect(view.makeSortable.calledOnce).to.be.true;
+    });
+    it("getActiveWidgetLayout should be called", function() {
+      view.didInsertElement();
+      expect(view.get('controller').getActiveWidgetLayout.calledOnce).to.be.true;
+    });
+
+    describe("serviceName is null, metrics not supported, widgets not supported", function() {
+      beforeEach(function () {
+        view.set('controller.content.serviceName', null);
+        this.mock.returns(false);
+        view.didInsertElement();
+      });
+
+      it("loadWidgetLayouts should not be called", function() {
+        expect(view.get('controller').loadWidgetLayouts.called).to.be.false;
+      });
+      it("constructGraphObjects should not be called", function() {
+        expect(view.constructGraphObjects.called).to.be.false;
+      });
+    });
+
+    describe("serviceName is set, metrics is supported, widgets is supported", function() {
+      beforeEach(function () {
+        view.set('controller.content.serviceName', 'S1');
+        this.mock.returns(true);
+        view.didInsertElement();
+      });
+
+      it("loadWidgetLayouts should be called", function() {
+        expect(view.get('controller').loadWidgetLayouts.calledOnce).to.be.true;
+      });
+      it("constructGraphObjects should be called", function() {
+        expect(view.constructGraphObjects.calledOnce).to.be.true;
+      });
+    });
+  });
+
+  describe("#addWidgetTooltip()", function() {
+    var mock = {
+      hoverIntent: Em.K
+    };
+
+    beforeEach(function() {
+      sinon.stub(Em.run, 'later', function(arg1, callback) {
+        callback();
+      });
+      sinon.stub(App, 'tooltip');
+      sinon.stub(window, '$').returns(mock);
+      sinon.spy(mock, 'hoverIntent');
+      view.addWidgetTooltip();
+    });
+    afterEach(function() {
+      Em.run.later.restore();
+      App.tooltip.restore();
+      window.$.restore();
+      mock.hoverIntent.restore();
+    });
+
+    it("Em.run.later should be called", function() {
+      expect(Em.run.later.calledOnce).to.be.true;
+    });
+    it("App.tooltip should be called", function() {
+      expect(App.tooltip.calledOnce).to.be.true;
+    });
+    it("hoverIntent should be called", function() {
+      expect(mock.hoverIntent.calledOnce).to.be.true;
+    });
+  });
+
+});
\ No newline at end of file
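
Several tests in the new file (makeSortable, addWidgetTooltip) stub window.$ wholesale so jQuery plugins such as sortable and hoverIntent never touch a real DOM. A condensed sketch of that technique, assuming a browser-like environment where window.$ already exists (as under ambari-web's test runner); fakeEl and the selector are hypothetical.

var sinon = require('sinon');
var expect = require('chai').expect;

describe('stubbing the global $ and a jQuery plugin', function () {
  // Fake element exposing only the plugin surface the code under test touches.
  var fakeEl = { hoverIntent: function () {} };

  beforeEach(function () {
    sinon.stub(window, '$').returns(fakeEl);  // every $(...) now yields fakeEl
    sinon.spy(fakeEl, 'hoverIntent');
  });
  afterEach(function () {
    window.$.restore();
    fakeEl.hoverIntent.restore();
  });

  it('invokes the plugin on the selected element', function () {
    window.$('div[rel=tooltip]').hoverIntent(function () {});
    expect(fakeEl.hoverIntent.calledOnce).to.be.true;
  });
});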


[15/50] [abbrv] ambari git commit: AMBARI-22164: Update services API to use service_name, service_type instead of service_display_name, service_name (jluniya)

Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java
index c01b901..f7559c6 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java
@@ -88,7 +88,7 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
   protected static final String COMPONENT_SERVICE_GROUP_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "service_group_name";
   protected static final String COMPONENT_SERVICE_ID_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "service_id";
   protected static final String COMPONENT_SERVICE_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "service_name";
-  protected static final String COMPONENT_SERVICE_DISPLAY_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "service_display_name";
+  protected static final String COMPONENT_SERVICE_TYPE_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "service_type";
   protected static final String COMPONENT_COMPONENT_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "component_name";
   protected static final String COMPONENT_DISPLAY_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "display_name";
   protected static final String COMPONENT_STATE_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "state";
@@ -114,7 +114,7 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
   private static Set<String> pkPropertyIds = Sets.newHashSet(
           COMPONENT_CLUSTER_NAME_PROPERTY_ID,
           COMPONENT_SERVICE_GROUP_NAME_PROPERTY_ID,
-          COMPONENT_SERVICE_DISPLAY_NAME_PROPERTY_ID,
+          COMPONENT_SERVICE_NAME_PROPERTY_ID,
           COMPONENT_COMPONENT_NAME_PROPERTY_ID);
 
   /**
@@ -135,7 +135,7 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
     PROPERTY_IDS.add(COMPONENT_SERVICE_GROUP_NAME_PROPERTY_ID);
     PROPERTY_IDS.add(COMPONENT_SERVICE_ID_PROPERTY_ID);
     PROPERTY_IDS.add(COMPONENT_SERVICE_NAME_PROPERTY_ID);
-    PROPERTY_IDS.add(COMPONENT_SERVICE_DISPLAY_NAME_PROPERTY_ID);
+    PROPERTY_IDS.add(COMPONENT_SERVICE_TYPE_PROPERTY_ID);
     PROPERTY_IDS.add(COMPONENT_COMPONENT_NAME_PROPERTY_ID);
     PROPERTY_IDS.add(COMPONENT_DISPLAY_NAME_PROPERTY_ID);
     PROPERTY_IDS.add(COMPONENT_STATE_PROPERTY_ID);
@@ -158,7 +158,7 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
     // keys
     KEY_PROPERTY_IDS.put(Resource.Type.Component, COMPONENT_COMPONENT_NAME_PROPERTY_ID);
     KEY_PROPERTY_IDS.put(Resource.Type.ServiceGroup, COMPONENT_SERVICE_GROUP_NAME_PROPERTY_ID);
-    KEY_PROPERTY_IDS.put(Resource.Type.Service, COMPONENT_SERVICE_DISPLAY_NAME_PROPERTY_ID);
+    KEY_PROPERTY_IDS.put(Resource.Type.Service, COMPONENT_SERVICE_NAME_PROPERTY_ID);
     KEY_PROPERTY_IDS.put(Resource.Type.Cluster, COMPONENT_CLUSTER_NAME_PROPERTY_ID);
   }
 
@@ -217,7 +217,7 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
         resource.setProperty(COMPONENT_SERVICE_GROUP_NAME_PROPERTY_ID, response.getServiceGroupName());
         resource.setProperty(COMPONENT_SERVICE_ID_PROPERTY_ID, response.getServiceId());
         resource.setProperty(COMPONENT_SERVICE_NAME_PROPERTY_ID, response.getServiceName());
-        resource.setProperty(COMPONENT_SERVICE_DISPLAY_NAME_PROPERTY_ID, response.getServiceDisplayName());
+        resource.setProperty(COMPONENT_SERVICE_TYPE_PROPERTY_ID, response.getServiceType());
         resource.setProperty(COMPONENT_COMPONENT_NAME_PROPERTY_ID, response.getComponentName());
         resource.setProperty(COMPONENT_DISPLAY_NAME_PROPERTY_ID, response.getDisplayName());
         resource.setProperty(COMPONENT_STATE_PROPERTY_ID, response.getDesiredState());
@@ -264,7 +264,7 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
       setResourceProperty(resource, COMPONENT_SERVICE_GROUP_NAME_PROPERTY_ID, response.getServiceGroupName(), requestedIds);
       setResourceProperty(resource, COMPONENT_SERVICE_ID_PROPERTY_ID, response.getServiceId(), requestedIds);
       setResourceProperty(resource, COMPONENT_SERVICE_NAME_PROPERTY_ID, response.getServiceName(), requestedIds);
-      setResourceProperty(resource, COMPONENT_SERVICE_DISPLAY_NAME_PROPERTY_ID, response.getServiceDisplayName(), requestedIds);
+      setResourceProperty(resource, COMPONENT_SERVICE_TYPE_PROPERTY_ID, response.getServiceType(), requestedIds);
       setResourceProperty(resource, COMPONENT_COMPONENT_NAME_PROPERTY_ID, response.getComponentName(), requestedIds);
       setResourceProperty(resource, COMPONENT_DISPLAY_NAME_PROPERTY_ID, response.getDisplayName(), requestedIds);
       setResourceProperty(resource, COMPONENT_STATE_PROPERTY_ID, response.getDesiredState(), requestedIds);
@@ -356,7 +356,7 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
     return new ServiceComponentRequest(
         (String) properties.get(COMPONENT_CLUSTER_NAME_PROPERTY_ID),
         (String) properties.get(COMPONENT_SERVICE_GROUP_NAME_PROPERTY_ID),
-        (String) properties.get(COMPONENT_SERVICE_DISPLAY_NAME_PROPERTY_ID),
+        (String) properties.get(COMPONENT_SERVICE_NAME_PROPERTY_ID),
         (String) properties.get(COMPONENT_COMPONENT_NAME_PROPERTY_ID),
         (String) properties.get(COMPONENT_STATE_PROPERTY_ID),
         (String) properties.get(COMPONENT_RECOVERY_ENABLED_ID),
@@ -384,7 +384,7 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
     for (ServiceComponentRequest request : requests) {
       Validate.notEmpty(request.getComponentName(), "component name should be non-empty");
       Validate.notEmpty(request.getServiceGroupName(), "service group name should be non-empty");
-      Validate.notEmpty(request.getServiceDisplayName(), "service display name should be non-empty");
+      Validate.notEmpty(request.getServiceName(), "service name should be non-empty");
       Cluster cluster = getClusterForRequest(request, clusters);
 
       isAuthorized(cluster, getRequiredCreateAuthorizations());
@@ -396,16 +396,16 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
       }
 
       Map<String, Set<String>> serviceComponents = componentNames.get(request.getClusterName());
-      if (!serviceComponents.containsKey(request.getServiceDisplayName())) {
-        serviceComponents.put(request.getServiceDisplayName(), new HashSet<String>());
+      if (!serviceComponents.containsKey(request.getServiceName())) {
+        serviceComponents.put(request.getServiceName(), new HashSet<String>());
       }
 
-      if (serviceComponents.get(request.getServiceDisplayName()).contains(request.getComponentName())) {
+      if (serviceComponents.get(request.getServiceName()).contains(request.getComponentName())) {
         // throw error later for dup
         duplicates.add(request.toString());
         continue;
       }
-      serviceComponents.get(request.getServiceDisplayName()).add(request.getComponentName());
+      serviceComponents.get(request.getServiceName()).add(request.getComponentName());
 
       if (StringUtils.isNotEmpty(request.getDesiredState())) {
         Validate.isTrue(State.INIT == State.valueOf(request.getDesiredState()),
@@ -427,7 +427,7 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
 
       StackId stackId = s.getDesiredStackId();
       if (!ambariMetaInfo.isValidServiceComponent(stackId.getStackName(),
-          stackId.getStackVersion(), s.getName(), request.getComponentName())) {
+          stackId.getStackVersion(), s.getServiceType(), request.getComponentName())) {
         throw new IllegalArgumentException("Unsupported or invalid component"
             + " in stack stackInfo=" + stackId.getStackId()
             + " request=" + request);
@@ -448,7 +448,7 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
     // now doing actual work
     for (ServiceComponentRequest request : requests) {
       Cluster cluster = clusters.getCluster(request.getClusterName());
-      Service s = cluster.getService(request.getServiceDisplayName());
+      Service s = cluster.getService(request.getServiceName());
       ServiceComponent sc = serviceComponentFactory.createNew(s, request.getComponentName());
       sc.setDesiredRepositoryVersion(s.getDesiredRepositoryVersion());
 
@@ -545,7 +545,7 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
     }
 
     Set<Service> services = new HashSet<>();
-    if (StringUtils.isNotEmpty(request.getServiceDisplayName())) {
+    if (StringUtils.isNotEmpty(request.getServiceName())) {
       services.add(getServiceFromCluster(request, cluster));
     } else {
       services.addAll(cluster.getServices().values());
@@ -624,7 +624,7 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
       final Cluster cluster = getClusterForRequest(request, clusters);
       final String clusterName = request.getClusterName();
       final String serviceGroupName = request.getServiceGroupName();
-      final String serviceName = request.getServiceDisplayName();
+      final String serviceName = request.getServiceName();
       final String componentName = request.getComponentName();
 
       LOG.info("Received a updateComponent request: {}", request);
@@ -876,7 +876,7 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
 
   private Service getServiceFromCluster(final ServiceComponentRequest request, final Cluster cluster) throws AmbariException {
     try {
-      return cluster.getService(request.getServiceDisplayName());
+      return cluster.getService(request.getServiceName());
     } catch (ServiceNotFoundException e) {
       throw new ParentObjectNotFoundException("Parent Service resource doesn't exist.", e);
     }
@@ -906,7 +906,7 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
   private void setServiceNameIfAbsent(final ServiceComponentRequest request,
                                       final Cluster cluster,
                                       final AmbariMetaInfo ambariMetaInfo) throws AmbariException {
-    if (StringUtils.isEmpty(request.getServiceDisplayName())) {
+    if (StringUtils.isEmpty(request.getServiceName())) {
 
       String componentName = request.getComponentName();
 
@@ -919,7 +919,7 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
                 + ", componentName=" + request.getComponentName()
                 + ", clusterName=" + cluster.getClusterName());
       }
-      request.setServiceDisplayName(serviceName);
+      request.setServiceName(serviceName);
     }
   }
 

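For API consumers, the net effect of this file's changes is a key rename: service_display_name disappears, service_name becomes the identifying key, and service_type carries the stack service type. A sketch of a component query using the new field names follows; the cluster name and the ServiceComponentInfo prefix are assumptions (the RESPONSE_KEY constant's value is not shown in this hunk), not taken verbatim from the patch.

// Build a components query against the renamed fields (illustrative only).
var fields = [
  'ServiceComponentInfo/service_group_name',
  'ServiceComponentInfo/service_name',   // now part of the primary key
  'ServiceComponentInfo/service_type',   // replaces service_display_name
  'ServiceComponentInfo/component_name',
  'ServiceComponentInfo/state'
].join(',');

var url = '/api/v1/clusters/c1/components?fields=' + fields;
// e.g. fetch(url, { headers: { 'X-Requested-By': 'ambari' } }) from a browser client
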
http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
index 2e303fd..8f1dc7c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
@@ -89,7 +89,7 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
   public static final String HOST_COMPONENT_SERVICE_GROUP_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "service_group_name";
   public static final String HOST_COMPONENT_SERVICE_ID_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "service_id";
   public static final String HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "service_name";
-  public static final String HOST_COMPONENT_SERVICE_DISPLAY_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "service_display_name";
+  public static final String HOST_COMPONENT_SERVICE_TYPE_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "service_type";
   public static final String HOST_COMPONENT_HOST_COMPONENT_ID_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "id";
   public static final String HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "component_name";
   public static final String HOST_COMPONENT_DISPLAY_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "display_name";
@@ -112,7 +112,7 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
     new HashSet<>(Arrays.asList(new String[]{
       HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID,
       HOST_COMPONENT_SERVICE_GROUP_NAME_PROPERTY_ID,
-      HOST_COMPONENT_SERVICE_DISPLAY_NAME_PROPERTY_ID,
+      HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID,
       HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID,
       HOST_COMPONENT_HOST_NAME_PROPERTY_ID}));
 
@@ -180,7 +180,7 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
         resource.setProperty(HOST_COMPONENT_SERVICE_GROUP_NAME_PROPERTY_ID, response.getServiceGroupName());
         resource.setProperty(HOST_COMPONENT_SERVICE_ID_PROPERTY_ID, response.getServiceId());
         resource.setProperty(HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID, response.getServiceName());
-        resource.setProperty(HOST_COMPONENT_SERVICE_DISPLAY_NAME_PROPERTY_ID, response.getServiceDisplayName());
+        resource.setProperty(HOST_COMPONENT_SERVICE_TYPE_PROPERTY_ID, response.getServiceType());
         resource.setProperty(HOST_COMPONENT_HOST_COMPONENT_ID_PROPERTY_ID, response.getHostComponentId());
         resource.setProperty(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID, response.getComponentName());
         resource.setProperty(HOST_COMPONENT_DISPLAY_NAME_PROPERTY_ID, response.getDisplayName());
@@ -252,7 +252,7 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
       setResourceProperty(resource, HOST_COMPONENT_SERVICE_GROUP_NAME_PROPERTY_ID, response.getServiceGroupName(), requestedIds);
       setResourceProperty(resource, HOST_COMPONENT_SERVICE_ID_PROPERTY_ID, response.getServiceId(), requestedIds);
       setResourceProperty(resource, HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID, response.getServiceName(), requestedIds);
-      setResourceProperty(resource, HOST_COMPONENT_SERVICE_DISPLAY_NAME_PROPERTY_ID, response.getServiceDisplayName(), requestedIds);
+      setResourceProperty(resource, HOST_COMPONENT_SERVICE_TYPE_PROPERTY_ID, response.getServiceType(), requestedIds);
       setResourceProperty(resource, HOST_COMPONENT_HOST_COMPONENT_ID_PROPERTY_ID, response.getHostComponentId(), requestedIds);
       setResourceProperty(resource, HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID, response.getComponentName(), requestedIds);
       setResourceProperty(resource, HOST_COMPONENT_DISPLAY_NAME_PROPERTY_ID, response.getDisplayName(), requestedIds);
@@ -520,12 +520,12 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
         }
       }
 
-      if (StringUtils.isEmpty(request.getServiceDisplayName())) {
-        request.setServiceDisplayName(getManagementController().findService(cluster, request.getComponentName()));
+      if (StringUtils.isEmpty(request.getServiceName())) {
+        request.setServiceName(getManagementController().findService(cluster, request.getComponentName()));
       }
 
       ServiceComponent sc = getServiceComponent(
-          request.getClusterName(), request.getServiceGroupName(), request.getServiceDisplayName(), request.getComponentName());
+          request.getClusterName(), request.getServiceGroupName(), request.getServiceName(), request.getComponentName());
 
       logRequestInfo("Received a updateHostComponent request", request);
 
@@ -547,10 +547,10 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
         requestClusters.put(request.getClusterName(), clusterServices);
       }
 
-      Map<String, Set<String>> serviceComponents = clusterServices.get(request.getServiceDisplayName());
+      Map<String, Set<String>> serviceComponents = clusterServices.get(request.getServiceName());
       if (serviceComponents == null) {
         serviceComponents = new HashMap<>();
-        clusterServices.put(request.getServiceDisplayName(), serviceComponents);
+        clusterServices.put(request.getServiceName(), serviceComponents);
       }
 
       Set<String> componentHosts = serviceComponents.get(request.getComponentName());
@@ -730,7 +730,7 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
     ServiceComponentHostRequest serviceComponentHostRequest = new ServiceComponentHostRequest(
             (String) properties.get(HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID),
             (String) properties.get(HOST_COMPONENT_SERVICE_GROUP_NAME_PROPERTY_ID),
-            (String) properties.get(HOST_COMPONENT_SERVICE_DISPLAY_NAME_PROPERTY_ID),
+            (String) properties.get(HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID),
             (String) properties.get(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID),
             (String) properties.get(HOST_COMPONENT_HOST_NAME_PROPERTY_ID),
             (String) properties.get(HOST_COMPONENT_STATE_PROPERTY_ID));
@@ -947,12 +947,11 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
    * @param request  the request to log
    */
   private void logRequestInfo(String msg, ServiceComponentHostRequest request) {
-    LOG.info("{}, clusterName={}, serviceGroupName={}, serviceName={}, serviceDisplayName={}, componentName={}, hostname={}, request={}",
+    LOG.info("{}, clusterName={}, serviceGroupName={}, serviceName={}, componentName={}, hostname={}, request={}",
         msg,
         request.getClusterName(),
         request.getServiceGroupName(),
-        request.getServiceDisplayName(),
-        request.getServiceDisplayName(),
+        request.getServiceName(),
         request.getComponentName(),
         request.getHostname(),
         request);
@@ -971,8 +970,7 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
     sb.append(msg)
       .append(", clusterName=").append(request.getClusterName())
       .append(", serviceGroupName=").append(request.getServiceGroupName())
-      .append(", serviceName=").append(request.getServiceDisplayName())
-      .append(", serviceDisplayName=").append(request.getServiceDisplayName())
+      .append(", serviceName=").append(request.getServiceName())
       .append(", componentName=").append(request.getComponentName())
       .append(", hostname=").append(request.getHostname())
       .append(", currentState=").append(oldState == null ? "null" : oldState)

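The same rename flows through the host-component path: update requests are now resolved by service_name, and the duplicated serviceDisplayName argument in logRequestInfo goes away. As a sketch of the write side that drives updateHostComponents, a host-component state update; the HostRoles key, URL, and names below follow conventional Ambari REST shapes and are assumed rather than shown in this hunk.

// Illustrative PUT body for a host-component state change (names assumed).
var body = JSON.stringify({
  RequestInfo: { context: 'Start NAMENODE' },
  Body: { HostRoles: { state: 'STARTED' } }
});
// PUT /api/v1/clusters/c1/hosts/h1/host_components/NAMENODE
// fetch(url, { method: 'PUT', headers: { 'X-Requested-By': 'ambari' }, body: body })
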
http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
index f8eb878..252d772 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
@@ -97,7 +97,7 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
   public static final String SERVICE_SERVICE_GROUP_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "service_group_name";
   public static final String SERVICE_SERVICE_ID_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "service_id";
   public static final String SERVICE_SERVICE_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "service_name";
-  public static final String SERVICE_SERVICE_DISPLAY_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "service_display_name";
+  public static final String SERVICE_SERVICE_TYPE_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "service_type";
   public static final String SERVICE_SERVICE_STATE_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "state";
   public static final String SERVICE_MAINTENANCE_STATE_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "maintenance_state";
   public static final String SERVICE_CREDENTIAL_STORE_SUPPORTED_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "credential_store_supported";
@@ -116,7 +116,7 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
     new HashSet<>(Arrays.asList(new String[]{
       SERVICE_CLUSTER_NAME_PROPERTY_ID,
       SERVICE_SERVICE_GROUP_NAME_PROPERTY_ID,
-      SERVICE_SERVICE_DISPLAY_NAME_PROPERTY_ID}));
+      SERVICE_SERVICE_NAME_PROPERTY_ID}));
 
   /**
    * The property ids for an service resource.
@@ -136,7 +136,7 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
     PROPERTY_IDS.add(SERVICE_SERVICE_GROUP_NAME_PROPERTY_ID);
     PROPERTY_IDS.add(SERVICE_SERVICE_ID_PROPERTY_ID);
     PROPERTY_IDS.add(SERVICE_SERVICE_NAME_PROPERTY_ID);
-    PROPERTY_IDS.add(SERVICE_SERVICE_DISPLAY_NAME_PROPERTY_ID);
+    PROPERTY_IDS.add(SERVICE_SERVICE_TYPE_PROPERTY_ID);
     PROPERTY_IDS.add(SERVICE_SERVICE_STATE_PROPERTY_ID);
     PROPERTY_IDS.add(SERVICE_MAINTENANCE_STATE_PROPERTY_ID);
     PROPERTY_IDS.add(SERVICE_CREDENTIAL_STORE_SUPPORTED_PROPERTY_ID);
@@ -151,7 +151,7 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
     PROPERTY_IDS.add(QUERY_PARAMETERS_START_DEPENDENCIES);
 
     // keys
-    KEY_PROPERTY_IDS.put(Resource.Type.Service, SERVICE_SERVICE_DISPLAY_NAME_PROPERTY_ID);
+    KEY_PROPERTY_IDS.put(Resource.Type.Service, SERVICE_SERVICE_NAME_PROPERTY_ID);
     KEY_PROPERTY_IDS.put(Resource.Type.ServiceGroup, SERVICE_SERVICE_GROUP_NAME_PROPERTY_ID);
     KEY_PROPERTY_IDS.put(Resource.Type.Cluster, SERVICE_CLUSTER_NAME_PROPERTY_ID);
   }
@@ -223,7 +223,7 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
         resource.setProperty(SERVICE_SERVICE_GROUP_NAME_PROPERTY_ID, response.getServiceGroupName());
         resource.setProperty(SERVICE_SERVICE_ID_PROPERTY_ID, response.getServiceId());
         resource.setProperty(SERVICE_SERVICE_NAME_PROPERTY_ID, response.getServiceName());
-        resource.setProperty(SERVICE_SERVICE_DISPLAY_NAME_PROPERTY_ID, response.getServiceDisplayName());
+        resource.setProperty(SERVICE_SERVICE_TYPE_PROPERTY_ID, response.getServiceType());
         resource.setProperty(SERVICE_SERVICE_STATE_PROPERTY_ID, response.getDesiredState());
         resource.setProperty(SERVICE_MAINTENANCE_STATE_PROPERTY_ID, response.getMaintenanceState());
         resource.setProperty(SERVICE_DESIRED_REPO_VERSION_ID_PROPERTY_ID, response.getDesiredRepositoryVersion());
@@ -273,8 +273,8 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
         response.getServiceId(), requestedIds);
       setResourceProperty(resource, SERVICE_SERVICE_NAME_PROPERTY_ID,
           response.getServiceName(), requestedIds);
-      setResourceProperty(resource, SERVICE_SERVICE_DISPLAY_NAME_PROPERTY_ID,
-        response.getServiceDisplayName(), requestedIds);
+      setResourceProperty(resource, SERVICE_SERVICE_TYPE_PROPERTY_ID,
+        response.getServiceType(), requestedIds);
       setResourceProperty(resource, SERVICE_SERVICE_STATE_PROPERTY_ID,
           calculateServiceState(response.getClusterName(), response.getServiceName()),
           requestedIds);
@@ -350,7 +350,7 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
 
     notifyDelete(Resource.Type.Service, predicate);
     for(ServiceRequest serviceReq : requests) {
-      deleteStatusMetaData.addDeletedKey("service_name: "+serviceReq.getServiceDisplayName());
+      deleteStatusMetaData.addDeletedKey("service_name: " + serviceReq.getServiceName());
     }
     return getRequestStatus(null, null, deleteStatusMetaData);
   }
@@ -433,7 +433,7 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
         (String) properties.get(SERVICE_CLUSTER_NAME_PROPERTY_ID),
         (String) properties.get(SERVICE_SERVICE_GROUP_NAME_PROPERTY_ID),
         (String) properties.get(SERVICE_SERVICE_NAME_PROPERTY_ID),
-        (String) properties.get(SERVICE_SERVICE_DISPLAY_NAME_PROPERTY_ID),
+        (String) properties.get(SERVICE_SERVICE_TYPE_PROPERTY_ID),
         null == desiredRepoId ? null : Long.valueOf(desiredRepoId),
         (String) properties.get(SERVICE_SERVICE_STATE_PROPERTY_ID),
         (String) properties.get(SERVICE_CREDENTIAL_STORE_ENABLED_PROPERTY_ID),
@@ -470,8 +470,8 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
       Cluster cluster = clusters.getCluster(request.getClusterName());
       ServiceGroup sg = cluster.getServiceGroup(request.getServiceGroupName());
 
-      if(StringUtils.isBlank(request.getServiceDisplayName())) {
-        request.setServiceDisplayName(request.getServiceName());
+      if(StringUtils.isBlank(request.getServiceType())) {
+        request.setServiceType(request.getServiceName());
       }
 
       RepositoryVersionEntity repositoryVersion = request.getResolvedRepository();
@@ -480,7 +480,7 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
         throw new AmbariException("Could not find any repository on the request.");
       }
 
-      Service s = cluster.addService(sg, request.getServiceName(), request.getServiceName(), repositoryVersion);
+      Service s = cluster.addService(sg, request.getServiceName(), request.getServiceType(), repositoryVersion);
       if (repositoryVersion.getType() != RepositoryType.STANDARD
           && cluster.getProvisioningState() == State.INIT) {
         throw new AmbariException(String.format(
@@ -495,13 +495,13 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
       StackId stackId = repositoryVersion.getStackId();
       AmbariMetaInfo ambariMetaInfo = getManagementController().getAmbariMetaInfo();
       ServiceInfo serviceInfo = ambariMetaInfo.getService(stackId.getStackName(),
-          stackId.getStackVersion(), request.getServiceDisplayName());
+          stackId.getStackVersion(), request.getServiceType());
 
       boolean credentialStoreSupported = serviceInfo.isCredentialStoreSupported();
       boolean credentialStoreRequired = serviceInfo.isCredentialStoreRequired();
 
-      LOG.info("Service: {}, credential_store_supported = {} and credential_store_required = {} from stack definition",
-               request.getServiceDisplayName(), credentialStoreSupported, credentialStoreRequired);
+      LOG.info("Service: service_name = {}, service_type = {}, credential_store_supported = {} and credential_store_required = {} from stack definition",
+               request.getServiceName(), request.getServiceType(), credentialStoreSupported, credentialStoreRequired);
       /*
        * If request does not have credential_store_enabled field,
        * then get the default from the stack definition.
@@ -510,16 +510,16 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
         boolean credentialStoreEnabled = Boolean.parseBoolean(request.getCredentialStoreEnabled());
         boolean enableCredStore = credentialStoreSupported && (credentialStoreRequired || credentialStoreEnabled);
         s.setCredentialStoreEnabled(enableCredStore);
-        LOG.info("Service: {}, credential_store_enabled = {} from request and resulting" +
+        LOG.info("Service: service_name = {}, service_type = {}, credential_store_enabled = {} from request and resulting" +
                  " credential store enabled status is = {}",
-                 request.getServiceDisplayName(), credentialStoreEnabled, enableCredStore);
+                 request.getServiceName(), request.getServiceType(), credentialStoreEnabled, enableCredStore);
       } else {
         boolean enableCredStore = credentialStoreSupported &&
                                   (credentialStoreRequired || serviceInfo.isCredentialStoreEnabled());
         s.setCredentialStoreEnabled(enableCredStore);
-        LOG.info("Service: {}, credential_store_enabled = {} from stack definition and resulting" +
+        LOG.info("Service: service_name = {}, service_type = {}, credential_store_enabled = {} from stack definition and resulting" +
                  " credential store enabled status is = {}",
-                 s.getServiceDisplayName(), serviceInfo.isCredentialStoreEnabled(), enableCredStore);
+                 s.getName(), s.getServiceType(), serviceInfo.isCredentialStoreEnabled(), enableCredStore);
       }
 
       // Initialize service widgets
@@ -565,8 +565,8 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
     }
 
     Set<ServiceResponse> response = new HashSet<>();
-    if (request.getServiceDisplayName() != null) {
-      Service s = cluster.getService(request.getServiceDisplayName());
+    if (request.getServiceName() != null) {
+      Service s = cluster.getService(request.getServiceName());
       response.add(s.convertToResponse());
       return response;
     }
@@ -599,10 +599,10 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
       String retrivedSvcGrp = serviceResponse.getServiceGroupName();
       if (retrivedSvcGrp == null) {
         throw new NullPointerException("'ServiceGroupName' : null in cluster : " + cluster.getClusterName() +
-                " for retrieved Service : "+s.getServiceDisplayName());
+                " for retrieved Service : "+s.getServiceName());
       }
       if (!retrivedSvcGrp.equals(request.getServiceGroupName())) {
-        throw new ServiceNotFoundException(cluster.getClusterName(), s.getServiceDisplayName());
+        throw new ServiceNotFoundException(cluster.getClusterName(), s.getServiceName());
       }
       */
       response.add(serviceResponse);
@@ -656,15 +656,15 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
     for (ServiceRequest request : requests) {
       if (request.getClusterName() == null
           || request.getClusterName().isEmpty()
-          || request.getServiceDisplayName() == null
-          || request.getServiceDisplayName().isEmpty()) {
+          || request.getServiceName() == null
+          || request.getServiceName().isEmpty()) {
         throw new IllegalArgumentException("Invalid arguments, cluster name"
-            + " and/or service display name should be provided to update services");
+            + " and/or service name should be provided to update services");
       }
 
       LOG.info("Received a updateService request"
           + ", clusterName=" + request.getClusterName()
-          + ", serviceName=" + request.getServiceDisplayName()
+          + ", serviceName=" + request.getServiceName()
           + ", request=" + request);
 
       clusterIds.add(request.getClusterName());
@@ -679,15 +679,15 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
       }
 
       if (serviceNames.get(request.getClusterName())
-          .contains(request.getServiceDisplayName())) {
+          .contains(request.getServiceName())) {
         // TODO throw single exception
         throw new IllegalArgumentException("Invalid request contains duplicate"
             + " service names");
       }
-      serviceNames.get(request.getClusterName()).add(request.getServiceDisplayName());
+      serviceNames.get(request.getClusterName()).add(request.getServiceName());
 
       Cluster cluster = clusters.getCluster(request.getClusterName());
-      Service s = cluster.getService(request.getServiceDisplayName());
+      Service s = cluster.getService(request.getServiceName());
       State oldState = s.getDesiredState();
       State newState = null;
       if (request.getDesiredState() != null) {
@@ -735,8 +735,8 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
                                              "as it is required by the service. Service=" + s.getName());
         }
         serviceCredentialStoreEnabledMap.put(s, credentialStoreEnabled);
-        LOG.info("Service: {}, credential_store_enabled from request: {}", request.getServiceDisplayName(),
-            credentialStoreEnabled);
+        LOG.info("Service: service_name = {}, service_type = {}, credential_store_enabled from request: {}",
+          request.getServiceName(), request.getServiceType(), credentialStoreEnabled);
       }
 
       if (StringUtils.isNotEmpty(request.getCredentialStoreSupported())) {
@@ -747,7 +747,7 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
       if (newState == null) {
         if (LOG.isDebugEnabled()) {
           LOG.debug("Nothing to do for new updateService request, clusterId={}, serviceName={}, newDesiredState=null",
-            request.getClusterName(), request.getServiceDisplayName());
+            request.getClusterName(), request.getServiceName());
         }
         continue;
       }
@@ -952,7 +952,7 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
     Set<Service> removable = new HashSet<>();
 
     for (ServiceRequest serviceRequest : request) {
-      if (StringUtils.isEmpty(serviceRequest.getClusterName()) || StringUtils.isEmpty(serviceRequest.getServiceDisplayName())) {
+      if (StringUtils.isEmpty(serviceRequest.getClusterName()) || StringUtils.isEmpty(serviceRequest.getServiceName())) {
         // FIXME throw correct error
         throw new AmbariException("invalid arguments");
       } else {
@@ -963,7 +963,7 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
 
         Service service = clusters.getCluster(
             serviceRequest.getClusterName()).getService(
-          serviceRequest.getServiceDisplayName());
+          serviceRequest.getServiceName());
 
         //
         // Run through the list of service component hosts. If all host components are in removable state,
@@ -977,7 +977,7 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
           for (Map.Entry<String, ServiceComponentHost> entry : schHostMap.entrySet()) {
             ServiceComponentHost sch = entry.getValue();
             if (!sch.canBeRemoved()) {
-              String msg = "Cannot remove " + serviceRequest.getClusterName() + "/" + serviceRequest.getServiceDisplayName() +
+              String msg = "Cannot remove " + serviceRequest.getClusterName() + "/" + serviceRequest.getServiceName() +
                       ". " + sch.getServiceComponentName() + "on " + sch.getHost() + " is in " +
                       String.valueOf(sch.getDesiredState()) + " state.";
               LOG.error(msg);
@@ -988,7 +988,7 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
 
         if (!isServiceRemovable) {
           throw new AmbariException ("Cannot remove " +
-                  serviceRequest.getClusterName() + "/" + serviceRequest.getServiceDisplayName() +
+                  serviceRequest.getClusterName() + "/" + serviceRequest.getServiceName() +
                     ". " + "One or more host components are in a non-removable state.");
         }
 
@@ -1081,21 +1081,18 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
           throws AuthorizationException, AmbariException {
 
     AmbariMetaInfo ambariMetaInfo = getManagementController().getAmbariMetaInfo();
-    Map<String, Set<String>> serviceDisplayNames = new HashMap<>();
+    Map<String, Set<String>> serviceNames = new HashMap<>();
     Set<String> duplicates = new HashSet<>();
 
     for (ServiceRequest request : requests) {
       final String clusterName = request.getClusterName();
       final String serviceGroupName = request.getServiceGroupName();
       final String serviceName = request.getServiceName();
-      final String serviceDisplayName = request.getServiceDisplayName();
+      final String serviceType = request.getServiceType();
       final StackId desiredStackId = request.getDesiredStackId();
       Validate.notNull(clusterName, "Cluster name should be provided when creating a service");
       Validate.notNull(serviceGroupName, "Service group name should be provided when creating a service");
       Validate.notEmpty(serviceName, "Service name should be provided when creating a service");
-      Validate.notEmpty(serviceDisplayName, "Service display name should be provided when creating a service");
-      //Todo Add after UI is ready
-      //Validate.notNull(desiredStackId, "Service display name should be provided when creating a service");
 
       if (LOG.isDebugEnabled()) {
         LOG.debug("Received a createService request, clusterId={}, serviceName={}, request={}", clusterName, serviceName, request);
@@ -1105,16 +1102,16 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
         throw new AuthorizationException("The user is not authorized to create services");
       }
 
-      if (!serviceDisplayNames.containsKey(clusterName)) {
-        serviceDisplayNames.put(clusterName, new HashSet<String>());
+      if (!serviceNames.containsKey(clusterName)) {
+        serviceNames.put(clusterName, new HashSet<String>());
       }
 
-      if (serviceDisplayNames.get(clusterName).contains(serviceDisplayName)) {
+      if (serviceNames.get(clusterName).contains(serviceName)) {
         // throw error later for dup
-        duplicates.add(serviceDisplayName);
+        duplicates.add(serviceName);
         continue;
       }
-      serviceDisplayNames.get(clusterName).add(serviceDisplayName);
+      serviceNames.get(clusterName).add(serviceName);
 
       if (StringUtils.isNotEmpty(request.getDesiredState())) {
         State state = State.valueOf(request.getDesiredState());
@@ -1132,10 +1129,10 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
         throw new ParentObjectNotFoundException("Attempted to add a service to a cluster which doesn't exist", e);
       }
       try {
-        Service s = cluster.getService(serviceDisplayName);
+        Service s = cluster.getService(serviceName);
         if (s != null) {
           // throw error later for dup
-          duplicates.add(serviceDisplayName);
+          duplicates.add(serviceName);
           continue;
         }
       } catch (ServiceNotFoundException e) {
@@ -1195,26 +1192,31 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
 
       request.setResolvedRepository(repositoryVersion);
 
+      String stackServiceName = serviceType;
+      if(StringUtils.isBlank(serviceType)) {
+        stackServiceName = serviceName;
+      }
       if (!ambariMetaInfo.isValidService(stackId.getStackName(),
-              stackId.getStackVersion(), request.getServiceDisplayName())) {
+              stackId.getStackVersion(), stackServiceName)) {
         throw new IllegalArgumentException("Unsupported or invalid service in stack, clusterId=" + clusterName
-                + ", serviceDisplayName=" + serviceDisplayName + ", stackInfo=" + stackId.getStackId());
+          + ", serviceName=" + serviceName + ", stackServiceName=" + stackServiceName
+          + ", stackInfo=" + stackId.getStackId());
       }
 
       // validate the credential store input provided
       ServiceInfo serviceInfo = ambariMetaInfo.getService(stackId.getStackName(),
-          stackId.getStackVersion(), request.getServiceDisplayName());
+          stackId.getStackVersion(), stackServiceName);
 
       if (StringUtils.isNotEmpty(request.getCredentialStoreEnabled())) {
         boolean credentialStoreEnabled = Boolean.parseBoolean(request.getCredentialStoreEnabled());
         if (!serviceInfo.isCredentialStoreSupported() && credentialStoreEnabled) {
           throw new IllegalArgumentException("Invalid arguments, cannot enable credential store " +
-              "as it is not supported by the service. Service=" + request.getServiceDisplayName());
+              "as it is not supported by the service. Service=" + request.getServiceName());
         }
       }
     }
     // ensure only a single cluster update
-    if (serviceDisplayNames.size() != 1) {
+    if (serviceNames.size() != 1) {
       throw new IllegalArgumentException("Invalid arguments, updates allowed"
               + "on only one cluster at a time");
     }
@@ -1223,7 +1225,7 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
     if (!duplicates.isEmpty()) {
       String clusterName = requests.iterator().next().getClusterName();
       String msg = "Attempted to create a service which already exists: "
-              + ", clusterName=" + clusterName  + " serviceDisplayName=" + StringUtils.join(duplicates, ",");
+              + ", clusterName=" + clusterName  + " serviceName=" + StringUtils.join(duplicates, ",");
 
       throw new DuplicateResourceException(msg);
     }

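One behavioral detail worth calling out in createServices: when a create request omits service_type, the service_name is reused as the stack service name before stack validation. A one-function JavaScript sketch mirroring that Java fallback (resolveStackServiceName is a hypothetical name):

// Mirrors: String stackServiceName = serviceType;
//          if (StringUtils.isBlank(serviceType)) { stackServiceName = serviceName; }
function resolveStackServiceName(serviceName, serviceType) {
  return (serviceType && serviceType.trim().length) ? serviceType : serviceName;
}

console.log(resolveStackServiceName('MY_HDFS', 'HDFS')); // -> 'HDFS'
console.log(resolveStackServiceName('HDFS', ''));        // -> 'HDFS' (falls back to service_name)
console.log(resolveStackServiceName('HDFS', null));      // -> 'HDFS'
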
http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/DatabaseChecker.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/DatabaseChecker.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/DatabaseChecker.java
index 93baefe..e3f4e1a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/DatabaseChecker.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/DatabaseChecker.java
@@ -87,9 +87,9 @@ public class DatabaseChecker {
         if (serviceDesiredStateEntity == null) {
           checkPassed = false;
           LOG.error(String.format("ServiceDesiredStateEntity is null for " +
-              "ServiceComponentDesiredStateEntity, clusterName=%s, serviceDisplayName=%s, serviceName=%s ",
-            clusterEntity.getClusterName(), clusterServiceEntity.getServiceDisplayName(),
-            clusterServiceEntity.getServiceName()));
+              "ServiceComponentDesiredStateEntity, clusterName=%s, serviceName=%s, serviceType=%s ",
+            clusterEntity.getClusterName(), clusterServiceEntity.getServiceName(),
+            clusterServiceEntity.getServiceType()));
         }
         Collection<ServiceComponentDesiredStateEntity> scDesiredStateEntities =
           clusterServiceEntity.getServiceComponentDesiredStateEntities();
@@ -97,9 +97,9 @@ public class DatabaseChecker {
           scDesiredStateEntities.isEmpty()) {
           checkPassed = false;
           LOG.error(String.format("serviceComponentDesiredStateEntities is null or empty for " +
-              "ServiceComponentDesiredStateEntity, clusterName=%s, serviceDisplayName=%s, serviceName=%s ",
-              clusterEntity.getClusterName(), clusterServiceEntity.getServiceDisplayName(),
-              clusterServiceEntity.getServiceName()));
+              "ServiceComponentDesiredStateEntity, clusterName=%s, serviceName=%s, serviceType=%s ",
+              clusterEntity.getClusterName(), clusterServiceEntity.getServiceName(),
+              clusterServiceEntity.getServiceType()));
         } else {
           for (ServiceComponentDesiredStateEntity scDesiredStateEnity : scDesiredStateEntities) {
 
@@ -121,28 +121,28 @@ public class DatabaseChecker {
             if (schDesiredStateEntities == null) {
               componentCheckFailed = true;
               LOG.error(String.format("hostComponentDesiredStateEntities is null for " +
-                  "ServiceComponentDesiredStateEntity, clusterName=%s, serviceName=%s, serviceDisplayName=%s, componentName=%s ",
-                clusterEntity.getClusterName(), clusterServiceEntity.getServiceName(), clusterServiceEntity.getServiceDisplayName(),
+                  "ServiceComponentDesiredStateEntity, clusterName=%s, serviceName=%s, serviceType=%s, componentName=%s ",
+                clusterEntity.getClusterName(), clusterServiceEntity.getServiceName(), clusterServiceEntity.getServiceType(),
                 scDesiredStateEnity.getComponentName()));
             } else if (!zeroCardinality && schDesiredStateEntities.isEmpty()) {
               componentCheckFailed = true;
               LOG.error(String.format("hostComponentDesiredStateEntities is empty for " +
-                  "ServiceComponentDesiredStateEntity, clusterName=%s, serviceName=%s, serviceDisplayName=%s, componentName=%s ",
-                clusterEntity.getClusterName(), clusterServiceEntity.getServiceName(), clusterServiceEntity.getServiceDisplayName(),
+                  "ServiceComponentDesiredStateEntity, clusterName=%s, serviceName=%s, serviceType=%s, componentName=%s ",
+                clusterEntity.getClusterName(), clusterServiceEntity.getServiceName(), clusterServiceEntity.getServiceType(),
                 scDesiredStateEnity.getComponentName()));
             }
 
             if (schStateEntities == null) {
               componentCheckFailed = true;
               LOG.error(String.format("hostComponentStateEntities is null for " +
-                  "ServiceComponentDesiredStateEntity, clusterName=%s, serviceName=%s, serviceDisplayName=%s, componentName=%s ",
-                clusterEntity.getClusterName(), clusterServiceEntity.getServiceName(), clusterServiceEntity.getServiceDisplayName(),
+                  "ServiceComponentDesiredStateEntity, clusterName=%s, serviceName=%s, serviceType=%s, componentName=%s ",
+                clusterEntity.getClusterName(), clusterServiceEntity.getServiceName(), clusterServiceEntity.getServiceType(),
                 scDesiredStateEnity.getComponentName()));
             } else if (!zeroCardinality && schStateEntities.isEmpty()) {
               componentCheckFailed = true;
               LOG.error(String.format("hostComponentStateEntities is empty for " +
-                  "ServiceComponentDesiredStateEntity, clusterName=%s, serviceName=%s, serviceDisplayName=%s, componentName=%s ",
-                clusterEntity.getClusterName(), clusterServiceEntity.getServiceName(), clusterServiceEntity.getServiceDisplayName(),
+                  "ServiceComponentDesiredStateEntity, clusterName=%s, serviceName=%s, serviceType=%s, componentName=%s ",
+                clusterEntity.getClusterName(), clusterServiceEntity.getServiceName(), clusterServiceEntity.getServiceType(),
                 scDesiredStateEnity.getComponentName()));
             }
 
@@ -151,8 +151,8 @@ public class DatabaseChecker {
               checkPassed = false;
               LOG.error(String.format("HostComponentStateEntities and HostComponentDesiredStateEntities " +
                   "tables must contain equal number of rows mapped to ServiceComponentDesiredStateEntity, " +
-                  "(clusterName=%s, serviceName=%s, serviceDisplayName=%s, componentName=%s) ", clusterEntity.getClusterName(),
-                clusterServiceEntity.getServiceName(), clusterServiceEntity.getServiceDisplayName(), scDesiredStateEnity.getComponentName()));
+                  "(clusterName=%s, serviceName=%s, serviceType=%s, componentName=%s) ", clusterEntity.getClusterName(),
+                clusterServiceEntity.getServiceName(), clusterServiceEntity.getServiceType(), scDesiredStateEnity.getComponentName()));
             }
             checkPassed = checkPassed && !componentCheckFailed;
           }
@@ -231,26 +231,27 @@ public class DatabaseChecker {
           for (ClusterServiceEntity clusterServiceEntity : clusterServiceEntities) {
             if (!State.INIT.equals(
                 clusterServiceEntity.getServiceDesiredStateEntity().getDesiredState())) {
-              String serviceDisplayName = clusterServiceEntity.getServiceDisplayName();
+              String serviceName = clusterServiceEntity.getServiceName();
+              String stackServiceName = clusterServiceEntity.getServiceType();
               Long serviceGroupId = clusterServiceEntity.getServiceGroupId();
               ServiceInfo serviceInfo = ambariMetaInfo.getService(stack.getName(),
-                  stack.getVersion(), serviceDisplayName);
+                  stack.getVersion(), stackServiceName);
 
               for (String configTypeName : serviceInfo.getConfigTypeAttributes().keySet()) {
                 if (selectedCountForType.get(configTypeName) == null) {
                   checkPassed = false;
                   LOG.error("Configuration {} is missing for service {}", configTypeName,
-                    serviceDisplayName);
+                    serviceName);
                 } else {
                   // Check that for each config type exactly one is selected
                   if (selectedCountForType.get(configTypeName) == 0) {
                     checkPassed = false;
                     LOG.error("Configuration {} has no enabled entries for service {}",
-                        configTypeName, serviceDisplayName);
+                        configTypeName, serviceName);
                   } else if (selectedCountForType.get(configTypeName) > 1) {
                     checkPassed = false;
                     LOG.error("Configuration {} has more than 1 enabled entry for service {}",
-                        configTypeName, serviceDisplayName);
+                        configTypeName, serviceName);
                   }
                 }
               }
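
The configuration check above reduces to one rule: every config type a service declares
must have exactly one enabled (selected) entry. A minimal stand-in sketch (names mirror
the hunk; the class itself is hypothetical, not the DatabaseChecker API):

import java.util.Map;
import java.util.Set;

class ConfigSelectionCheck {
  static boolean check(Map<String, Integer> selectedCountForType, Set<String> configTypes) {
    boolean passed = true;
    for (String type : configTypes) {
      Integer selected = selectedCountForType.get(type);
      if (selected == null || selected != 1) {
        passed = false;   // missing entirely, enabled nowhere, or enabled more than once
      }
    }
    return passed;
  }
}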

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceComponentInstalledEvent.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceComponentInstalledEvent.java b/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceComponentInstalledEvent.java
index 8c2e6d4..b0c39f5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceComponentInstalledEvent.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceComponentInstalledEvent.java
@@ -33,14 +33,15 @@ public class ServiceComponentInstalledEvent extends ServiceEvent {
    * @param stackName
    * @param stackVersion
    * @param serviceName
+   * @param serviceType
    * @param componentName
    * @param hostName
    */
   public ServiceComponentInstalledEvent(long clusterId, String stackName, String stackVersion, String serviceName,
-                                        String serviceDisplayName, String serviceGroupName, String componentName,
+                                        String serviceType, String serviceGroupName, String componentName,
                                         String hostName, boolean recoveryEnabled) {
     super(AmbariEventType.SERVICE_COMPONENT_INSTALL_SUCCESS, clusterId, stackName,
-          stackVersion, serviceName, serviceDisplayName, serviceGroupName);
+          stackVersion, serviceName, serviceType, serviceGroupName);
 
     m_componentName = componentName;
     m_hostName = hostName;
@@ -72,7 +73,7 @@ public class ServiceComponentInstalledEvent extends ServiceEvent {
     buffer.append(", stackName=").append(m_stackName);
     buffer.append(", stackVersion=").append(m_stackVersion);
     buffer.append(", serviceName=").append(m_serviceName);
-    buffer.append(", serviceDisplayName=").append(m_serviceDisplayName);
+    buffer.append(", serviceType=").append(m_serviceType);
     buffer.append(", serviceGroupName=").append(m_serviceGroupName);
     buffer.append(", componentName=").append(m_componentName);
     buffer.append(", hostName=").append(m_hostName);

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceComponentUninstalledEvent.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceComponentUninstalledEvent.java b/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceComponentUninstalledEvent.java
index 803770d..2ad2940 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceComponentUninstalledEvent.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceComponentUninstalledEvent.java
@@ -39,10 +39,10 @@ public class ServiceComponentUninstalledEvent extends ServiceEvent {
    * @param hostName
    */
   public ServiceComponentUninstalledEvent(long clusterId, String stackName, String stackVersion, String serviceName,
-                                          String serviceDisplayName, String serviceGroupName, String componentName,
+                                          String serviceType, String serviceGroupName, String componentName,
                                           String hostName, boolean recoveryEnabled) {
     super(AmbariEventType.SERVICE_COMPONENT_UNINSTALLED_SUCCESS, clusterId,
-      stackName, stackVersion, serviceName, serviceDisplayName, serviceGroupName);
+      stackName, stackVersion, serviceName, serviceType, serviceGroupName);
 
     m_componentName = componentName;
     m_hostName = hostName;
@@ -80,7 +80,7 @@ public class ServiceComponentUninstalledEvent extends ServiceEvent {
     buffer.append(", stackName=").append(m_stackName);
     buffer.append(", stackVersion=").append(m_stackVersion);
     buffer.append(", serviceName=").append(m_serviceName);
-    buffer.append(", serviceDisplayName=").append(m_serviceDisplayName);
+    buffer.append(", serviceType=").append(m_serviceType);
     buffer.append(", serviceGroupName=").append(m_serviceGroupName);
     buffer.append(", componentName=").append(m_componentName);
     buffer.append(", hostName=").append(m_hostName);

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceEvent.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceEvent.java b/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceEvent.java
index 622c9d7..79f1566 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceEvent.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceEvent.java
@@ -29,9 +29,9 @@ public abstract class ServiceEvent extends ClusterEvent {
   protected final String m_serviceName;
 
   /**
-   * The name of the real service.
+   * The type of the service (i.e. stack service name).
    */
-  protected final String m_serviceDisplayName;
+  protected final String m_serviceType;
 
   /**
    * The name of the service group.
@@ -55,12 +55,12 @@ public abstract class ServiceEvent extends ClusterEvent {
    * @param clusterId
    */
   public ServiceEvent(AmbariEventType eventType, long clusterId, String stackName, String stackVersion,
-                      String serviceName, String serviceDisplayName, String serviceGroupName) {
+                      String serviceName, String serviceType, String serviceGroupName) {
     super(eventType, clusterId);
     m_stackName = stackName;
     m_stackVersion = stackVersion;
     m_serviceName = serviceName;
-    m_serviceDisplayName = serviceDisplayName;
+    m_serviceType = serviceType;
     m_serviceGroupName = serviceGroupName;
   }
 
@@ -72,9 +72,9 @@ public abstract class ServiceEvent extends ClusterEvent {
   }
 
   /**
-   * @return the serviceDisplayName (never {@code null}).
+   * @return the serviceType (never {@code null}).
    */
-  public String getServiceDisplayName() { return m_serviceDisplayName; }
+  public String getServiceType() { return m_serviceType; }
 
   /**
    * @return the service group name (never {@code null}).

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceInstalledEvent.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceInstalledEvent.java b/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceInstalledEvent.java
index c7c7dd4..aa0a5ea 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceInstalledEvent.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceInstalledEvent.java
@@ -29,13 +29,13 @@ public class ServiceInstalledEvent extends ServiceEvent {
    * @param stackName
    * @param stackVersion
    * @param serviceName
-   * @param serviceDisplayName
+   * @param serviceType
    * @param serviceGroupName
    */
   public ServiceInstalledEvent(long clusterId, String stackName, String stackVersion, String serviceName,
-                               String serviceDisplayName, String serviceGroupName) {
+                               String serviceType, String serviceGroupName) {
     super(AmbariEventType.SERVICE_INSTALL_SUCCESS, clusterId, stackName,
-        stackVersion, serviceName, serviceDisplayName, serviceGroupName);
+        stackVersion, serviceName, serviceType, serviceGroupName);
   }
 
   /**
@@ -48,7 +48,7 @@ public class ServiceInstalledEvent extends ServiceEvent {
     buffer.append(", stackName=").append(m_stackName);
     buffer.append(", stackVersion=").append(m_stackVersion);
     buffer.append(", serviceName=").append(m_serviceName);
-    buffer.append(", serviceDisplayName=").append(m_serviceDisplayName);
+    buffer.append(", serviceType=").append(m_serviceType);
     buffer.append(", serviceGroupName=").append(m_serviceGroupName);
     buffer.append("}");
     return buffer.toString();
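
For orientation, a usage fragment for the constructor above (values are illustrative;
assumes the Ambari event classes are on the classpath):

ServiceInstalledEvent event = new ServiceInstalledEvent(
    1L,       // clusterId
    "HDP",    // stackName
    "3.0",    // stackVersion
    "HDFS",   // serviceName: the service instance name
    "HDFS",   // serviceType: the stack service name
    "CORE");  // serviceGroupName
System.out.println(event.getServiceType());  // -> HDFS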

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceRemovedEvent.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceRemovedEvent.java b/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceRemovedEvent.java
index a8de84d..bc27247 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceRemovedEvent.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/events/ServiceRemovedEvent.java
@@ -31,9 +31,9 @@ public class ServiceRemovedEvent extends ServiceEvent {
   private final List<Component> components;
 
   public ServiceRemovedEvent(long clusterId, String stackName, String stackVersion, String serviceName,
-                             String serviceDisplayName, String serviceGroupName, List<Component> components) {
+                             String serviceType, String serviceGroupName, List<Component> components) {
     super(AmbariEventType.SERVICE_REMOVED_SUCCESS, clusterId, stackName,
-      stackVersion, serviceName, serviceDisplayName, serviceGroupName);
+      stackVersion, serviceName, serviceType, serviceGroupName);
     this.components = components;
   }
 
@@ -47,7 +47,7 @@ public class ServiceRemovedEvent extends ServiceEvent {
     buffer.append(", stackName=").append(m_stackName);
     buffer.append(", stackVersion=").append(m_stackVersion);
     buffer.append(", serviceName=").append(m_serviceName);
-    buffer.append(", serviceDisplayName=").append(m_serviceDisplayName);
+    buffer.append(", serviceType=").append(m_serviceType);
     buffer.append(", serviceGroupName=").append(m_serviceGroupName);
     buffer.append("}");
     return buffer.toString();

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListener.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListener.java b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListener.java
index fb6d017..01cdcdc 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListener.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListener.java
@@ -122,13 +122,14 @@ public class HostVersionOutOfSyncListener {
        // If added components do not advertise a version, it makes no sense to mark the version OUT_OF_SYNC.
        // We perform the check per stack version, because a component may not be versionAdvertised in the
        // current stack, but may become versionAdvertised in some future (installed, but not yet upgraded to) stack.
-        String serviceDisplayName = event.getServiceDisplayName();
+        String serviceName = event.getServiceName();
+        String serviceType = event.getServiceType();
         String componentName = event.getComponentName();
         ComponentInfo component = ami.get().getComponent(hostStackId.getStackName(),
-                hostStackId.getStackVersion(), serviceDisplayName, componentName);
+                hostStackId.getStackVersion(), serviceType, componentName);
 
         if (!component.isVersionAdvertised()) {
-          RepositoryVersionState state = checkAllHostComponents(hostStackId, serviceDisplayName,
+          RepositoryVersionState state = checkAllHostComponents(hostStackId, serviceName, serviceType,
                                                                 hostVersionEntity.getHostEntity());
           if (null != state) {
             hostVersionEntity.setState(state);
@@ -169,14 +170,15 @@ public class HostVersionOutOfSyncListener {
         HostEntity hostEntity = hostVersionEntity.getHostEntity();
         RepositoryVersionEntity repoVersionEntity = hostVersionEntity.getRepositoryVersion();
         StackId stackId = repoVersionEntity.getStackId();
-        String serviceDisplayName = event.getServiceDisplayName();
+        String serviceName = event.getServiceName();
+        String serviceType = event.getServiceType();
         if (null == stackId) {
           LOG.info("Stack id could not be loaded for host version {}, repo {}", hostVersionEntity.getHostName(),
               repoVersionEntity.getVersion());
           continue;
         }
 
-        RepositoryVersionState repoState = checkAllHostComponents(stackId, serviceDisplayName, hostEntity);
+        RepositoryVersionState repoState = checkAllHostComponents(stackId, serviceName, serviceType, hostEntity);
         if (null != repoState) {
           hostVersionEntity.setState(repoState);
           hostVersionDAO.get().merge(hostVersionEntity);
@@ -199,14 +201,14 @@ public class HostVersionOutOfSyncListener {
    * @param host    the host entity to find components
   * @return {@code null} if there should be no state change; non-{@code null} to change it
    */
-  private RepositoryVersionState checkAllHostComponents(StackId stackId, String serviceDisplayName,
+  private RepositoryVersionState checkAllHostComponents(StackId stackId, String serviceName, String stackServiceName,
       HostEntity host) throws AmbariException {
 
     Collection<HostComponentDesiredStateEntity> hostComponents = host.getHostComponentDesiredStateEntities();
 
     for (HostComponentDesiredStateEntity hostComponent : hostComponents) {
       ComponentInfo ci = ami.get().getComponent(stackId.getStackName(), stackId.getStackVersion(),
-              serviceDisplayName, hostComponent.getComponentName());
+        stackServiceName, hostComponent.getComponentName());
 
       if (ci.isVersionAdvertised()) {
         return null;
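
The early exit above encodes a single rule: if any component on the host advertises a
version, the host-version state is left untouched. A stand-in sketch of just that
predicate (the interface below is hypothetical, not Ambari's ComponentInfo):

interface AdvertisesVersion {
  boolean isVersionAdvertised();
}

class HostComponentScan {
  static boolean anyAdvertisesVersion(Iterable<AdvertisesVersion> hostComponents) {
    for (AdvertisesVersion ci : hostComponents) {
      if (ci.isVersionAdvertised()) {
        return true;   // mirrors "return null" (no state change) in the listener
      }
    }
    return false;
  }
}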

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterServiceEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterServiceEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterServiceEntity.java
index f4ce5f9..9fca099 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterServiceEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterServiceEntity.java
@@ -71,8 +71,8 @@ public class ClusterServiceEntity {
   @Column(name = "service_name", nullable = false, insertable = true, updatable = true)
   private String serviceName;
 
-  @Column(name = "service_display_name", nullable = false, insertable = true, updatable = true)
-  private String serviceDisplayName;
+  @Column(name = "service_type", nullable = false, insertable = true, updatable = true)
+  private String serviceType;
 
   @Basic
   @Column(name = "service_enabled", nullable = false, insertable = true, updatable = true, length = 10)
@@ -126,12 +126,12 @@ public class ClusterServiceEntity {
     this.serviceName = serviceName;
   }
 
-  public String getServiceDisplayName() {
-    return serviceDisplayName;
+  public String getServiceType() {
+    return serviceType;
   }
 
-  public void setServiceDisplayName(String serviceDisplayName) {
-    this.serviceDisplayName = serviceDisplayName;
+  public void setServiceType(String serviceType) {
+    this.serviceType = serviceType;
   }
 
   public int getServiceEnabled() {
@@ -154,7 +154,7 @@ public class ClusterServiceEntity {
     if (serviceId != null ? !serviceId.equals(that.serviceId) : that.serviceId != null) return false;
     if (serviceEnabled != null ? !serviceEnabled.equals(that.serviceEnabled) : that.serviceEnabled != null)
       return false;
-    if (serviceDisplayName != null ? !serviceDisplayName.equals(that.serviceDisplayName) : that.serviceDisplayName != null) return false;
+    if (serviceType != null ? !serviceType.equals(that.serviceType) : that.serviceType != null) return false;
 
     return true;
   }
@@ -164,7 +164,7 @@ public class ClusterServiceEntity {
     int result = clusterId !=null ? clusterId.intValue() : 0;
     result = 31 * result + (serviceGroupId != null ? serviceGroupId.hashCode() : 0);
     result = 31 * result + (serviceId != null ? serviceId.hashCode() : 0);
-    result = 31 * result + (serviceDisplayName != null ? serviceDisplayName.hashCode() : 0);
+    result = 31 * result + (serviceType != null ? serviceType.hashCode() : 0);
     result = 31 * result + serviceEnabled;
     return result;
   }
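
A shape-only sketch of the mapping after the column rename (the table name and id
column here are assumptions for illustration, not the full Ambari entity):

import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.Id;
import javax.persistence.Table;

@Entity
@Table(name = "clusterservices")   // assumed table name
class ClusterServiceShape {
  @Id
  @Column(name = "service_id")     // assumed id column
  Long serviceId;

  @Column(name = "service_name", nullable = false)
  String serviceName;              // instance name of the service

  @Column(name = "service_type", nullable = false)
  String serviceType;              // stack service name backing this instance
}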

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
index 0c1059b..8fd878e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
@@ -69,7 +69,7 @@ public interface Cluster {
    */
   void addService(Service service);
 
-  Service addService(ServiceGroup serviceGroup, String serviceName, String serviceDisplayName,
+  Service addService(ServiceGroup serviceGroup, String serviceName, String serviceType,
                      RepositoryVersionEntity repositoryVersion) throws AmbariException;
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java
index ce0afa9..b9ab79f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java
@@ -36,7 +36,9 @@ public interface Service {
 
   String getName();
 
-  String getServiceDisplayName();
+  String getDisplayName();
+
+  String getServiceType();
 
   Cluster getCluster();
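
A stand-in summary of the three identifiers a Service now exposes (example values are
illustrative only):

interface ServiceNames {
  String getName();         // unique service instance name within the cluster
  String getDisplayName();  // human-readable name from the stack definition
  String getServiceType();  // stack service name used for metadata lookups, e.g. "HDFS"
}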
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
index dd25992..3c44fdf 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
@@ -44,7 +44,7 @@ public interface ServiceComponent {
 
   String getServiceName();
 
-  String getServiceDisplayName();
+  String getServiceType();
 
   Long getServiceGroupId();
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java
index 1b35072..54c378d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java
@@ -65,10 +65,10 @@ public interface ServiceComponentHost {
   String getServiceGroupName();
 
   /**
-   * Get the Real Service this object maps to
-   * @return Name of the Real Service
+   * Get the Service Type this object maps to
+   * @return Type of the Service
    */
-  String getServiceDisplayName();
+  String getServiceType();
 
   /**
    * Get the ServiceComponent this object maps to

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
index 7e66734..df6437e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
@@ -260,7 +260,7 @@ public class ServiceComponentImpl implements ServiceComponent {
   }
 
   @Override
-  public String getServiceDisplayName() { return service.getServiceDisplayName(); }
+  public String getServiceType() { return service.getServiceType(); }
 
   @Override
   public Long getClusterId() { return service.getClusterId(); }
@@ -439,7 +439,7 @@ public class ServiceComponentImpl implements ServiceComponent {
     }
     ServiceComponentResponse r = new ServiceComponentResponse(getClusterId(),
         cluster.getClusterName(), sg.getServiceGroupId(), sg.getServiceGroupName(), service.getServiceId(),
-        service.getName(), service.getServiceDisplayName(), getName(), desiredStackId, getDesiredState().toString(),
+        service.getName(), service.getServiceType(), getName(), desiredStackId, getDesiredState().toString(),
         getServiceComponentStateCount(), isRecoveryEnabled(), displayName, repositoryVersionEntity.getVersion(),
         getRepositoryState());
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceFactory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceFactory.java
index 883fa2d..b48d825 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceFactory.java
@@ -34,15 +34,15 @@ public interface ServiceFactory {
    *          the ServiceGroup for the service
    * @param serviceName
   *          the name of the service (not {@code null}).
-   * @param serviceDisplayName
-   *          the display name of the service (not {@code null).
+   * @param serviceType
+   *          the type of the service (stack service name) (not {@code null}).
   * @param desiredRepositoryVersion
   *          the repository version of the service (not {@code null}).
    * @return
    */
   Service createNew(Cluster cluster, ServiceGroup serviceGroup,
                     @Assisted("serviceName") String serviceName,
-                    @Assisted("serviceDisplayName") String serviceDisplayName,
+                    @Assisted("serviceType") String serviceType,
                     RepositoryVersionEntity desiredRepositoryVersion);
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
index afb315c..70344b0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
@@ -107,11 +107,11 @@ public class ServiceImpl implements Service {
   /**
    * The stack service name.
    */
-  private final String serviceDisplayName;
+  private final String serviceType;
 
   @AssistedInject
   ServiceImpl(@Assisted Cluster cluster, @Assisted ServiceGroup serviceGroup,
-              @Assisted("serviceName") String serviceName, @Assisted("serviceDisplayName") String serviceDisplayName,
+              @Assisted("serviceName") String serviceName, @Assisted("serviceType") String serviceType,
               @Assisted RepositoryVersionEntity desiredRepositoryVersion,
               ClusterDAO clusterDAO, ServiceGroupDAO serviceGroupDAO,
               ClusterServiceDAO clusterServiceDAO, ServiceDesiredStateDAO serviceDesiredStateDAO,
@@ -126,14 +126,14 @@ public class ServiceImpl implements Service {
     this.serviceComponentFactory = serviceComponentFactory;
     this.eventPublisher = eventPublisher;
     this.serviceName = serviceName;
-    this.serviceDisplayName = serviceDisplayName;
+    this.serviceType = serviceType;
     this.ambariMetaInfo = ambariMetaInfo;
 
     ClusterServiceEntity serviceEntity = new ClusterServiceEntity();
     serviceEntity.setClusterId(cluster.getClusterId());
     serviceEntity.setServiceGroupId(serviceGroup.getServiceGroupId());
     serviceEntity.setServiceName(serviceName);
-    serviceEntity.setServiceDisplayName(serviceDisplayName);
+    serviceEntity.setServiceType(serviceType);
 
     ServiceDesiredStateEntity serviceDesiredStateEntity = new ServiceDesiredStateEntity();
     serviceDesiredStateEntity.setClusterId(cluster.getClusterId());
@@ -148,7 +148,7 @@ public class ServiceImpl implements Service {
     StackId stackId = desiredRepositoryVersion.getStackId();
 
     ServiceInfo sInfo = ambariMetaInfo.getService(stackId.getStackName(),
-        stackId.getStackVersion(), serviceDisplayName);
+        stackId.getStackVersion(), serviceType);
 
     displayName = sInfo.getDisplayName();
     isClientOnlyService = sInfo.isClientOnlyService();
@@ -175,7 +175,7 @@ public class ServiceImpl implements Service {
     this.eventPublisher = eventPublisher;
     this.serviceId = serviceEntity.getServiceId();
     this.serviceName = serviceEntity.getServiceName();
-    this.serviceDisplayName = serviceEntity.getServiceDisplayName();
+    this.serviceType = serviceEntity.getServiceType();
     this.ambariMetaInfo = ambariMetaInfo;
 
     ServiceDesiredStateEntity serviceDesiredStateEntity = serviceEntity.getServiceDesiredStateEntity();
@@ -193,7 +193,7 @@ public class ServiceImpl implements Service {
             StackId stackId = new StackId(serviceComponentDesiredStateEntity.getDesiredStack());
             LOG.error(String.format("Can not get component info: stackName=%s, stackVersion=%s, serviceName=%s, componentName=%s",
               stackId.getStackName(), stackId.getStackVersion(),
-              serviceEntity.getServiceDisplayName(), serviceComponentDesiredStateEntity.getComponentName()));
+              serviceEntity.getServiceName(), serviceComponentDesiredStateEntity.getComponentName()));
             ex.printStackTrace();
           }
       }
@@ -235,8 +235,11 @@ public class ServiceImpl implements Service {
   public String getName() { return serviceName; }
 
   @Override
-  public String getServiceDisplayName() {
-    return serviceDisplayName;
+  public String getDisplayName() { return displayName; }
+
+  @Override
+  public String getServiceType() {
+    return serviceType;
   }
 
   @Override
@@ -276,7 +279,7 @@ public class ServiceImpl implements Service {
       throw new AmbariException("Cannot add duplicate ServiceComponent"
           + ", clusterName=" + cluster.getClusterName()
           + ", clusterId=" + cluster.getClusterId()
-          + ", serviceName=" + getServiceDisplayName()
+          + ", serviceName=" + getName()
           + ", serviceComponentName=" + component.getName());
     }
 
@@ -297,7 +300,7 @@ public class ServiceImpl implements Service {
     ServiceComponent serviceComponent = components.get(componentName);
     if (null == serviceComponent) {
       throw new ServiceComponentNotFoundException(cluster.getClusterName(),
-          getName(), getServiceDisplayName(), serviceGroup.getServiceGroupName(), componentName);
+          getName(), getServiceType(), serviceGroup.getServiceGroupName(), componentName);
     }
 
     return serviceComponent;
@@ -313,7 +316,7 @@ public class ServiceImpl implements Service {
   public void setDesiredState(State state) {
     if (LOG.isDebugEnabled()) {
       LOG.debug("Setting DesiredState of Service, clusterName={}, clusterId={}, serviceName={}, oldDesiredState={}, newDesiredState={}",
-        cluster.getClusterName(), cluster.getClusterId(), getServiceDisplayName(), getDesiredState(), state);
+        cluster.getClusterName(), cluster.getClusterId(), getName(), getDesiredState(), state);
     }
 
     ServiceDesiredStateEntity serviceDesiredStateEntity = getServiceDesiredStateEntity();
@@ -385,7 +388,7 @@ public class ServiceImpl implements Service {
 
     ServiceResponse r = new ServiceResponse(cluster.getClusterId(), cluster.getClusterName(),
         serviceGroup.getServiceGroupId(), serviceGroup.getServiceGroupName(),
-        getServiceId(), getName(), getServiceDisplayName(), desiredStackId, desiredRespositoryVersion.getVersion(),
+        getServiceId(), getName(), getServiceType(), desiredStackId, desiredRespositoryVersion.getVersion(),
         getRepositoryState(), getDesiredState().toString(), isCredentialStoreSupported(), isCredentialStoreEnabled());
 
     r.setDesiredRepositoryVersionId(desiredRespositoryVersion.getId());
@@ -452,7 +455,7 @@ public class ServiceImpl implements Service {
   public void setCredentialStoreEnabled(boolean credentialStoreEnabled) {
     if (LOG.isDebugEnabled()) {
       LOG.debug("Setting CredentialStoreEnabled of Service, clusterName={}, clusterId={}, serviceName={}, oldCredentialStoreEnabled={}, newCredentialStoreEnabled={}",
-        cluster.getClusterName(), cluster.getClusterId(), getServiceDisplayName(), isCredentialStoreEnabled(), credentialStoreEnabled);
+        cluster.getClusterName(), cluster.getClusterId(), getName(), isCredentialStoreEnabled(), credentialStoreEnabled);
     }
 
     ServiceDesiredStateEntity desiredStateEntity = getServiceDesiredStateEntity();
@@ -469,7 +472,7 @@ public class ServiceImpl implements Service {
   @Override
   public void debugDump(StringBuilder sb) {
     sb.append("Service={ serviceName=").append(getName())
-      .append(", serviceDisplayName=").append(getServiceDisplayName())
+      .append(", serviceType=").append(getServiceType())
       .append(", clusterName=").append(cluster.getClusterName())
       .append(", clusterId=").append(cluster.getClusterId())
       .append(", desiredStackVersion=").append(getDesiredStackId())
@@ -499,7 +502,7 @@ public class ServiceImpl implements Service {
     cluster.addService(this);
 
     ServiceInstalledEvent event = new ServiceInstalledEvent(getClusterId(), stackId.getStackName(),
-        stackId.getStackVersion(), getName(), serviceDisplayName, serviceGroup.getServiceGroupName());
+        stackId.getStackVersion(), getName(), serviceType, serviceGroup.getServiceGroupName());
 
     eventPublisher.publish(event);
   }
@@ -533,8 +536,8 @@ public class ServiceImpl implements Service {
     for (ServiceComponent sc : components.values()) {
       if (!sc.canBeRemoved()) {
         LOG.warn("Found non-removable component when trying to delete service" + ", clusterName="
-            + cluster.getClusterName() + ", serviceName=" + getName() + ", serviceDisplayName="
-            + getServiceDisplayName() + ", componentName=" + sc.getName());
+            + cluster.getClusterName() + ", serviceName=" + getName() + ", serviceType="
+            + getServiceType() + ", componentName=" + sc.getName());
         return false;
       }
     }
@@ -555,7 +558,7 @@ public class ServiceImpl implements Service {
       }
     }
 
-    LOG.info("Deleting all configuration associations for {} on cluster {}", getServiceDisplayName(), cluster.getClusterName());
+    LOG.info("Deleting all configuration associations for {} on cluster {}", getName(), cluster.getClusterName());
 
     List<ServiceConfigEntity> serviceConfigEntities =
       serviceConfigDAO.findByService(cluster.getClusterId(), getName());
@@ -573,13 +576,13 @@ public class ServiceImpl implements Service {
     lock.lock();
     try {
       LOG.info("Deleting all components for service" + ", clusterName=" + cluster.getClusterName()
-          + ", serviceDisplayName=" + getServiceDisplayName());
+          + ", serviceName=" + getName());
       // FIXME check dependencies from meta layer
       for (ServiceComponent component : components.values()) {
         if (!component.canBeRemoved()) {
           throw new AmbariException("Found non removable component when trying to"
               + " delete all components from service" + ", clusterName=" + cluster.getClusterName()
-              + ", getServiceDisplayName=" + getServiceDisplayName() + ", componentName=" + component.getName());
+              + ", serviceName=" + getName() + ", componentName=" + component.getName());
         }
       }
 
@@ -600,12 +603,12 @@ public class ServiceImpl implements Service {
     try {
       ServiceComponent component = getServiceComponent(componentName);
       LOG.info("Deleting servicecomponent for cluster" + ", clusterName=" + cluster.getClusterName()
-          + ", getServiceDisplayName=" + getServiceDisplayName() + ", componentName=" + componentName);
+          + ", serviceName=" + getName() + ", componentName=" + componentName);
       // FIXME check dependencies from meta layer
       if (!component.canBeRemoved()) {
         throw new AmbariException("Could not delete component from cluster"
             + ", clusterName=" + cluster.getClusterName()
-            + ", getServiceDisplayName=" + getServiceDisplayName()
+            + ", serviceName=" + getName()
             + ", componentName=" + componentName);
       }
 
@@ -638,7 +641,7 @@ public class ServiceImpl implements Service {
     }
 
     ServiceRemovedEvent event = new ServiceRemovedEvent(getClusterId(), stackId.getStackName(), stackId.getStackVersion(),
-                                                        getName(), getServiceDisplayName(),
+                                                        getName(), getServiceType(),
                                                         serviceGroup.getServiceGroupName(), components);
 
     eventPublisher.publish(event);

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
index ab0e368..93d81f4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
@@ -172,10 +172,10 @@ public class StackInfo implements Comparable<StackInfo>, Validable {
     return services;
   }
 
-  public ServiceInfo getService(String serviceName) {
+  public ServiceInfo getService(String stackServiceName) {
     Collection<ServiceInfo> services = getServices();
     for (ServiceInfo service : services) {
-      if (service.getName().equals(serviceName)) {
+      if (service.getName().equals(stackServiceName)) {
         return service;
       }
     }
@@ -638,7 +638,8 @@ public class StackInfo implements Comparable<StackInfo>, Validable {
   }
 
   /**
-   * @param xml the version definition parsed from {@link LatestRepoCallable}
+   *
+   * @return the latest version definition for this stack
    */
   public VersionDefinitionXml getLatestVersionDefinition() {
     return latestVersion;
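
The renamed lookup is a linear scan keyed on the stack service name; a generic
stand-in equivalent (the null fallthrough mirrors a not-found result, not necessarily
StackInfo's exact behavior):

import java.util.Collection;
import java.util.function.Function;

class ServiceLookup {
  static <T> T findByName(Collection<T> services, Function<T, String> nameOf, String stackServiceName) {
    for (T service : services) {
      if (stackServiceName.equals(nameOf.apply(service))) {
        return service;
      }
    }
    return null;
  }
}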

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
index 6b32d4c..e39f74e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
@@ -856,8 +856,8 @@ public class ClusterImpl implements Cluster {
   @Override
   public void addService(Service service) {
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Adding a new Service, clusterName={}, clusterId={}, serviceName={} serviceDisplayName={}",
-                 getClusterName(), getClusterId(), service.getName(), service.getServiceDisplayName());
+      LOG.debug("Adding a new Service, clusterName={}, clusterId={}, serviceName={} serviceType={}",
+                 getClusterName(), getClusterId(), service.getName(), service.getServiceType());
     }
     services.put(service.getName(), service);
   }
@@ -866,17 +866,17 @@ public class ClusterImpl implements Cluster {
    * {@inheritDoc}
    */
   @Override
-  public Service addService(ServiceGroup serviceGroup, String serviceName, String serviceDisplayName,
+  public Service addService(ServiceGroup serviceGroup, String serviceName, String serviceType,
                             RepositoryVersionEntity repositoryVersion) throws AmbariException {
     if (services.containsKey(serviceName)) {
-      String message = MessageFormat.format("The {0} service already exists in {1}", serviceDisplayName,
+      String message = MessageFormat.format("The {0} service already exists in {1}", serviceName,
         getClusterName());
 
       throw new AmbariException(message);
     }
 
     @Experimental(feature = ExperimentalFeature.PATCH_UPGRADES)
-    Service service = serviceFactory.createNew(this, serviceGroup, serviceName, serviceDisplayName, repositoryVersion);
+    Service service = serviceFactory.createNew(this, serviceGroup, serviceName, serviceType, repositoryVersion);
     addService(service);
 
     return service;


[40/50] [abbrv] ambari git commit: AMBARI-22160. hadooplzo package installation failed on devdeploys (aonishuk)

Posted by ja...@apache.org.
 AMBARI-22160. hadooplzo package installation failed on devdeploys (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e19db403
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e19db403
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e19db403

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: e19db403e8a3320a5b47503e43e2a35277149a3f
Parents: e037a8d
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Tue Oct 10 15:32:10 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Tue Oct 10 15:32:10 2017 +0300

----------------------------------------------------------------------
 .../python/resource_management/libraries/script/script.py |  4 ++--
 .../HDFS/2.1.0.2.0/package/scripts/hdfs.py                | 10 ++++++----
 .../HDFS/2.1.0.2.0/package/scripts/install_params.py      |  6 ------
 .../HDFS/2.1.0.2.0/package/scripts/params_linux.py        |  2 --
 .../HDFS/3.0.0.3.0/package/scripts/hdfs.py                | 10 ++++++----
 .../HDFS/3.0.0.3.0/package/scripts/install_params.py      |  6 ------
 .../HDFS/3.0.0.3.0/package/scripts/params_linux.py        |  2 --
 .../OOZIE/4.0.0.2.0/package/scripts/oozie.py              |  6 ++++--
 .../OOZIE/4.0.0.2.0/package/scripts/params_linux.py       |  3 ---
 .../OOZIE/4.2.0.3.0/package/scripts/oozie.py              |  5 +++--
 .../OOZIE/4.2.0.3.0/package/scripts/params_linux.py       |  3 ---
 11 files changed, 21 insertions(+), 36 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e19db403/ambari-common/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index 4282213..bf8c0dc 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -778,10 +778,10 @@ class Script(object):
 
     pkg_provider = get_provider("Package")   
     try:
-      available_packages_in_repos = pkg_provider.get_available_packages_in_repos(self.get_config()['repositoryFile']['repositories'])
+      self.available_packages_in_repos = pkg_provider.get_available_packages_in_repos(self.get_config()['repositoryFile']['repositories'])
     except Exception as err:
       Logger.exception("Unable to load available packages")
-      available_packages_in_repos = []
+      self.available_packages_in_repos = []
 
 
   def install_packages(self, env):
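
The fix above works because a value computed in one method must be stored on the
instance to be visible to a later method; a bare local is discarded when the method
returns. A stand-in sketch of the pattern (hypothetical class, not the Ambari Script
API):

class PackageLookup(object):
    def load_available_packages(self, repositories):
        try:
            self.available_packages_in_repos = self._query_repos(repositories)
        except Exception:
            # fall back to an empty list instead of leaving the attribute undefined
            self.available_packages_in_repos = []

    def install_packages(self):
        # reads the attribute populated above; a plain local variable in
        # load_available_packages would already have been lost here
        return list(self.available_packages_in_repos)

    def _query_repos(self, repositories):
        # placeholder for the real package-provider query
        return ["hadooplzo", "hadooplzo-native"]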

http://git-wip-us.apache.org/repos/asf/ambari/blob/e19db403/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
index e054209..07c7616 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
@@ -25,6 +25,7 @@ from resource_management.core.resources import Package
 from resource_management.core.source import Template
 from resource_management.core.resources.service import ServiceConfig
 from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
 import os
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from ambari_commons import OSConst
@@ -138,10 +139,11 @@ def hdfs(name=None):
        content=Template("slaves.j2")
   )
   
-  if params.lzo_enabled and len(params.lzo_packages) > 0:
-      Package(params.lzo_packages,
-              retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
-              retry_count=params.agent_stack_retry_count)
+  if params.lzo_enabled:
+    lzo_packages = get_lzo_packages(params.stack_version_unformatted)
+    Package(lzo_packages,
+            retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
+            retry_count=params.agent_stack_retry_count)
       
 def install_snappy():
   import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/e19db403/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/install_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/install_params.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/install_params.py
index fe488c3..235f231 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/install_params.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/install_params.py
@@ -23,7 +23,6 @@ if OSCheck.is_windows_family():
   exclude_packages = []
 else:
   from resource_management.libraries.functions.default import default
-  from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
   from resource_management.libraries.script.script import Script
 
   _config = Script.get_config()
@@ -32,8 +31,3 @@ else:
   # The logic for LZO also exists in OOZIE's params.py
   io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
   lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
-  lzo_packages = get_lzo_packages(stack_version_unformatted)
-
-  exclude_packages = []
-  if not lzo_enabled:
-    exclude_packages += lzo_packages

http://git-wip-us.apache.org/repos/asf/ambari/blob/e19db403/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
index 76b430b..bb6349b 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
@@ -40,7 +40,6 @@ from resource_management.libraries.functions.get_not_managed_resources import ge
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions.format_jvm_option import format_jvm_option
-from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
 from resource_management.libraries.functions.hdfs_utils import is_https_enabled_in_hdfs
 from resource_management.libraries.functions import is_empty
 from resource_management.libraries.functions.get_architecture import get_architecture
@@ -389,7 +388,6 @@ HdfsResource = functools.partial(
 # The logic for LZO also exists in OOZIE's params.py
 io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
 lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
-lzo_packages = get_lzo_packages(stack_version_unformatted)
   
 name_node_params = default("/commandParams/namenode", None)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e19db403/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
index e054209..07c7616 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
@@ -25,6 +25,7 @@ from resource_management.core.resources import Package
 from resource_management.core.source import Template
 from resource_management.core.resources.service import ServiceConfig
 from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
 import os
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from ambari_commons import OSConst
@@ -138,10 +139,11 @@ def hdfs(name=None):
        content=Template("slaves.j2")
   )
   
-  if params.lzo_enabled and len(params.lzo_packages) > 0:
-      Package(params.lzo_packages,
-              retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
-              retry_count=params.agent_stack_retry_count)
+  if params.lzo_enabled:
+    lzo_packages = get_lzo_packages(params.stack_version_unformatted)
+    Package(lzo_packages,
+            retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
+            retry_count=params.agent_stack_retry_count)
       
 def install_snappy():
   import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/e19db403/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py
index fe488c3..235f231 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py
@@ -23,7 +23,6 @@ if OSCheck.is_windows_family():
   exclude_packages = []
 else:
   from resource_management.libraries.functions.default import default
-  from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
   from resource_management.libraries.script.script import Script
 
   _config = Script.get_config()
@@ -32,8 +31,3 @@ else:
   # The logic for LZO also exists in OOZIE's params.py
   io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
   lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
-  lzo_packages = get_lzo_packages(stack_version_unformatted)
-
-  exclude_packages = []
-  if not lzo_enabled:
-    exclude_packages += lzo_packages

http://git-wip-us.apache.org/repos/asf/ambari/blob/e19db403/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
index de735f4..2fa6208 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
@@ -40,7 +40,6 @@ from resource_management.libraries.functions.get_not_managed_resources import ge
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions.format_jvm_option import format_jvm_option
-from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
 from resource_management.libraries.functions.hdfs_utils import is_https_enabled_in_hdfs
 from resource_management.libraries.functions import is_empty
 from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs, generate_ranger_service_config
@@ -378,7 +377,6 @@ HdfsResource = functools.partial(
 # The logic for LZO also exists in OOZIE's params.py
 io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
 lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
-lzo_packages = get_lzo_packages(stack_version_unformatted)
   
 name_node_params = default("/commandParams/namenode", None)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e19db403/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
index 64f9d54..f215a1e 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
@@ -37,6 +37,7 @@ from resource_management.libraries.functions.copy_tarball import get_current_ver
 from resource_management.libraries.resources.xml_config import XmlConfig
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions.security_commons import update_credential_provider_path
+from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
 from resource_management.core.resources.packaging import Package
 from resource_management.core.shell import as_user, as_sudo, call, checked_call
 from resource_management.core.exceptions import Fail
@@ -305,8 +306,9 @@ def oozie_server_specific(upgrade_type):
     Execute(format('{sudo} chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension-*.jar'),
       not_if  = no_op_test)
 
-  if params.lzo_enabled and len(params.all_lzo_packages) > 0:
-    Package(params.all_lzo_packages,
+  if params.lzo_enabled:
+    all_lzo_packages = get_lzo_packages(params.stack_version_unformatted)
+    Package(all_lzo_packages,
             retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
             retry_count=params.agent_stack_retry_count)
     Execute(format('{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'),

http://git-wip-us.apache.org/repos/asf/ambari/blob/e19db403/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
index b66e157..a0f0672 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
@@ -30,7 +30,6 @@ from resource_management.libraries.functions import get_port_from_url
 from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
 from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
 from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
 from resource_management.libraries.functions.expect import expect
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions.get_architecture import get_architecture
@@ -388,5 +387,3 @@ is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled'
 # The logic for LZO also exists in HDFS' params.py
 io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
 lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
-
-all_lzo_packages = get_lzo_packages(stack_version_unformatted)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e19db403/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py
index d916d3b..0771e93 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py
@@ -275,8 +275,9 @@ def oozie_server_specific():
     Execute(format('{sudo} chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension-*.jar'),
       not_if  = no_op_test)
 
-  if params.lzo_enabled and len(params.all_lzo_packages) > 0:
-    Package(params.all_lzo_packages,
+  if params.lzo_enabled:
+    all_lzo_packages = get_lzo_packages(params.stack_version_unformatted)
+    Package(all_lzo_packages,
             retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
             retry_count=params.agent_stack_retry_count)
     Execute(format('{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'),

http://git-wip-us.apache.org/repos/asf/ambari/blob/e19db403/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py
index d30a465..70b89b7 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py
@@ -28,7 +28,6 @@ from resource_management.libraries.functions import get_port_from_url
 from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
 from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
 from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
 from resource_management.libraries.functions.expect import expect
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions.get_architecture import get_architecture
@@ -370,5 +369,3 @@ is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled'
 # The logic for LZO also exists in HDFS' params.py
 io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
 lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
-
-all_lzo_packages = get_lzo_packages(stack_version_unformatted)


[16/50] [abbrv] ambari git commit: AMBARI-22164: Update services API to use service_name, service_type instead of service_display_name, service_name (jluniya)

Posted by ja...@apache.org.
AMBARI-22164: Update services API to use service_name, service_type instead of service_display_name, service_name (jluniya)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ba1ec6dd
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ba1ec6dd
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ba1ec6dd

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: ba1ec6ddb3fd8f5f33bbf6922eb58ca94866d1f7
Parents: 53333da
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Sun Oct 8 21:40:06 2017 -0700
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Sun Oct 8 21:40:06 2017 -0700

----------------------------------------------------------------------
 .../ServiceComponentNotFoundException.java      |   4 +-
 .../ambari/server/ServiceNotFoundException.java |   4 +-
 .../ambari/server/agent/HeartbeatMonitor.java   |  10 +-
 .../ambari/server/agent/HeartbeatProcessor.java |   6 +-
 .../ambari/server/agent/StatusCommand.java      |  10 +-
 .../server/api/services/AmbariMetaInfo.java     |  74 +++++-----
 .../server/api/services/ComponentService.java   |  30 ++--
 .../server/api/services/ServiceService.java     | 146 +++++++++----------
 .../checks/SecondaryNamenodeDeletedCheck.java   |   2 +-
 .../AmbariManagementControllerImpl.java         |  50 +++----
 .../controller/ServiceComponentHostRequest.java |  16 +-
 .../ServiceComponentHostResponse.java           |  18 +--
 .../controller/ServiceComponentRequest.java     |  30 ++--
 .../controller/ServiceComponentResponse.java    |  18 +--
 .../server/controller/ServiceRequest.java       |  20 +--
 .../server/controller/ServiceResponse.java      |  20 +--
 .../internal/ClientConfigResourceProvider.java  |   2 +-
 .../internal/ComponentResourceProvider.java     |  38 ++---
 .../internal/HostComponentResourceProvider.java |  28 ++--
 .../internal/ServiceResourceProvider.java       | 112 +++++++-------
 .../controller/utilities/DatabaseChecker.java   |  43 +++---
 .../events/ServiceComponentInstalledEvent.java  |   7 +-
 .../ServiceComponentUninstalledEvent.java       |   6 +-
 .../ambari/server/events/ServiceEvent.java      |  12 +-
 .../server/events/ServiceInstalledEvent.java    |   8 +-
 .../server/events/ServiceRemovedEvent.java      |   6 +-
 .../upgrade/HostVersionOutOfSyncListener.java   |  16 +-
 .../orm/entities/ClusterServiceEntity.java      |  16 +-
 .../org/apache/ambari/server/state/Cluster.java |   2 +-
 .../org/apache/ambari/server/state/Service.java |   4 +-
 .../ambari/server/state/ServiceComponent.java   |   2 +-
 .../server/state/ServiceComponentHost.java      |   6 +-
 .../server/state/ServiceComponentImpl.java      |   4 +-
 .../ambari/server/state/ServiceFactory.java     |   6 +-
 .../apache/ambari/server/state/ServiceImpl.java |  51 ++++---
 .../apache/ambari/server/state/StackInfo.java   |   7 +-
 .../server/state/cluster/ClusterImpl.java       |  10 +-
 .../ambari/server/state/host/HostImpl.java      |   6 +-
 .../state/repository/VersionDefinitionXml.java  |   2 +-
 .../svccomphost/ServiceComponentHostImpl.java   |  14 +-
 .../ServiceComponentHostSummary.java            |   8 +-
 .../main/resources/Ambari-DDL-Derby-CREATE.sql  |   2 +-
 .../main/resources/Ambari-DDL-MySQL-CREATE.sql  |   2 +-
 .../main/resources/Ambari-DDL-Oracle-CREATE.sql |   2 +-
 .../resources/Ambari-DDL-Postgres-CREATE.sql    |   2 +-
 .../resources/Ambari-DDL-SQLAnywhere-CREATE.sql |   2 +-
 .../resources/Ambari-DDL-SQLServer-CREATE.sql   |   2 +-
 .../src/main/resources/key_properties.json      |   2 +-
 .../src/main/resources/properties.json          |   2 +-
 .../app/controllers/global/update_controller.js |   2 +-
 .../main/admin/kerberos/step3_controller.js     |   2 +-
 .../controllers/main/service/add_controller.js  |   2 +-
 .../app/controllers/wizard/step8_controller.js  |   4 +-
 .../app/controllers/wizard/step9_controller.js  |   2 +-
 ambari-web/app/mappers/service_mapper.js        |  12 +-
 .../app/mappers/service_metrics_mapper.js       |  34 ++---
 .../app/mixins/common/configs/configs_saver.js  |   2 +-
 .../wizard/wizardProgressPageController.js      |   4 +-
 ambari-web/app/utils/ajax/ajax.js               |   6 +-
 59 files changed, 486 insertions(+), 474 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/ServiceComponentNotFoundException.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/ServiceComponentNotFoundException.java b/ambari-server/src/main/java/org/apache/ambari/server/ServiceComponentNotFoundException.java
index 698f2ab..b570287 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/ServiceComponentNotFoundException.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/ServiceComponentNotFoundException.java
@@ -23,12 +23,12 @@ public class ServiceComponentNotFoundException
     extends ObjectNotFoundException {
 
   public ServiceComponentNotFoundException (String clusterName,
-      String serviceName, String serviceDisplayName, String serviceGroupName, String serviceComponentName) {
+      String serviceName, String serviceType, String serviceGroupName, String serviceComponentName) {
     super("ServiceComponent not found"
         + ", clusterName=" + clusterName
-        + ", serviceDisplayName=" + serviceDisplayName
         + ", serviceGroupName=" + serviceGroupName
         + ", serviceName=" + serviceName
+        + ", serviceType=" + serviceType
         + ", serviceComponentName=" + serviceComponentName);
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/ServiceNotFoundException.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/ServiceNotFoundException.java b/ambari-server/src/main/java/org/apache/ambari/server/ServiceNotFoundException.java
index 24cd7ae..d374b74 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/ServiceNotFoundException.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/ServiceNotFoundException.java
@@ -21,10 +21,10 @@ package org.apache.ambari.server;
 @SuppressWarnings("serial")
 public class ServiceNotFoundException extends ObjectNotFoundException {
 
-  public ServiceNotFoundException(String clusterName, String serviceDisplayName) {
+  public ServiceNotFoundException(String clusterName, String serviceName) {
     super("Service not found"
         + ", clusterName=" + clusterName
-        + ", serviceDisplayName=" + serviceDisplayName);
+        + ", serviceName=" + serviceName);
   }
 
   public ServiceNotFoundException(String clusterName, Long serviceId) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
index a5b4d08..8d633d5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
@@ -162,7 +162,7 @@ public class HeartbeatMonitor implements Runnable {
         // mark all components that are not clients with unknown status
         for (Cluster cluster : clusters.getClustersForHost(hostObj.getHostName())) {
           for (ServiceComponentHost sch : cluster.getServiceComponentHosts(hostObj.getHostName())) {
-            Service s = cluster.getService(sch.getServiceDisplayName());
+            Service s = cluster.getService(sch.getServiceName());
             ServiceComponent sc = s.getServiceComponent(sch.getServiceComponentName());
             if (!sc.isClientComponent() &&
               !sch.getState().equals(State.INIT) &&
@@ -237,16 +237,16 @@ public class HeartbeatMonitor implements Runnable {
   private StatusCommand createStatusCommand(String hostname, Cluster cluster,
       ServiceComponentHost sch, Map<String, DesiredConfig> desiredConfigs) throws AmbariException {
     String serviceName = sch.getServiceName();
-    String serviceDisplayName = sch.getServiceDisplayName();
+    String serviceType = sch.getServiceType();
     String componentName = sch.getServiceComponentName();
 
     StackId stackId = sch.getDesiredStackId();
 
     ServiceInfo serviceInfo = ambariMetaInfo.getService(stackId.getStackName(),
-        stackId.getStackVersion(), serviceDisplayName);
+        stackId.getStackVersion(), serviceType);
     ComponentInfo componentInfo = ambariMetaInfo.getComponent(
             stackId.getStackName(), stackId.getStackVersion(),
-            serviceDisplayName, componentName);
+            serviceType, componentName);
     StackInfo stackInfo = ambariMetaInfo.getStack(stackId.getStackName(),
         stackId.getStackVersion());
 
@@ -307,7 +307,7 @@ public class HeartbeatMonitor implements Runnable {
     StatusCommand statusCmd = new StatusCommand();
     statusCmd.setClusterName(cluster.getClusterName());
     statusCmd.setServiceName(serviceName);
-    statusCmd.setServiceDisplayName(serviceDisplayName);
+    statusCmd.setServiceType(serviceType);
     statusCmd.setComponentName(componentName);
     statusCmd.setConfigurations(configurations);
     statusCmd.setConfigurationAttributes(configurationAttributes);

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java
index e99b0e0..3dae84b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java
@@ -306,7 +306,7 @@ public class HeartbeatProcessor extends AbstractService{
 
           ComponentInfo componentInfo =
               ambariMetaInfo.getComponent(stackId.getStackName(),
-                  stackId.getStackVersion(), scHost.getServiceDisplayName(),
+                  stackId.getStackVersion(), scHost.getServiceType(),
                   scHost.getServiceComponentName());
 
           String status = scHost.getState().name();
@@ -509,8 +509,8 @@ public class HeartbeatProcessor extends AbstractService{
                         "RESTART".equals(report.getCustomCommand()))))
                 && null != report.getConfigurationTags()
                 && !report.getConfigurationTags().isEmpty()) {
-              LOG.info("Updating applied config on serviceDisplayName " + scHost.getServiceDisplayName() +
-                  ", serviceName " + scHost.getServiceName() +
+              LOG.info("Updating applied config on serviceName " + scHost.getServiceName() +
+                  ", serviceType " + scHost.getServiceType() +
                   ", component " + scHost.getServiceComponentName() + ", host " + scHost.getHostName());
               scHost.updateActualConfigs(report.getConfigurationTags());
               scHost.setRestartRequired(false);

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/agent/StatusCommand.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/StatusCommand.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/StatusCommand.java
index d778999..a5b3622 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/StatusCommand.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/StatusCommand.java
@@ -39,8 +39,8 @@ public class StatusCommand extends AgentCommand {
   @SerializedName("serviceName")
   private String serviceName;
 
-  @SerializedName("serviceDisplayName")
-  private String serviceDisplayName;
+  @SerializedName("serviceType")
+  private String serviceType;
 
   @SerializedName("componentName")
   private String componentName;
@@ -120,10 +120,10 @@ public class StatusCommand extends AgentCommand {
     this.serviceName = serviceName;
   }
 
-  public String getServiceDisplayName() { return serviceDisplayName; }
+  public String getServiceType() { return serviceType; }
 
-  public void setServiceDisplayName(String serviceDisplayName) {
-    this.serviceDisplayName = serviceDisplayName;
+  public void setServiceType(String serviceType) {
+    this.serviceType = serviceType;
   }
 
   public String getComponentName() {

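Because @SerializedName controls the JSON key on the wire, this rename changes the status command payload the server sends to agents: the serviceDisplayName key is replaced by serviceType, alongside the existing serviceName key. An illustrative sketch of the affected keys (all values are hypothetical; only the keys follow from the annotations and the setters in the HeartbeatMonitor diff above):

  status_command = {
      "clusterName": "c1",          # set via statusCmd.setClusterName(...)
      "serviceName": "HDFS",        # user-facing service name
      "serviceType": "HDFS",        # stack service type; this key was serviceDisplayName before this commit
      "componentName": "NAMENODE",  # set via statusCmd.setComponentName(...)
  }
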
http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
index fd15105..8883ced 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
@@ -287,34 +287,34 @@ public class AmbariMetaInfo {
   /**
    * Get components by service
    *
-   * @param stackName     stack name
-   * @param version       stack version
-   * @param serviceName   service name
+   * @param stackName         stack name
+   * @param version           stack version
+   * @param stackServiceName  stack service name
    * @return List of ComponentInfo objects
    * @throws AmbariException
    */
-  public List<ComponentInfo> getComponentsByService(String stackName, String version, String serviceName)
+  public List<ComponentInfo> getComponentsByService(String stackName, String version, String stackServiceName)
       throws AmbariException {
 
     ServiceInfo service;
     try {
-      service = getService(stackName, version, serviceName);
+      service = getService(stackName, version, stackServiceName);
     } catch (StackAccessException e) {
       throw new ParentObjectNotFoundException("Parent Service resource doesn't exist. stackName=" +
-          stackName + ", stackVersion=" + version + ", serviceName=" + serviceName);
+          stackName + ", stackVersion=" + version + ", stackServiceName=" + stackServiceName);
     }
     return service.getComponents();
   }
 
-  public ComponentInfo getComponent(String stackName, String version, String serviceDisplayName,
+  public ComponentInfo getComponent(String stackName, String version, String stackServiceName,
                                     String componentName) throws AmbariException {
 
-    ComponentInfo component = getService(stackName, version, serviceDisplayName).getComponentByName(componentName);
+    ComponentInfo component = getService(stackName, version, stackServiceName).getComponentByName(componentName);
 
     if (component == null) {
       throw new StackAccessException("stackName=" + stackName
           + ", stackVersion=" + version
-          + ", serviceDisplayName=" + serviceDisplayName
+          + ", stackServiceName=" + stackServiceName
           + ", componentName=" + componentName);
     }
     return component;
@@ -459,10 +459,10 @@ public class AmbariMetaInfo {
   /*
    * support isValidService(), isValidComponent for a given stack/version
    */
-  public boolean isValidService(String stackName, String version, String serviceName){
+  public boolean isValidService(String stackName, String version, String stackServiceName){
 
     try {
-      getService(stackName, version, serviceName);
+      getService(stackName, version, stackServiceName);
       return true;
     } catch (AmbariException e) {
       return false;
@@ -473,9 +473,9 @@ public class AmbariMetaInfo {
    * support isValidService(), isValidComponent for a given stack/version
    */
   public boolean isValidServiceComponent(String stackName, String version,
-                                         String serviceName, String componentName) {
+                                         String stackServiceName, String componentName) {
     try {
-      getService(stackName, version, serviceName).getComponentByName(componentName);
+      getService(stackName, version, stackServiceName).getComponentByName(componentName);
       return true;
     } catch (AmbariException e) {
       return false;
@@ -549,30 +549,30 @@ public class AmbariMetaInfo {
    */
   public ServiceInfo getService(Service service) throws AmbariException {
     StackId stackId = service.getDesiredStackId();
-    return getService(stackId.getStackName(), stackId.getStackVersion(), service.getName());
+    return getService(stackId.getStackName(), stackId.getStackVersion(), service.getServiceType());
   }
 
-  public ServiceInfo getService(String stackName, String version, String serviceName) throws AmbariException {
-    ServiceInfo service = getStack(stackName, version).getService(serviceName);
+  public ServiceInfo getService(String stackName, String version, String stackServiceName) throws AmbariException {
+    ServiceInfo service = getStack(stackName, version).getService(stackServiceName);
 
     if (service == null) {
       throw new StackAccessException("stackName=" + stackName + ", stackVersion=" +
-                                     version + ", serviceName=" + serviceName);
+                                     version + ", stackServiceName=" + stackServiceName);
     }
 
     return service;
   }
 
-  public boolean isServiceRemovedInStack(String stackName, String version, String serviceName) throws AmbariException{
+  public boolean isServiceRemovedInStack(String stackName, String version, String stackServiceName) throws AmbariException{
     StackInfo stack = getStack(stackName, version);
     List<String> removedServices = stack.getRemovedServices();
-    return removedServices.contains(serviceName);
+    return removedServices.contains(stackServiceName);
   }
 
-  public boolean isServiceWithNoConfigs(String stackName, String version, String serviceName) throws AmbariException{
+  public boolean isServiceWithNoConfigs(String stackName, String version, String stackServiceName) throws AmbariException{
     StackInfo stack = getStack(stackName, version);
     List<String> servicesWithNoConfigs = stack.getServicesWithNoConfigs();
-    return servicesWithNoConfigs.contains(serviceName);
+    return servicesWithNoConfigs.contains(stackServiceName);
   }
 
   public Collection<String> getMonitoringServiceNames(String stackName, String version)
@@ -730,10 +730,10 @@ public class AmbariMetaInfo {
     return result;
   }
 
-  public Set<PropertyInfo> getServiceProperties(String stackName, String version, String serviceName)
+  public Set<PropertyInfo> getServiceProperties(String stackName, String version, String stackServiceName)
       throws AmbariException {
 
-    return new HashSet<>(getService(stackName, version, serviceName).getProperties());
+    return new HashSet<>(getService(stackName, version, stackServiceName).getProperties());
   }
 
   public Set<PropertyInfo> getStackProperties(String stackName, String version)
@@ -742,17 +742,17 @@ public class AmbariMetaInfo {
     return new HashSet<>(getStack(stackName, version).getProperties());
   }
 
-  public Set<PropertyInfo> getPropertiesByName(String stackName, String version, String serviceName, String propertyName)
+  public Set<PropertyInfo> getPropertiesByName(String stackName, String version, String stackServiceName, String propertyName)
       throws AmbariException {
 
-    Set<PropertyInfo> properties = serviceName == null ?
+    Set<PropertyInfo> properties = stackServiceName == null ?
       getStackProperties(stackName, version)
-      : getServiceProperties(stackName, version, serviceName);
+      : getServiceProperties(stackName, version, stackServiceName);
 
     if (properties.size() == 0) {
       throw new StackAccessException("stackName=" + stackName
           + ", stackVersion=" + version
-          + ", serviceName=" + serviceName
+          + ", stackServiceName=" + stackServiceName
           + ", propertyName=" + propertyName);
     }
 
@@ -767,7 +767,7 @@ public class AmbariMetaInfo {
     if (propertyResult.isEmpty()) {
       throw new StackAccessException("stackName=" + stackName
           + ", stackVersion=" + version
-          + ", serviceName=" + serviceName
+          + ", stackServiceName=" + stackServiceName
           + ", propertyName=" + propertyName);
     }
 
@@ -911,12 +911,12 @@ public class AmbariMetaInfo {
    * Return metrics for a stack service.
    */
   public Map<String, Map<String, List<MetricDefinition>>> getServiceMetrics(String stackName,
-            String stackVersion, String serviceName) throws AmbariException {
+            String stackVersion, String stackServiceName) throws AmbariException {
 
-    ServiceInfo svc = getService(stackName, stackVersion, serviceName);
+    ServiceInfo svc = getService(stackName, stackVersion, stackServiceName);
 
     if (null == svc.getMetricsFile() || !svc.getMetricsFile().exists()) {
-      LOG.debug("Metrics file for {}/{}/{} not found.", stackName, stackVersion, serviceName);
+      LOG.debug("Metrics file for {}/{}/{} not found.", stackName, stackVersion, stackServiceName);
       return null;
     }
 
@@ -1022,11 +1022,11 @@ public class AmbariMetaInfo {
    * @return the list of defined metrics.
    */
   public List<MetricDefinition> getMetrics(String stackName, String stackVersion,
-      String serviceName, String componentName, String metricType)
+      String stackServiceName, String componentName, String metricType)
       throws AmbariException {
 
     Map<String, Map<String, List<MetricDefinition>>> map = getServiceMetrics(
-      stackName, stackVersion, serviceName);
+      stackName, stackVersion, stackServiceName);
 
     if (map != null && map.containsKey(componentName)) {
       if (map.get(componentName).containsKey(metricType)) {
@@ -1044,16 +1044,16 @@ public class AmbariMetaInfo {
    *          the stack name
    * @param stackVersion
    *          the stack version
-   * @param serviceName
-   *          the service name
+   * @param stackServiceName
+   *          the stack service name
    * @return the alert definitions for a stack or an empty list if none (never
    *         {@code null}).
    * @throws AmbariException
    */
   public Set<AlertDefinition> getAlertDefinitions(String stackName, String stackVersion,
-                                                  String serviceName) throws AmbariException {
+                                                  String stackServiceName) throws AmbariException {
 
-    ServiceInfo svc = getService(stackName, stackVersion, serviceName);
+    ServiceInfo svc = getService(stackName, stackVersion, stackServiceName);
     return getAlertDefinitions(svc);
   }
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/api/services/ComponentService.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/ComponentService.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/ComponentService.java
index dfe117e..60230f6 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/ComponentService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/ComponentService.java
@@ -62,23 +62,23 @@ public class ComponentService extends BaseService {
   /**
    * Parent service name.
    */
-  private String m_serviceDisplayName;
+  private String m_serviceName;
 
   /**
    * Constructor.
    *
    * @param clusterName           cluster name
    * @param serviceGroupName      servicegroup name
-   * @param serviceDisplayName    service display name
+   * @param serviceName           service name
    */
-  public ComponentService(String clusterName, String serviceGroupName, String serviceDisplayName) {
+  public ComponentService(String clusterName, String serviceGroupName, String serviceName) {
     m_clusterName = clusterName;
     m_serviceGroupName = serviceGroupName;
-    m_serviceDisplayName = serviceDisplayName;
+    m_serviceName = serviceName;
   }
 
   /**
-   * Handles GET: /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceDisplayName}/components/{componentName}
+   * Handles GET: /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceName}/components/{componentName}
    * Get a specific component.
    *
    * @param headers       http headers
@@ -99,7 +99,7 @@ public class ComponentService extends BaseService {
   }
 
   /**
-   * Handles GET: /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceDisplayName}/components
+   * Handles GET: /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceName}/components
    * Get all components for a service.
    *
    * @param headers http headers
@@ -119,7 +119,7 @@ public class ComponentService extends BaseService {
   }
 
   /**
-   * Handles: POST /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceDisplayName}/components
+   * Handles: POST /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceName}/components
    * Create components by specifying an array of components in the http body.
    * This is used to create multiple components in a single request.
    *
@@ -138,7 +138,7 @@ public class ComponentService extends BaseService {
   }
 
   /**
-   * Handles: POST /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceDisplayName}/components/{componentID}
+   * Handles: POST /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceName}/components/{componentID}
    * Create a specific component.
    *
    * @param body          http body
@@ -159,7 +159,7 @@ public class ComponentService extends BaseService {
   }
 
   /**
-   * Handles: PUT /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceDisplayName}/components/{componentID}
+   * Handles: PUT /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceName}/components/{componentID}
    * Update a specific component.
    *
    * @param body          http body
@@ -179,7 +179,7 @@ public class ComponentService extends BaseService {
   }
 
   /**
-   * Handles: PUT /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceDisplayName}/components
+   * Handles: PUT /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceName}/components
    * Update multiple components.
    *
    * @param body          http body
@@ -196,7 +196,7 @@ public class ComponentService extends BaseService {
   }
 
   /**
-   * Handles: DELETE /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceDisplayName}/components/{componentID}
+   * Handles: DELETE /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceName}/components/{componentID}
    * Delete a specific component.
    *
    * @param headers     http headers
@@ -225,7 +225,7 @@ public class ComponentService extends BaseService {
     Map<Resource.Type,String> mapIds = new HashMap<Resource.Type, String>();
     mapIds.put(Resource.Type.Cluster, m_clusterName == null? null : m_clusterName.toString());
     mapIds.put(Resource.Type.ServiceGroup, m_serviceGroupName == null? null : m_serviceGroupName.toString());
-    mapIds.put(Resource.Type.Service, m_serviceDisplayName == null? null : m_serviceDisplayName.toString());
+    mapIds.put(Resource.Type.Service, m_serviceName == null? null : m_serviceName.toString());
     mapIds.put(Resource.Type.Component, componentName);
 
     return createResource(Resource.Type.Component, mapIds);
@@ -235,15 +235,15 @@ public class ComponentService extends BaseService {
                                       String componentName) {
     Map<Resource.Type,String> mapIds = new HashMap<>();
     mapIds.put(Resource.Type.Cluster, m_clusterName);
-    mapIds.put(Resource.Type.Service, m_serviceDisplayName);
+    mapIds.put(Resource.Type.Service, m_serviceName);
     mapIds.put(Resource.Type.Component, componentName);
     String filePrefixName;
 
     if (StringUtils.isEmpty(componentName)) {
-      if (StringUtils.isEmpty(m_serviceDisplayName)) {
+      if (StringUtils.isEmpty(m_serviceName)) {
         filePrefixName = m_clusterName + "(" + Resource.InternalType.Cluster.toString().toUpperCase()+")";
       } else {
-        filePrefixName = m_serviceDisplayName + "(" + Resource.InternalType.Service.toString().toUpperCase()+")";
+        filePrefixName = m_serviceName + "(" + Resource.InternalType.Service.toString().toUpperCase()+")";
       }
     } else {
       filePrefixName = componentName;

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/api/services/ServiceService.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/ServiceService.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/ServiceService.java
index 7a5e9ab..12f4bca 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/ServiceService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/ServiceService.java
@@ -78,16 +78,16 @@ public class ServiceService extends BaseService {
   }
 
   /**
-   * Handles URL: /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceDisplayName}
+   * Handles URL: /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceName}
    * Get a specific service.
    *
    * @param headers             http headers
    * @param ui                  uri info
-   * @param serviceDisplayName  service display name
+   * @param serviceName  service name
    * @return service resource representation
    */
   @GET
-  @Path("{serviceDisplayName}")
+  @Path("{serviceName}")
   @Produces(MediaType.TEXT_PLAIN)
   @ApiOperation(value = "Get the details of a service",
       nickname = "ServiceService#getService",
@@ -104,10 +104,10 @@ public class ServiceService extends BaseService {
       @ApiResponse(code = HttpStatus.SC_INTERNAL_SERVER_ERROR, message = MSG_SERVER_ERROR)
   })
   public Response getService(String body, @Context HttpHeaders headers, @Context UriInfo ui,
-                             @ApiParam @PathParam("serviceDisplayName") String serviceDisplayName) {
+                             @ApiParam @PathParam("serviceName") String serviceName) {
 
     return handleRequest(headers, body, ui, Request.Type.GET,
-        createServiceResource(m_clusterName, m_serviceGroupName, serviceDisplayName));
+        createServiceResource(m_clusterName, m_serviceGroupName, serviceName));
   }
 
   /**
@@ -147,17 +147,17 @@ public class ServiceService extends BaseService {
   }
 
   /**
-   * Handles: POST /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceDisplayName}
+   * Handles: POST /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceName}
    * Create a specific service.
    *
    * @param body                http body
    * @param headers             http headers
    * @param ui                  uri info
-   * @param serviceDisplayName  service display name
+   * @param serviceName  service name
    * @return information regarding the created service
    */
   @POST
-  @Path("{serviceDisplayName}")
+  @Path("{serviceName}")
   @Produces(MediaType.TEXT_PLAIN)
   @ApiOperation(value = "Creates a service",
       nickname = "ServiceService#createServices"
@@ -176,9 +176,9 @@ public class ServiceService extends BaseService {
       @ApiResponse(code = HttpStatus.SC_INTERNAL_SERVER_ERROR, message = MSG_SERVER_ERROR),
   })
   public Response createService(String body, @Context HttpHeaders headers, @Context UriInfo ui,
-                                @ApiParam @PathParam("serviceDisplayName") String serviceDisplayName) {
+                                @ApiParam @PathParam("serviceName") String serviceName) {
     return handleRequest(headers, body, ui, Request.Type.POST,
-        createServiceResource(m_clusterName, m_serviceGroupName, serviceDisplayName));
+        createServiceResource(m_clusterName, m_serviceGroupName, serviceName));
   }
 
   /**
@@ -215,17 +215,17 @@ public class ServiceService extends BaseService {
   }
 
   /**
-   * Handles: PUT /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceDisplayName}
+   * Handles: PUT /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceName}
    * Update a specific service.
    *
    * @param body                http body
    * @param headers             http headers
    * @param ui                  uri info
-   * @param serviceDisplayName service display name
+   * @param serviceName service name
    * @return information regarding the updated service
    */
   @PUT
-  @Path("{serviceDisplayName}")
+  @Path("{serviceName}")
   @Produces(MediaType.TEXT_PLAIN)
   @ApiOperation(value = "Updates a service",
       nickname = "ServiceService#updateService"
@@ -243,8 +243,8 @@ public class ServiceService extends BaseService {
       @ApiResponse(code = HttpStatus.SC_INTERNAL_SERVER_ERROR, message = MSG_SERVER_ERROR),
   })
   public Response updateService(String body, @Context HttpHeaders headers, @Context UriInfo ui,
-                                @ApiParam @PathParam("serviceDisplayName") String serviceDisplayName) {
-    return handleRequest(headers, body, ui, Request.Type.PUT, createServiceResource(m_clusterName, m_serviceGroupName, serviceDisplayName));
+                                @ApiParam @PathParam("serviceName") String serviceName) {
+    return handleRequest(headers, body, ui, Request.Type.PUT, createServiceResource(m_clusterName, m_serviceGroupName, serviceName));
   }
 
   /**
@@ -279,16 +279,16 @@ public class ServiceService extends BaseService {
   }
 
   /**
-   * Handles: DELETE /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceDisplayName}
+   * Handles: DELETE /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceName}
    * Delete a specific service.
    *
    * @param headers             http headers
    * @param ui                  uri info
-   * @param serviceDisplayName  service display name
+   * @param serviceName  service name
    * @return information regarding the deleted service
    */
   @DELETE
-  @Path("{serviceDisplayName}")
+  @Path("{serviceName}")
   @Produces(MediaType.TEXT_PLAIN)
   @ApiOperation(value = "Deletes a service",
       nickname = "ServiceService#deleteService"
@@ -301,47 +301,47 @@ public class ServiceService extends BaseService {
       @ApiResponse(code = HttpStatus.SC_INTERNAL_SERVER_ERROR, message = MSG_SERVER_ERROR),
   })
   public Response deleteService(@Context HttpHeaders headers, @Context UriInfo ui,
-                                @ApiParam(required = true) @PathParam("serviceDisplayName") String serviceDisplayName) {
-    return handleRequest(headers, null, ui, Request.Type.DELETE, createServiceResource(m_clusterName, m_serviceGroupName, serviceDisplayName));
+                                @ApiParam(required = true) @PathParam("serviceName") String serviceName) {
+    return handleRequest(headers, null, ui, Request.Type.DELETE, createServiceResource(m_clusterName, m_serviceGroupName, serviceName));
   }
 
   /**
    * Get the components sub-resource.
    *
-   * @param serviceDisplayName service display name
+   * @param serviceName service name
    * @return the components service
    */
-  @Path("{serviceDisplayName}/components")
+  @Path("{serviceName}/components")
   // TODO: find a way to handle this with Swagger (refactor or custom annotation?)
-  public ComponentService getComponentHandler(@PathParam("serviceDisplayName") String serviceDisplayName) {
+  public ComponentService getComponentHandler(@PathParam("serviceName") String serviceName) {
 
-    return new ComponentService(m_clusterName, m_serviceGroupName, serviceDisplayName);
+    return new ComponentService(m_clusterName, m_serviceGroupName, serviceName);
   }
 
   /**
    * Gets the alerts sub-resource.
    */
-  @Path("{serviceDisplayName}/alerts")
+  @Path("{serviceName}/alerts")
   // TODO: find a way to handle this with Swagger (refactor or custom annotation?)
   public AlertService getAlertHandler(
-      @PathParam("serviceDisplayName") String serviceDisplayName) {
-    return new AlertService(m_clusterName, serviceDisplayName, null);
+      @PathParam("serviceName") String serviceName) {
+    return new AlertService(m_clusterName, serviceName, null);
   }
 
   /**
-   * Handles: POST /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceDisplayName}/artifacts/{artifactName}
+   * Handles: POST /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceName}/artifacts/{artifactName}
    * Create a service artifact instance.
    *
    * @param body                http body
    * @param headers             http headers
    * @param ui                  uri info
-   * @param serviceDisplayName  service display name
+   * @param serviceName  service name
    * @param artifactName  artifact name
    *
    * @return information regarding the created artifact
    */
   @POST
-  @Path("{serviceDisplayName}/artifacts/{artifactName}")
+  @Path("{serviceName}/artifacts/{artifactName}")
   @Produces(MediaType.TEXT_PLAIN)
   @ApiOperation(value = "Creates a service artifact",
       nickname = "ServiceService#createArtifact"
@@ -362,26 +362,26 @@ public class ServiceService extends BaseService {
   public Response createArtifact(String body,
                                  @Context HttpHeaders headers,
                                  @Context UriInfo ui,
-                                 @ApiParam @PathParam("serviceDisplayName") String serviceDisplayName,
+                                 @ApiParam @PathParam("serviceName") String serviceName,
                                  @ApiParam @PathParam("artifactName") String artifactName) {
 
     return handleRequest(headers, body, ui, Request.Type.POST,
-        createArtifactResource(m_clusterName, m_serviceGroupName, serviceDisplayName, artifactName));
+        createArtifactResource(m_clusterName, m_serviceGroupName, serviceName, artifactName));
   }
 
   /**
-   * Handles: GET /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceDisplayName}/artifacts
+   * Handles: GET /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceName}/artifacts
    * Get all service artifacts.
    *
    * @param body                    http body
    * @param headers                 http headers
    * @param ui                      uri info
-   * @param serviceDisplayName      service display name
+   * @param serviceName      service name
    *
    * @return artifact collection resource representation
    */
   @GET
-  @Path("{serviceDisplayName}/artifacts")
+  @Path("{serviceName}/artifacts")
   @Produces(MediaType.TEXT_PLAIN)
   @ApiOperation(value = "Get all service artifacts",
       nickname = "ServiceService#getArtifacts",
@@ -406,26 +406,26 @@ public class ServiceService extends BaseService {
   public Response getArtifacts(String body,
                               @Context HttpHeaders headers,
                               @Context UriInfo ui,
-                              @PathParam("serviceDisplayName") String serviceDisplayName) {
+                              @PathParam("serviceName") String serviceName) {
 
     return handleRequest(headers, body, ui, Request.Type.GET,
-        createArtifactResource(m_clusterName, m_serviceGroupName, serviceDisplayName, null));
+        createArtifactResource(m_clusterName, m_serviceGroupName, serviceName, null));
   }
 
   /**
-   * Handles: GET /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceDisplayName}/artifacts/{artifactName}
+   * Handles: GET /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceName}/artifacts/{artifactName}
   * Get a service artifact instance.
    *
    * @param body                    http body
    * @param headers                 http headers
    * @param ui                      uri info
-   * @param serviceDisplayName      service display name
+   * @param serviceName      service name
    * @param artifactName  artifact name
    *
    * @return artifact instance resource representation
    */
   @GET
-  @Path("{serviceDisplayName}/artifacts/{artifactName}")
+  @Path("{serviceName}/artifacts/{artifactName}")
   @Produces(MediaType.TEXT_PLAIN)
   @ApiOperation(value = "Get the details of a service artifact",
       nickname = "ServiceService#getArtifact",
@@ -450,24 +450,24 @@ public class ServiceService extends BaseService {
   public Response getArtifact(String body,
                                  @Context HttpHeaders headers,
                                  @Context UriInfo ui,
-                                 @ApiParam @PathParam("serviceDisplayName") String serviceDisplayName,
+                                 @ApiParam @PathParam("serviceName") String serviceName,
                                  @ApiParam @PathParam("artifactName") String artifactName) {
     return handleRequest(headers, body, ui, Request.Type.GET,
-        createArtifactResource(m_clusterName, m_serviceGroupName, serviceDisplayName, artifactName));
+        createArtifactResource(m_clusterName, m_serviceGroupName, serviceName, artifactName));
   }
 
   /**
-   * Handles: PUT /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceDisplayName}/artifacts
+   * Handles: PUT /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceName}/artifacts
    * Update all artifacts matching the provided predicate.
    *
    * @param headers             http headers
    * @param ui                  uri info
-   * @param serviceDisplayName  service display name
+   * @param serviceName  service name
    *
    * @return information regarding the updated artifacts
    */
   @PUT
-  @Path("{serviceDisplayName}/artifacts")
+  @Path("{serviceName}/artifacts")
   @Produces(MediaType.TEXT_PLAIN)
   @ApiOperation(value = "Updates multiple artifacts",
       nickname = "ServiceService#updateArtifacts"
@@ -487,25 +487,25 @@ public class ServiceService extends BaseService {
   public Response updateArtifacts(String body,
                                   @Context HttpHeaders headers,
                                   @Context UriInfo ui,
-                                  @ApiParam @PathParam("serviceDisplayName") String serviceDisplayName) {
+                                  @ApiParam @PathParam("serviceName") String serviceName) {
 
     return handleRequest(headers, body, ui, Request.Type.PUT,
-        createArtifactResource(m_clusterName, m_serviceGroupName, serviceDisplayName, null));
+        createArtifactResource(m_clusterName, m_serviceGroupName, serviceName, null));
   }
 
   /**
-   * Handles: PUT /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceDisplayName}/artifacts/{artifactName}
+   * Handles: PUT /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceName}/artifacts/{artifactName}
    * Update a specific artifact.
    *
    * @param headers                 http headers
    * @param ui                      uri info
-   * @param serviceDisplayName      service display name
+   * @param serviceName      service name
    * @param artifactName            artifact name
    *
    * @return information regarding the updated artifact
    */
   @PUT
-  @Path("{serviceDisplayName}/artifacts/{artifactName}")
+  @Path("{serviceName}/artifacts/{artifactName}")
   @Produces(MediaType.TEXT_PLAIN)
   @ApiOperation(value = "Updates a single artifact",
       nickname = "ServiceService#updateArtifact"
@@ -525,25 +525,25 @@ public class ServiceService extends BaseService {
   public Response updateArtifact(String body,
                                  @Context HttpHeaders headers,
                                  @Context UriInfo ui,
-                                 @ApiParam(required = true) @PathParam("serviceDisplayName") String serviceDisplayName,
+                                 @ApiParam(required = true) @PathParam("serviceName") String serviceName,
                                  @ApiParam(required = true) @PathParam("artifactName") String artifactName) {
 
     return handleRequest(headers, body, ui, Request.Type.PUT,
-        createArtifactResource(m_clusterName, m_serviceGroupName, serviceDisplayName, artifactName));
+        createArtifactResource(m_clusterName, m_serviceGroupName, serviceName, artifactName));
   }
 
   /**
-   * Handles: DELETE /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceDisplayName}/artifacts
+   * Handles: DELETE /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceName}/artifacts
    * Delete all artifacts matching the provided predicate.
    *
    * @param headers             http headers
    * @param ui                  uri info
-   * @param serviceDisplayName  service display name
+   * @param serviceName  service name
    *
    * @return information regarding the deleted artifacts
    */
   @DELETE
-  @Path("{serviceDisplayName}/artifacts")
+  @Path("{serviceName}/artifacts")
   @Produces(MediaType.TEXT_PLAIN)
   @ApiOperation(value = "Deletes all artifacts of a service that match the provided predicate",
     nickname = "ServiceService#deleteArtifacts"
@@ -558,25 +558,25 @@ public class ServiceService extends BaseService {
   public Response deleteArtifacts(String body,
                                   @Context HttpHeaders headers,
                                   @Context UriInfo ui,
-                                  @ApiParam(required = true) @PathParam("serviceDisplayName") String serviceDisplayName) {
+                                  @ApiParam(required = true) @PathParam("serviceName") String serviceName) {
 
     return handleRequest(headers, body, ui, Request.Type.DELETE,
-        createArtifactResource(m_clusterName, m_serviceGroupName, serviceDisplayName, null));
+        createArtifactResource(m_clusterName, m_serviceGroupName, serviceName, null));
   }
 
   /**
-   * Handles: DELETE /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceDisplayName}/artifacts/{artifactName}
+   * Handles: DELETE /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceName}/artifacts/{artifactName}
    * Delete a specific artifact.
    *
    * @param headers                 http headers
    * @param ui                      uri info
-   * @param serviceDisplayName      service display name
+   * @param serviceName      service name
    * @param artifactName            artifact name
    *
    * @return information regarding the deleted artifact
    */
   @DELETE
-  @Path("{serviceDisplayName}/artifacts/{artifactName}")
+  @Path("{serviceName}/artifacts/{artifactName}")
   @Produces(MediaType.TEXT_PLAIN)
   @ApiOperation(value = "Deletes a single service artifact",
       nickname = "ServiceService#deleteArtifact"
@@ -591,11 +591,11 @@ public class ServiceService extends BaseService {
   public Response deleteArtifact(String body,
                                  @Context HttpHeaders headers,
                                  @Context UriInfo ui,
-                                 @ApiParam(required = true) @PathParam("serviceDisplayName") String serviceDisplayName,
+                                 @ApiParam(required = true) @PathParam("serviceName") String serviceName,
                                  @ApiParam(required = true) @PathParam("artifactName") String artifactName) {
 
     return handleRequest(headers, body, ui, Request.Type.DELETE,
-        createArtifactResource(m_clusterName, m_serviceGroupName, serviceDisplayName, artifactName));
+        createArtifactResource(m_clusterName, m_serviceGroupName, serviceName, artifactName));
   }
 
   /**
@@ -603,33 +603,33 @@ public class ServiceService extends BaseService {
    *
    * @param request
    *          the request
-   * @param serviceDisplayName
-   *          the service display name
+   * @param serviceName
+   *          the service name
    *
    * @return the alert history service
    */
-  @Path("{serviceDisplayName}/alert_history")
+  @Path("{serviceName}/alert_history")
   // TODO: find a way to handle this with Swagger (refactor or custom annotation?)
   public AlertHistoryService getAlertHistoryService(
       @Context javax.ws.rs.core.Request request,
-      @PathParam("serviceDisplayName") String serviceDisplayName) {
+      @PathParam("serviceName") String serviceName) {
 
-    return new AlertHistoryService(m_clusterName, serviceDisplayName, null);
+    return new AlertHistoryService(m_clusterName, serviceName, null);
   }
 
   /**
    * Create a service resource instance.
    *
    * @param clusterName         cluster name
-   * @param serviceDisplayName  service display name
+   * @param serviceName         service name
    *
    * @return a service resource instance
    */
-  ResourceInstance createServiceResource(String clusterName, String serviceGroupName, String serviceDisplayName) {
+  ResourceInstance createServiceResource(String clusterName, String serviceGroupName, String serviceName) {
     Map<Resource.Type,String> mapIds = new HashMap<>();
     mapIds.put(Resource.Type.Cluster, clusterName);
     mapIds.put(Resource.Type.ServiceGroup, serviceGroupName);
-    mapIds.put(Resource.Type.Service, serviceDisplayName);
+    mapIds.put(Resource.Type.Service, serviceName);
 
     return createResource(Resource.Type.Service, mapIds);
   }
@@ -638,16 +638,16 @@ public class ServiceService extends BaseService {
    * Create an artifact resource instance.
    *
    * @param clusterName           cluster name
-   * @param serviceDisplayName    service display name
+   * @param serviceName           service name
    * @param artifactName          artifact name
    *
    * @return an artifact resource instance
    */
-  ResourceInstance createArtifactResource(String clusterName, String serviceGroupName, String serviceDisplayName, String artifactName) {
+  ResourceInstance createArtifactResource(String clusterName, String serviceGroupName, String serviceName, String artifactName) {
     Map<Resource.Type,String> mapIds = new HashMap<>();
     mapIds.put(Resource.Type.Cluster, clusterName);
     mapIds.put(Resource.Type.ServiceGroup, serviceGroupName);
-    mapIds.put(Resource.Type.Service, serviceDisplayName);
+    mapIds.put(Resource.Type.Service, serviceName);
     mapIds.put(Resource.Type.Artifact, artifactName);
 
     return createResource(Resource.Type.Artifact, mapIds);

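For orientation, a minimal client-side sketch of the renamed artifact endpoint (host, port, cluster, service group, service, and artifact names are hypothetical; the X-Requested-By header is assumed per Ambari's usual CSRF convention):

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class DeleteArtifactSketch {
  public static void main(String[] args) throws Exception {
    // DELETE /clusters/{clusterName}/servicegroups/{serviceGroupName}/services/{serviceName}/artifacts/{artifactName}
    String uri = "http://ambari.example.com:8080/api/v1/clusters/c1"
        + "/servicegroups/CORE/services/HDFS/artifacts/kerberos_descriptor";

    HttpRequest request = HttpRequest.newBuilder(URI.create(uri))
        .header("X-Requested-By", "ambari") // assumed CSRF header
        .DELETE()
        .build();

    HttpResponse<String> response = HttpClient.newHttpClient()
        .send(request, HttpResponse.BodyHandlers.ofString());
    System.out.println(response.statusCode());
  }
}
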
http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/checks/SecondaryNamenodeDeletedCheck.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/SecondaryNamenodeDeletedCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/SecondaryNamenodeDeletedCheck.java
index e9c9059..349b260 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/checks/SecondaryNamenodeDeletedCheck.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/SecondaryNamenodeDeletedCheck.java
@@ -99,7 +99,7 @@ public class SecondaryNamenodeDeletedCheck extends AbstractCheckDescriptor {
       List<HostComponentStateEntity> allHostComponents = hostComponentStateDao.findAll();
       for(HostComponentStateEntity hc : allHostComponents) {
         Service s = cluster.getService(hc.getServiceId());
-        if (s.getName().equalsIgnoreCase(HDFS_SERVICE_NAME) && hc.getComponentName().equalsIgnoreCase(SECONDARY_NAMENODE)) {
+        if (s.getServiceType().equalsIgnoreCase(HDFS_SERVICE_NAME) && hc.getComponentName().equalsIgnoreCase(SECONDARY_NAMENODE)) {
           hosts.add(hc.getHostName());
         }
       }

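The switch from getName() to getServiceType() matters once service instances can be named freely: the instance name no longer identifies the stack service. A minimal sketch of the distinction (the Service interface here is simplified and hypothetical, not Ambari's real one):

interface Service {
  String getName();        // instance name, e.g. "MY_HDFS_1", chosen by the user
  String getServiceType(); // stack service type, e.g. "HDFS", fixed by the stack
}

class SecondaryNamenodeCheckSketch {
  private static final String HDFS_SERVICE_NAME = "HDFS";

  static boolean isHdfs(Service s) {
    // Matching on getName() would miss renamed instances such as "MY_HDFS_1";
    // matching on getServiceType() finds every HDFS instance regardless of name.
    return s.getServiceType().equalsIgnoreCase(HDFS_SERVICE_NAME);
  }
}
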
http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index e9fa8db..9977d0c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -606,33 +606,33 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
         throw new AuthorizationException("The authenticated user is not authorized to install service components on to hosts");
       }
 
-      if (StringUtils.isEmpty(request.getServiceDisplayName())) {
-        request.setServiceDisplayName(findService(cluster, request.getComponentName()));
+      if (StringUtils.isEmpty(request.getServiceName())) {
+        request.setServiceName(findService(cluster, request.getComponentName()));
         // TODO : What if request.getServiceGroupName() is null ? Get it from service instead.
       }
 
       if (LOG.isDebugEnabled()) {
         LOG.debug("Received a createHostComponent request, clusterName={}, serviceGroupName={}, serviceName={}, componentName={}, hostname={}, request={}",
-          request.getClusterName(), request.getServiceGroupName(), request.getServiceDisplayName(), request.getComponentName(), request.getHostname(), request);
+          request.getClusterName(), request.getServiceGroupName(), request.getServiceName(), request.getComponentName(), request.getHostname(), request);
       }
 
       if (!hostComponentNames.containsKey(request.getClusterName())) {
         hostComponentNames.put(request.getClusterName(), new HashMap<>());
       }
       if (!hostComponentNames.get(request.getClusterName())
-          .containsKey(request.getServiceDisplayName())) {
+          .containsKey(request.getServiceName())) {
         hostComponentNames.get(request.getClusterName()).put(
-            request.getServiceDisplayName(), new HashMap<String, Set<String>>());
+            request.getServiceName(), new HashMap<String, Set<String>>());
       }
       if (!hostComponentNames.get(request.getClusterName())
-          .get(request.getServiceDisplayName())
+          .get(request.getServiceName())
           .containsKey(request.getComponentName())) {
         hostComponentNames.get(request.getClusterName())
-            .get(request.getServiceDisplayName()).put(request.getComponentName(),
+            .get(request.getServiceName()).put(request.getComponentName(),
                 new HashSet<String>());
       }
       if (hostComponentNames.get(request.getClusterName())
-          .get(request.getServiceDisplayName())
+          .get(request.getServiceName())
           .get(request.getComponentName())
           .contains(request.getHostname())) {
         duplicates.add("[clusterName=" + request.getClusterName() + ", hostName=" + request.getHostname() +
@@ -640,7 +640,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
         continue;
       }
       hostComponentNames.get(request.getClusterName())
-          .get(request.getServiceDisplayName()).get(request.getComponentName())
+          .get(request.getServiceName()).get(request.getComponentName())
           .add(request.getHostname());
 
       if (request.getDesiredState() != null
@@ -656,10 +656,10 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
 
       Service s;
       try {
-        s = cluster.getService(request.getServiceDisplayName());
+        s = cluster.getService(request.getServiceName());
       } catch (ServiceNotFoundException e) {
         throw new IllegalArgumentException(
-            "The service[" + request.getServiceDisplayName() + "] associated with the component[" +
+            "The service[" + request.getServiceName() + "] associated with the component[" +
             request.getComponentName() + "] doesn't exist for the cluster[" + request.getClusterName() + "]");
       }
       ServiceComponent sc = s.getServiceComponent(
@@ -743,7 +743,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
 
     for (ServiceComponentHostRequest request : requests) {
       Cluster cluster = clusters.getCluster(request.getClusterName());
-      Service s = cluster.getService(request.getServiceDisplayName());
+      Service s = cluster.getService(request.getServiceName());
       ServiceComponent sc = s.getServiceComponent(
           request.getComponentName());
 
@@ -1236,7 +1236,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     }
 
     if (request.getComponentName() != null) {
-      if (StringUtils.isBlank(request.getServiceDisplayName())) {
+      if (StringUtils.isBlank(request.getServiceName())) {
 
         // !!! FIXME the assumption that a component is unique across all stacks is a ticking
         // time bomb.  Blueprints are making this assumption.
@@ -1247,13 +1247,13 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
           throw new ServiceComponentHostNotFoundException(
               cluster.getClusterName(), null, request.getComponentName(), request.getHostname());
         }
-        request.setServiceDisplayName(serviceName);
+        request.setServiceName(serviceName);
       }
     }
 
     Set<Service> services = new HashSet<>();
-    if (request.getServiceDisplayName() != null && !request.getServiceDisplayName().isEmpty()) {
-      services.add(cluster.getService(request.getServiceDisplayName()));
+    if (request.getServiceName() != null && !request.getServiceName().isEmpty()) {
+      services.add(cluster.getService(request.getServiceName()));
     } else {
       services.addAll(cluster.getServices().values());
     }
@@ -1363,7 +1363,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
 
             response.add(r);
           } catch (ServiceComponentHostNotFoundException e) {
-            if (request.getServiceDisplayName() == null || request.getComponentName() == null) {
+            if (request.getServiceName() == null || request.getComponentName() == null) {
               // Ignore the exception if either the service name or component name are not specified.
               // This is an artifact of how we get host_components and can happen in the case where
               // we get all host_components for a host, for example.
@@ -1375,7 +1375,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
               // condition.
               LOG.debug("ServiceComponentHost not found ", e);
               throw new ServiceComponentHostNotFoundException(cluster.getClusterName(),
-                  request.getServiceDisplayName(), request.getComponentName(), request.getHostname());
+                  request.getServiceName(), request.getComponentName(), request.getHostname());
             }
           }
         } else {
@@ -3535,14 +3535,14 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
   }
 
   private void checkIfHostComponentsInDeleteFriendlyState(ServiceComponentHostRequest request, Cluster cluster) throws AmbariException {
-    Service service = cluster.getService(request.getServiceDisplayName());
+    Service service = cluster.getService(request.getServiceName());
     ServiceComponent component = service.getServiceComponent(request.getComponentName());
     ServiceComponentHost componentHost = component.getServiceComponentHost(request.getHostname());
 
     if (!componentHost.canBeRemoved()) {
       throw new AmbariException("Host Component cannot be removed"
               + ", clusterName=" + request.getClusterName()
-              + ", serviceDisplayName=" + request.getServiceDisplayName()
+              + ", serviceName=" + request.getServiceName()
               + ", componentName=" + request.getComponentName()
               + ", hostname=" + request.getHostname()
               + ", request=" + request);
@@ -3560,7 +3560,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
 
   @Override
   public String findService(Cluster cluster, String componentName) throws AmbariException {
-    return cluster.getServiceByComponentName(componentName).getServiceDisplayName();
+    return cluster.getServiceByComponentName(componentName).getName();
   }
 
   /**
@@ -3681,18 +3681,18 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
 
       Cluster cluster = clusters.getCluster(request.getClusterName());
 
-      if (StringUtils.isEmpty(request.getServiceDisplayName())) {
-        request.setServiceDisplayName(findService(cluster, request.getComponentName()));
+      if (StringUtils.isEmpty(request.getServiceName())) {
+        request.setServiceName(findService(cluster, request.getComponentName()));
       }
 
       LOG.info("Received a hostComponent DELETE request"
         + ", clusterName=" + request.getClusterName()
-        + ", serviceDisplayName=" + request.getServiceDisplayName()
+        + ", serviceName=" + request.getServiceName()
         + ", componentName=" + request.getComponentName()
         + ", hostname=" + request.getHostname()
         + ", request=" + request);
 
-      Service service = cluster.getService(request.getServiceDisplayName());
+      Service service = cluster.getService(request.getServiceName());
       ServiceComponent component = service.getServiceComponent(request.getComponentName());
       ServiceComponentHost componentHost = component.getServiceComponentHost(request.getHostname());
 

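The nested bookkeeping in createHostComponents above (cluster -> service -> component -> hosts) can be expressed more compactly with computeIfAbsent; a behavior-equivalent sketch, with names invented for illustration:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

class DuplicateTracker {
  // cluster -> service -> component -> hosts already requested
  private final Map<String, Map<String, Map<String, Set<String>>>> seen = new HashMap<>();

  /** Returns true if (cluster, service, component, host) was already recorded. */
  boolean isDuplicate(String cluster, String service, String component, String host) {
    Set<String> hosts = seen
        .computeIfAbsent(cluster, k -> new HashMap<>())
        .computeIfAbsent(service, k -> new HashMap<>())
        .computeIfAbsent(component, k -> new HashSet<>());
    return !hosts.add(host); // add() returns false when the host was already present
  }
}

Set.add() returning false doubles as the duplicate signal, which removes the separate containsKey checks.
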
http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostRequest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostRequest.java
index 7c4c9ab..ea082c6 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostRequest.java
@@ -24,7 +24,7 @@ public class ServiceComponentHostRequest {
 
   private String clusterName; // REF
   private String serviceGroupName;
-  private String serviceDisplayName;
+  private String serviceName;
   private String componentName;
   private String hostname;
   private String publicHostname;
@@ -44,7 +44,7 @@ public class ServiceComponentHostRequest {
     super();
     this.clusterName = clusterName;
     this.serviceGroupName = serviceGroupName;
-    this.serviceDisplayName = serviceDisplayName;
+    this.serviceName = serviceName;
     this.componentName = componentName;
     this.hostname = hostname;
     this.desiredState = desiredState;
@@ -63,15 +63,15 @@ public class ServiceComponentHostRequest {
   /**
    * @return the serviceName
    */
-  public String getServiceDisplayName() {
-    return serviceDisplayName;
+  public String getServiceName() {
+    return serviceName;
   }
 
   /**
-   * @param serviceDisplayName the serviceName to set
+   * @param serviceName the serviceName to set
    */
-  public void setServiceDisplayName(String serviceDisplayName) {
-    this.serviceDisplayName = serviceDisplayName;
+  public void setServiceName(String serviceName) {
+    this.serviceName = serviceName;
   }
 
   /**
@@ -161,7 +161,7 @@ public class ServiceComponentHostRequest {
     StringBuilder sb = new StringBuilder();
     sb.append("{" + " clusterName=").append(clusterName)
       .append(", serviceGroupName=").append(serviceGroupName)
-      .append(", serviceDisplayName=").append(serviceDisplayName)
+      .append(", serviceName=").append(serviceName)
       .append(", componentName=").append(componentName)
       .append(", hostname=").append(hostname)
       .append(", publicHostname=").append(publicHostname)

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java
index ebfbf5c..c420a50 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java
@@ -32,7 +32,7 @@ public class ServiceComponentHostResponse {
   private String serviceGroupName;
   private Long serviceId;
   private String serviceName;
-  private String serviceDisplayName;
+  private String serviceType;
   private Long hostComponentId;
   private String componentName;
   private String displayName;
@@ -51,7 +51,7 @@ public class ServiceComponentHostResponse {
   private UpgradeState upgradeState = UpgradeState.NONE;
 
   public ServiceComponentHostResponse(Long clusterId, String clusterName, Long serviceGroupId, String serviceGroupName,
-                                      Long serviceId, String serviceName, String serviceDisplayName, Long hostComponentId,
+                                      Long serviceId, String serviceName, String serviceType, Long hostComponentId,
                                       String componentName, String displayName, String hostname, String publicHostname,
                                       String liveState, String version, String desiredState, String desiredStackVersion,
                                       String desiredRepositoryVersion, HostComponentAdminState adminState) {
@@ -61,7 +61,7 @@ public class ServiceComponentHostResponse {
     this.serviceId = serviceId;
     this.clusterName = clusterName;
     this.serviceName = serviceName;
-    this.serviceDisplayName = serviceDisplayName;
+    this.serviceType = serviceType;
     this.hostComponentId = hostComponentId;
     this.componentName = componentName;
     this.displayName = displayName;
@@ -134,12 +134,12 @@ public class ServiceComponentHostResponse {
   /**
    * @return the serviceType
    */
-  public String getServiceDisplayName() { return serviceDisplayName; }
+  public String getServiceType() { return serviceType; }
 
   /**
-   * @param serviceDisplayName the serviceDisplayName to set
+   * @param serviceType the serviceType to set
    */
-  public void setServiceDisplayName(String serviceDisplayName) { this.serviceDisplayName = serviceDisplayName; }
+  public void setServiceType(String serviceType) { this.serviceType = serviceType; }
 
   /**
    * @return the componentName
@@ -328,8 +328,8 @@ public class ServiceComponentHostResponse {
       return false;
     }
 
-    if (serviceDisplayName != null ?
-            !serviceDisplayName.equals(that.serviceDisplayName) : that.serviceDisplayName != null) {
+    if (serviceType != null ?
+            !serviceType.equals(that.serviceType) : that.serviceType != null) {
       return false;
     }
 
@@ -359,7 +359,7 @@ public class ServiceComponentHostResponse {
     result = 71 * result + (serviceGroupName != null ? serviceGroupName.hashCode() : 0);
     result = 71 * result + (serviceId != null ? serviceId.hashCode() : 0);
     result = 71 * result + (serviceName != null ? serviceName.hashCode() : 0);
-    result = 71 * result + (serviceDisplayName != null ? serviceDisplayName.hashCode() : 0);
+    result = 71 * result + (serviceType != null ? serviceType.hashCode() : 0);
     result = 71 * result + (componentName != null ? componentName.hashCode() : 0);
     result = 71 * result + (displayName != null ? displayName.hashCode() : 0);
     result = 71 * result + (hostname != null ? hostname.hashCode() : 0);

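The null-guarded equals and the 71-multiplier hashCode above follow a common hand-rolled pattern; a behavior-compatible sketch using java.util.Objects, with the field list abbreviated for illustration:

import java.util.Objects;

class ResponseKeySketch {
  private final String clusterName;
  private final String serviceName;
  private final String serviceType;
  private final String componentName;

  ResponseKeySketch(String clusterName, String serviceName,
                    String serviceType, String componentName) {
    this.clusterName = clusterName;
    this.serviceName = serviceName;
    this.serviceType = serviceType;
    this.componentName = componentName;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) return true;
    if (!(o instanceof ResponseKeySketch)) return false;
    ResponseKeySketch that = (ResponseKeySketch) o;
    return Objects.equals(clusterName, that.clusterName)
        && Objects.equals(serviceName, that.serviceName)
        && Objects.equals(serviceType, that.serviceType)
        && Objects.equals(componentName, that.componentName);
  }

  @Override
  public int hashCode() {
    // Objects.hash uses 31 as its multiplier; the class above uses 71, which is
    // an equally valid choice as long as equals and hashCode stay consistent.
    return Objects.hash(clusterName, serviceName, serviceType, componentName);
  }
}
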
http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentRequest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentRequest.java
index 1556b70..f59eb98 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentRequest.java
@@ -23,29 +23,29 @@ public class ServiceComponentRequest {
 
   private String clusterName; // REF
   private String serviceGroupName;
-  private String serviceDisplayName; // GET/CREATE/UPDATE/DELETE
+  private String serviceName; // GET/CREATE/UPDATE/DELETE
   private String componentName; // GET/CREATE/UPDATE/DELETE
   private String desiredState; // CREATE/UPDATE
   private String componentCategory;
   private String recoveryEnabled; // CREATE/UPDATE
 
-  public ServiceComponentRequest(String clusterName, String serviceGroupName, String serviceDisplayName,
+  public ServiceComponentRequest(String clusterName, String serviceGroupName, String serviceName,
                                  String componentName, String desiredState) {
-    this(clusterName, serviceGroupName, serviceDisplayName, componentName, desiredState, null, null);
+    this(clusterName, serviceGroupName, serviceName, componentName, desiredState, null, null);
   }
 
-  public ServiceComponentRequest(String clusterName, String serviceGroupName, String serviceDisplayName, String componentName,
+  public ServiceComponentRequest(String clusterName, String serviceGroupName, String serviceName, String componentName,
                                  String desiredState, String recoveryEnabled) {
-    this(clusterName, serviceGroupName, serviceDisplayName, componentName, desiredState, recoveryEnabled, null);
+    this(clusterName, serviceGroupName, serviceName, componentName, desiredState, recoveryEnabled, null);
   }
 
   public ServiceComponentRequest(String clusterName, String serviceGroupName,
-                                 String serviceDisplayName, String componentName,
+                                 String serviceName, String componentName,
                                  String desiredState, String recoveryEnabled,
                                  String componentCategory) {
     this.clusterName = clusterName;
     this.serviceGroupName = serviceGroupName;
-    this.serviceDisplayName = serviceDisplayName;
+    this.serviceName = serviceName;
     this.componentName = componentName;
     this.desiredState = desiredState;
     this.recoveryEnabled = recoveryEnabled;
@@ -65,15 +65,15 @@ public class ServiceComponentRequest {
   /**
    * @return the serviceName
    */
-  public String getServiceDisplayName() {
-    return serviceDisplayName;
+  public String getServiceName() {
+    return serviceName;
   }
 
   /**
-   * @param serviceDisplayName the serviceDisplayName to set
+   * @param serviceName the serviceName to set
    */
-  public void setServiceDisplayName(String serviceDisplayName) {
-    this.serviceDisplayName = serviceDisplayName;
+  public void setServiceName(String serviceName) {
+    this.serviceName = serviceName;
   }
 
 
@@ -141,8 +141,8 @@ public class ServiceComponentRequest {
 
   @Override
   public String toString() {
-    return String.format("[clusterName=%s, serviceGroupName=%s, serviceDisplayName=%s, componentName=%s, " +
-                         "desiredState=%s, recoveryEnabled=%s, componentCategory=%s]", clusterName, serviceGroupName,
-                          serviceDisplayName, clusterName, desiredState, recoveryEnabled, componentCategory);
+    return String.format("[clusterName=%s, serviceGroupName=%s, serviceName=%s, componentName=%s, " +
+      "desiredState=%s, recoveryEnabled=%s, componentCategory=%s]", clusterName, serviceGroupName,
+      serviceName, componentName, desiredState, recoveryEnabled, componentCategory);
   }
 }

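A quick usage sketch of the corrected toString (all values hypothetical; assumes the class above is on the classpath):

import org.apache.ambari.server.controller.ServiceComponentRequest;

public class ToStringSketch {
  public static void main(String[] args) {
    ServiceComponentRequest r =
        new ServiceComponentRequest("c1", "CORE", "HDFS", "NAMENODE", "INSTALLED");
    // Prints, on one line:
    // [clusterName=c1, serviceGroupName=CORE, serviceName=HDFS, componentName=NAMENODE,
    //  desiredState=INSTALLED, recoveryEnabled=null, componentCategory=null]
    System.out.println(r);
  }
}
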
http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentResponse.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentResponse.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentResponse.java
index b96fb8a..d63b33c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentResponse.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentResponse.java
@@ -32,7 +32,7 @@ public class ServiceComponentResponse {
   private String serviceGroupName; // REF
   private Long serviceId; // REF
   private String serviceName;
-  private String serviceDisplayName;
+  private String serviceType;
   private String componentName;
   private String displayName;
   private String desiredStackId;
@@ -44,7 +44,7 @@ public class ServiceComponentResponse {
   private RepositoryVersionState repoState;
 
   public ServiceComponentResponse(Long clusterId, String clusterName, Long serviceGroupId, String serviceGroupName,
-                                  Long serviceId, String serviceName, String serviceDisplayName, String componentName,
+                                  Long serviceId, String serviceName, String serviceType, String componentName,
                                   StackId desiredStackId, String desiredState, Map<String, Integer> serviceComponentStateCount,
                                   boolean recoveryEnabled, String displayName, String desiredVersion,
                                   RepositoryVersionState repoState) {
@@ -54,7 +54,7 @@ public class ServiceComponentResponse {
     this.serviceGroupName = serviceGroupName;
     this.serviceId = serviceId;
     this.serviceName = serviceName;
-    this.serviceDisplayName = serviceDisplayName;
+    this.serviceType = serviceType;
     this.componentName = componentName;
     this.displayName = displayName;
     this.desiredStackId = desiredStackId.getStackId();
@@ -82,12 +82,12 @@ public class ServiceComponentResponse {
   /**
    * @return the serviceType
    */
-  public String getServiceDisplayName() { return serviceDisplayName; }
+  public String getServiceType() { return serviceType; }
 
   /**
-   * @param serviceDisplayName the serviceDisplayName to set
+   * @param serviceType the serviceType to set
    */
-  public void setServiceDisplayName(String serviceDisplayName) { this.serviceDisplayName = serviceDisplayName; }
+  public void setServiceType(String serviceType) { this.serviceType = serviceType; }
 
   /**
    * @return the serviceId
@@ -288,8 +288,8 @@ public class ServiceComponentResponse {
       return false;
     }
 
-    if (serviceDisplayName != null ?
-            !serviceDisplayName.equals(that.serviceDisplayName) : that.serviceDisplayName != null) {
+    if (serviceType != null ?
+            !serviceType.equals(that.serviceType) : that.serviceType != null) {
       return false;
     }
 
@@ -314,7 +314,7 @@ public class ServiceComponentResponse {
     result = 71 * result + (serviceGroupName != null ? serviceGroupName.hashCode() : 0);
     result = 71 * result + (serviceId != null ? serviceId.hashCode() : 0);
     result = 71 * result + (serviceName != null ? serviceName.hashCode() : 0);
-    result = 71 * result + (serviceDisplayName != null ? serviceDisplayName.hashCode() : 0);
+    result = 71 * result + (serviceType != null ? serviceType.hashCode() : 0);
     result = 71 * result + (componentName != null ? componentName.hashCode():0);
     result = 71 * result + (displayName != null ? displayName.hashCode():0);
     return result;

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceRequest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceRequest.java
index 59a700e..70e5240 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceRequest.java
@@ -26,7 +26,7 @@ public class ServiceRequest {
 
   private String clusterName; // REF
   private String serviceName; // GET/CREATE/DELETE
-  private String serviceDisplayName; // GET/CREATE/UPDATE/DELETE
+  private String serviceType; // GET/CREATE/UPDATE/DELETE
   private String serviceGroupName;
   private String desiredState; // CREATE/UPDATE
   private String maintenanceState; // UPDATE
@@ -49,7 +49,7 @@ public class ServiceRequest {
   public ServiceRequest(String clusterName,
                         String serviceGroupName,
                         String serviceName,
-                        String serviceDisplayName,
+                        String serviceType,
                         Long desiredRepositoryVersionId,
                         String desiredState,
                         String credentialStoreEnabled,
@@ -61,7 +61,7 @@ public class ServiceRequest {
 
     this.desiredRepositoryVersionId = desiredRepositoryVersionId;
 
-    this.serviceDisplayName = serviceDisplayName;
+    this.serviceType = serviceType;
     this.desiredState = desiredState;
     this.credentialStoreEnabled = credentialStoreEnabled;
     // Credential store supported cannot be changed after
@@ -77,20 +77,20 @@ public class ServiceRequest {
   public String getServiceName() { return serviceName; }
 
   /**
-   * @param serviceName the Service Display Name to set
+   * @param serviceName the service name to set
    */
   public void setServiceName(String serviceName) { this.serviceName = serviceName; }
 
   /**
-   * @return the service display name
+   * @return the service type
    */
-  @ApiModelProperty(name = "service_display_name")
-  public String getServiceDisplayName() { return serviceDisplayName; }
+  @ApiModelProperty(name = "service_type")
+  public String getServiceType() { return serviceType; }
 
   /**
-   * @param serviceDisplayName the Service Display Name to set
+   * @param serviceType the service type to set
    */
-  public void setServiceDisplayName(String serviceDisplayName) { this.serviceDisplayName = serviceDisplayName; }
+  public void setServiceType(String serviceType) { this.serviceType = serviceType; }
 
   /**
    * @return the service group Name
@@ -181,7 +181,7 @@ public class ServiceRequest {
     StringBuilder sb = new StringBuilder();
     sb.append("clusterName=" + clusterName
       + ", serviceGroupName=" + serviceGroupName
-      + ", serviceDisplayName=" + serviceDisplayName
+      + ", serviceName=" + serviceName
       + ", desiredState=" + desiredState
       + ", maintenanceState=" + maintenanceState
       + ", credentialStoreEnabled=" + credentialStoreEnabled

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceResponse.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceResponse.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceResponse.java
index f528b4e..73a2c93 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceResponse.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceResponse.java
@@ -31,7 +31,7 @@ public class ServiceResponse {
   private String serviceGroupName;
   private Long serviceId;
   private String serviceName;
-  private String serviceDisplayName;
+  private String serviceType;
   private StackId desiredStackId;
   private String desiredRepositoryVersion;
   private Long desiredRepositoryVersionId;
@@ -42,7 +42,7 @@ public class ServiceResponse {
   private boolean credentialStoreEnabled;
 
   public ServiceResponse(Long clusterId, String clusterName, Long serviceGroupId, String serviceGroupName,
-                         Long serviceId, String serviceName, String serviceDisplayName, StackId desiredStackId,
+                         Long serviceId, String serviceName, String serviceType, StackId desiredStackId,
                          String desiredRepositoryVersion, RepositoryVersionState repositoryVersionState, String desiredState,
                          boolean credentialStoreSupported, boolean credentialStoreEnabled) {
     this.clusterId = clusterId;
@@ -51,7 +51,7 @@ public class ServiceResponse {
     this.serviceGroupName = serviceGroupName;
     this.serviceId = serviceId;
     this.serviceName = serviceName;
-    this.serviceDisplayName = serviceDisplayName;
+    this.serviceType = serviceType;
     this.desiredStackId = desiredStackId;
     this.repositoryVersionState = repositoryVersionState;
     setDesiredState(desiredState);
@@ -137,14 +137,14 @@ public class ServiceResponse {
   public void setServiceId(Long serviceId) { this.serviceId = serviceId; }
 
   /**
-   * @return the real serviceName
+   * @return the serviceType (i.e. stack service name)
    */
-  public String getServiceDisplayName() { return serviceDisplayName; }
+  public String getServiceType() { return serviceType; }
 
   /**
-   * @param serviceDisplayName the real serviceName to set
+   * @param serviceType the serviceType (i.e. stack service name) to set
    */
-  public void setserviceDisplayName(String serviceDisplayName) { this.serviceDisplayName = serviceDisplayName; }
+  public void setServiceType(String serviceType) { this.serviceType = serviceType; }
 
   /**
    * @return the desiredState
@@ -220,8 +220,8 @@ public class ServiceResponse {
             !serviceName.equals(that.serviceName) : that.serviceName != null) {
       return false;
     }
-    if (serviceDisplayName != null ?
-            !serviceDisplayName.equals(that.serviceDisplayName) : that.serviceDisplayName != null) {
+    if (serviceType != null ?
+            !serviceType.equals(that.serviceType) : that.serviceType != null) {
       return false;
     }
 
@@ -287,7 +287,7 @@ public class ServiceResponse {
     result = 71 * result + (serviceGroupName != null ? serviceGroupName.hashCode() : 0);
     result = 71 * result + (serviceId != null ? serviceId.hashCode() : 0);
     result = 71 * result + (serviceName != null ? serviceName.hashCode() : 0);
-    result = 71 * result + (serviceDisplayName != null ? serviceDisplayName.hashCode() : 0);
+    result = 71 * result + (serviceType != null ? serviceType.hashCode() : 0);
     return result;
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
index ef34c6a..652d667 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
@@ -200,7 +200,7 @@ public class ClientConfigResourceProvider extends AbstractControllerResourceProv
 
     ServiceComponentHostRequest schRequest =  requests.iterator().next();
     String requestComponentName = schRequest.getComponentName();
-    String requestServiceName = schRequest.getServiceDisplayName();
+    String requestServiceName = schRequest.getServiceName();
     String requestHostName =  schRequest.getHostname();
 
     Map<String,List<ServiceComponentHostResponse>> serviceToComponentMap = new HashMap<>();


[13/50] [abbrv] ambari git commit: AMBARI-21776. Move druid version to druid 0.10.1 and drop TP flag. (Slim Bouguerra via Swapan Shridhar).

Posted by ja...@apache.org.
AMBARI-21776. Move druid version to druid 0.10.1 and drop TP flag. (Slim Bouguerra via Swapan Shridhar).


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e61556cc
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e61556cc
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e61556cc

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: e61556cc28c593ce9ab58274302b5de4982601d1
Parents: 2fb4649
Author: Swapan Shridhar <ss...@hortonworks.com>
Authored: Fri Oct 6 11:05:04 2017 -0700
Committer: Swapan Shridhar <ss...@hortonworks.com>
Committed: Fri Oct 6 11:05:04 2017 -0700

----------------------------------------------------------------------
 .../DRUID/0.10.1/configuration/druid-broker.xml | 106 +++++++
 .../DRUID/0.10.1/configuration/druid-common.xml | 270 ++++++++++++++++
 .../0.10.1/configuration/druid-coordinator.xml  |  43 +++
 .../DRUID/0.10.1/configuration/druid-env.xml    | 248 +++++++++++++++
 .../0.10.1/configuration/druid-historical.xml   |  94 ++++++
 .../DRUID/0.10.1/configuration/druid-log4j.xml  |  84 +++++
 .../0.10.1/configuration/druid-logrotate.xml    |  68 ++++
 .../configuration/druid-middlemanager.xml       | 122 ++++++++
 .../0.10.1/configuration/druid-overlord.xml     |  52 ++++
 .../DRUID/0.10.1/configuration/druid-router.xml |  59 ++++
 .../common-services/DRUID/0.10.1/metainfo.xml   | 223 ++++++++++++++
 .../DRUID/0.10.1/package/scripts/broker.py      |  28 ++
 .../DRUID/0.10.1/package/scripts/coordinator.py |  28 ++
 .../DRUID/0.10.1/package/scripts/druid.py       | 307 +++++++++++++++++++
 .../DRUID/0.10.1/package/scripts/druid_node.py  | 114 +++++++
 .../DRUID/0.10.1/package/scripts/historical.py  |  28 ++
 .../0.10.1/package/scripts/middlemanager.py     |  28 ++
 .../DRUID/0.10.1/package/scripts/overlord.py    |  28 ++
 .../DRUID/0.10.1/package/scripts/params.py      | 200 ++++++++++++
 .../DRUID/0.10.1/package/scripts/router.py      |  28 ++
 .../0.10.1/package/scripts/service_check.py     |  44 +++
 .../0.10.1/package/scripts/status_params.py     |  24 ++
 .../DRUID/0.10.1/quicklinks/quicklinks.json     |  37 +++
 .../DRUID/0.10.1/role_command_order.json        |  17 +
 .../DRUID/0.10.1/themes/theme.json              | 120 ++++++++
 .../DRUID/0.9.2/configuration/druid-broker.xml  | 106 -------
 .../DRUID/0.9.2/configuration/druid-common.xml  | 270 ----------------
 .../0.9.2/configuration/druid-coordinator.xml   |  43 ---
 .../DRUID/0.9.2/configuration/druid-env.xml     | 248 ---------------
 .../0.9.2/configuration/druid-historical.xml    |  94 ------
 .../DRUID/0.9.2/configuration/druid-log4j.xml   |  84 -----
 .../0.9.2/configuration/druid-logrotate.xml     |  68 ----
 .../0.9.2/configuration/druid-middlemanager.xml | 122 --------
 .../0.9.2/configuration/druid-overlord.xml      |  52 ----
 .../DRUID/0.9.2/configuration/druid-router.xml  |  59 ----
 .../common-services/DRUID/0.9.2/metainfo.xml    | 223 --------------
 .../DRUID/0.9.2/package/scripts/broker.py       |  28 --
 .../DRUID/0.9.2/package/scripts/coordinator.py  |  28 --
 .../DRUID/0.9.2/package/scripts/druid.py        | 307 -------------------
 .../DRUID/0.9.2/package/scripts/druid_node.py   | 114 -------
 .../DRUID/0.9.2/package/scripts/historical.py   |  28 --
 .../0.9.2/package/scripts/middlemanager.py      |  28 --
 .../DRUID/0.9.2/package/scripts/overlord.py     |  28 --
 .../DRUID/0.9.2/package/scripts/params.py       | 200 ------------
 .../DRUID/0.9.2/package/scripts/router.py       |  28 --
 .../0.9.2/package/scripts/service_check.py      |  44 ---
 .../0.9.2/package/scripts/status_params.py      |  24 --
 .../DRUID/0.9.2/quicklinks/quicklinks.json      |  37 ---
 .../DRUID/0.9.2/role_command_order.json         |  17 -
 .../DRUID/0.9.2/themes/theme.json               | 120 --------
 .../stacks/HDP/2.6/services/DRUID/metainfo.xml  |   5 +-
 .../test/python/stacks/2.6/DRUID/test_druid.py  |   2 +-
 52 files changed, 2403 insertions(+), 2404 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-broker.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-broker.xml b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-broker.xml
new file mode 100644
index 0000000..6146ca3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-broker.xml
@@ -0,0 +1,106 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>druid.service</name>
+    <value>druid/broker</value>
+    <description>The druid.service name of broker node.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.port</name>
+    <value>8082</value>
+    <description>The port on which the broker will accept connections.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.broker.http.numConnections</name>
+    <value>20</value>
+    <description>Size of the connection pool the Broker uses to connect to Historical and real-time nodes. If more
+      queries than this number need to speak to the same node, they will queue up.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.server.http.numThreads</name>
+    <value>50</value>
+    <description>Number of threads for HTTP requests.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.processing.buffer.sizeBytes</name>
+    <value>1073741824</value>
+    <value-attributes>
+      <type>long</type>
+      <minimum>0</minimum>
+      <unit>Bytes</unit>
+    </value-attributes>
+    <description>This specifies a buffer size for the storage of intermediate results. The computation engine in both
+      the Historical and Realtime nodes will use a scratch buffer of this size to do all of their intermediate
+      computations
+      off-heap. Larger values allow for more aggregations in a single pass over the data while smaller values can
+      require more passes depending on the query that is being executed.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.processing.numThreads</name>
+    <value>2</value>
+    <description>The number of processing threads to have available for parallel processing of segments.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.processing.numMergeBuffers</name>
+    <value>2</value>
+    <description>The number of direct memory buffers available for merging query results. The buffers are sized by druid.processing.buffer.sizeBytes.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.broker.cache.useCache</name>
+    <value>true</value>
+    <description>Enable the cache on the broker.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.broker.cache.populateCache</name>
+    <value>true</value>
+    <description>Populate the cache on the broker.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.cache.type</name>
+    <value>local</value>
+    <description>The type of cache to use for queries.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.cache.sizeInBytes</name>
+    <value>10000000</value>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <unit>Bytes</unit>
+    </value-attributes>
+    <description>Maximum cache size in bytes. Zero disables caching.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

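The three processing settings above jointly determine the Broker's direct-memory footprint. Druid's published sizing guidance is roughly buffer size x (numThreads + numMergeBuffers + 1); a small sketch with the defaults from this file (an estimate only, nothing in this patch enforces it):

public class BrokerDirectMemorySketch {
  public static void main(String[] args) {
    long bufferSizeBytes = 1_073_741_824L; // druid.processing.buffer.sizeBytes
    int numThreads = 2;                    // druid.processing.numThreads
    int numMergeBuffers = 2;               // druid.processing.numMergeBuffers

    // Druid's documented rule of thumb for -XX:MaxDirectMemorySize:
    long directMemory = bufferSizeBytes * (numThreads + numMergeBuffers + 1);
    System.out.printf("MaxDirectMemorySize >= %d bytes (~%d GB)%n",
        directMemory, directMemory >> 30);
  }
}
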
http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-common.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-common.xml b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-common.xml
new file mode 100644
index 0000000..d3b53cd
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-common.xml
@@ -0,0 +1,270 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>druid.extensions.pullList</name>
+    <value>[]</value>
+    <description>A comma-separated list of one or more druid extensions to download from maven.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.extensions.repositoryList</name>
+    <value>[]</value>
+    <description>A comma-separated list of maven repositories to download extensions.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.extensions.loadList</name>
+    <value>["druid-datasketches"]
+    </value>
+    <depends-on>
+      <property>
+        <type>druid-common</type>
+        <name>druid.metadata.storage.type</name>
+      </property>
+    </depends-on>
+    <description>A comma-separated list of one or more druid extensions to load.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.security.extensions.loadList</name>
+    <value>[]</value>
+    <description>A comma-separated list of one or more druid security extensions to load. This property is set by the Kerberos wizard, and users are not allowed to modify it when security is enabled.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.zk.service.host</name>
+    <value>localhost:2181</value>
+    <description>
+      ZooKeeper connection string.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.zk.paths.base</name>
+    <value>/druid</value>
+    <description>
+      Base ZooKeeper path.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.discovery.curator.path</name>
+    <value>/druid/discovery</value>
+    <description>
+      Services announce themselves under this ZooKeeper path.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.storage.type</name>
+    <value></value>
+    <description>
+      Choices: local, noop, s3, hdfs, c*. The type of deep storage to use.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.storage.storageDirectory</name>
+    <value></value>
+    <description>
+      Directory to use as deep storage.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.metadata.storage.connector.password</name>
+    <value></value>
+    <property-type>PASSWORD</property-type>
+    <display-name>Metadata storage password</display-name>
+    <description>Password for the metadata storage database.</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.metadata.storage.connector.user</name>
+    <value>druid</value>
+    <display-name>Metadata storage user</display-name>
+    <description>Metadata storage user</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.metadata.storage.connector.port</name>
+    <value>1527</value>
+    <display-name>Metadata storage port</display-name>
+    <description>Metadata storage port</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>druid-common</type>
+        <name>druid.metadata.storage.type</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>database_name</name>
+    <value>druid</value>
+    <display-name>Druid Metadata storage database name</display-name>
+    <description>Druid Metadata storage database name</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>metastore_hostname</name>
+    <value>localhost</value>
+    <display-name>Metadata storage hostname</display-name>
+    <description>Metadata storage hostname</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property require-input="true">
+    <name>druid.metadata.storage.type</name>
+    <display-name>Druid Metadata storage type</display-name>
+    <value>derby</value>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>mysql</value>
+          <label>MYSQL</label>
+        </entry>
+        <entry>
+          <value>derby</value>
+          <label>DERBY</label>
+        </entry>
+        <entry>
+          <value>postgresql</value>
+          <label>POSTGRESQL</label>
+        </entry>
+      </entries>
+    </value-attributes>
+    <description>Type of the metadata storage. Note that derby works only when all Druid nodes run on
+      the same host; use mysql or postgresql for distributed mode. The mysql instance installed by
+      Ambari is for development only and is not suitable for production because it is not HA.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property require-input="true">
+    <name>druid.metadata.storage.connector.connectURI</name>
+    <value>jdbc:derby://localhost:1527/druid;create=true</value>
+    <display-name>Metadata storage connector url</display-name>
+    <description>Metadata storage connector url</description>
+    <on-ambari-upgrade add="false"/>
+    <depends-on>
+      <property>
+        <type>druid-common</type>
+        <name>database_name</name>
+      </property>
+      <property>
+        <type>druid-common</type>
+        <name>metastore_hostname</name>
+      </property>
+      <property>
+        <type>druid-common</type>
+        <name>druid.metadata.storage.type</name>
+      </property>
+      <property>
+        <type>druid-common</type>
+        <name>druid.metadata.storage.connector.port</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>druid.hadoop.security.kerberos.principal</name>
+    <display-name>kerberos principal</display-name>
+    <description>Kerberos principal, e.g. druid@EXAMPLE.COM</description>
+    <property-type>KERBEROS_PRINCIPAL</property-type>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.hadoop.security.kerberos.keytab</name>
+    <display-name>Kerberos keytab location</display-name>
+    <description>Kerberos keytab location</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>druid.emitter</name>
+    <value>{{metric_emitter_type}}</value>
+    <description>Emitter used to emit metrics. One of "noop", "logging", "ambari-metrics", or "http"; the value
+      selects which emitter module is initialized.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.emitter.ambari-metrics.hostname</name>
+    <value>{{metric_collector_host}}</value>
+    <description>Timeline host</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.emitter.ambari-metrics.port</name>
+    <value>{{metric_collector_port}}</value>
+    <description>Timeline port</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.emitter.ambari-metrics.protocol</name>
+    <value>{{metric_collector_protocol}}</value>
+    <description>Timeline protocol (http or https)</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.emitter.ambari-metrics.trustStorePath</name>
+    <value>{{metric_truststore_path}}</value>
+    <description>Location of the trust store file.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.emitter.ambari-metrics.trustStoreType</name>
+    <value>{{metric_truststore_type}}</value>
+    <description>Optional. Default value is "jks".</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.emitter.ambari-metrics.trustStorePassword</name>
+    <value>{{metric_truststore_password}}</value>
+    <description>Password to open the trust store file.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.emitter.ambari-metrics.eventConverter</name>
+    <value>{"type":"whiteList"}</value>
+    <description>JSON spec of the event converter used to transform metric events before they are emitted.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.monitoring.monitors</name>
+    <value>["com.metamx.metrics.JvmMonitor"]</value>
+    <description>List of Druid monitor classes to load for periodic metric emission, e.g. com.metamx.metrics.JvmMonitor for JVM metrics.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+</configuration>

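druid.metadata.storage.connector.connectURI is derived from the storage type, host, port, and database name declared above. A hedged sketch of how the JDBC URL is typically assembled for each supported type (the derby form reproduces the default value in this file; the mysql and postgresql forms follow standard JDBC conventions and are assumptions here):

public class MetadataConnectUriSketch {
  static String connectUri(String type, String host, int port, String dbName) {
    switch (type) {
      case "derby":
        return String.format("jdbc:derby://%s:%d/%s;create=true", host, port, dbName);
      case "mysql":
        return String.format("jdbc:mysql://%s:%d/%s?createDatabaseIfNotExist=true", host, port, dbName);
      case "postgresql":
        return String.format("jdbc:postgresql://%s:%d/%s", host, port, dbName);
      default:
        throw new IllegalArgumentException("Unsupported metadata storage type: " + type);
    }
  }

  public static void main(String[] args) {
    // Reproduces the default value shipped in druid-common.xml:
    System.out.println(connectUri("derby", "localhost", 1527, "druid"));
  }
}
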
http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-coordinator.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-coordinator.xml b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-coordinator.xml
new file mode 100644
index 0000000..618f11d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-coordinator.xml
@@ -0,0 +1,43 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>druid.service</name>
+    <value>druid/coordinator</value>
+    <description>The druid.service name of coordinator node.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.port</name>
+    <value>8081</value>
+    <description>The port on which the coordinator will accept connections.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.coordinator.merge.on</name>
+    <value>false</value>
+    <description>Boolean flag for whether or not the coordinator should try to merge small segments into a more optimal
+      segment size.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-env.xml b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-env.xml
new file mode 100644
index 0000000..2e96f6a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-env.xml
@@ -0,0 +1,248 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <!--Heap Settings -->
+  <property>
+    <name>druid.broker.jvm.heap.memory</name>
+    <value>2048</value>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>65536</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.coordinator.jvm.heap.memory</name>
+    <value>512</value>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>65536</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.middlemanager.jvm.heap.memory</name>
+    <value>256</value>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>65536</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.historical.jvm.heap.memory</name>
+    <value>2048</value>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>65536</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.overlord.jvm.heap.memory</name>
+    <value>512</value>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>65536</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.router.jvm.heap.memory</name>
+    <value>512</value>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>65536</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- DirectMemorySettings -->
+  <property>
+    <name>druid.broker.jvm.direct.memory</name>
+    <value>1048576</value>
+    <depends-on>
+      <property>
+        <type>druid-broker</type>
+        <name>druid.processing.buffer.sizeBytes</name>
+      </property>
+      <property>
+        <type>druid-broker</type>
+        <name>druid.processing.numThreads</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.coordinator.jvm.direct.memory</name>
+    <value>1048576</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.middlemanager.jvm.direct.memory</name>
+    <value>1048576</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.historical.jvm.direct.memory</name>
+    <value>1048576</value>
+    <depends-on>
+      <property>
+        <type>druid-historical</type>
+        <name>druid.processing.buffer.sizeBytes</name>
+      </property>
+      <property>
+        <type>druid-historical</type>
+        <name>druid.processing.numThreads</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.overlord.jvm.direct.memory</name>
+    <value>1048576</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.router.jvm.direct.memory</name>
+    <value>1048576</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- JavaOpts Tune GC related configs here-->
+  <property>
+    <name>druid.broker.jvm.opts</name>
+    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.coordinator.jvm.opts</name>
+    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.middlemanager.jvm.opts</name>
+    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.historical.jvm.opts</name>
+    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.overlord.jvm.opts</name>
+    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.router.jvm.opts</name>
+    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid_user</name>
+    <display-name>Druid User</display-name>
+    <value>druid</value>
+    <property-type>USER</property-type>
+    <description>The user to run Druid daemons as.</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+      <user-groups>
+        <property>
+          <type>cluster-env</type>
+          <name>user_group</name>
+        </property>
+      </user-groups>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid_log_dir</name>
+    <value>/var/log/druid</value>
+    <description>Directory for Druid log files.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid_pid_dir</name>
+    <value>/var/run/druid</value>
+    <display-name>Druid PID dir</display-name>
+    <description>Directory in which the Druid process ID files are stored.</description>
+    <value-attributes>
+      <type>directory</type>
+      <editable-only-at-install>true</editable-only-at-install>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- druid-env.sh -->
+  <property>
+    <name>content</name>
+    <display-name>druid-env template</display-name>
+    <description>This is a simple template for the druid-env.sh file.</description>
+    <value>
+      #!/bin/bash
+
+      # Set DRUID specific environment variables here.
+
+      # The java implementation to use.
+      export JAVA_HOME={{java8_home}}
+      export PATH=$JAVA_HOME/bin:$PATH
+      export DRUID_PID_DIR={{druid_pid_dir}}
+      export DRUID_LOG_DIR={{druid_log_dir}}
+      export DRUID_CONF_DIR={{druid_conf_dir}}
+      export DRUID_LIB_DIR={{druid_home}}/lib
+      export HADOOP_CONF_DIR={{hadoop_conf_dir}}
+
+    </value>
+    <value-attributes>
+      <type>content</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
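
The heap, direct-memory and opts values above are later stitched into each node's jvm.config by the service scripts (see the InlineTemplate in druid.py further down). A rough sketch of what that produces for the broker, using the defaults above (log4j flags omitted):

    heap_mb = 2048        # druid.broker.jvm.heap.memory
    direct_mb = 1048576   # druid.broker.jvm.direct.memory
    jvm_opts = '-Duser.timezone=UTC -Dfile.encoding=UTF-8'  # druid.broker.jvm.opts

    jvm_config = '\n'.join([
        '-server',
        '-Xms%dm' % heap_mb,
        '-Xmx%dm' % heap_mb,
        '-XX:MaxDirectMemorySize=%dm' % direct_mb,
        jvm_opts,
    ])
    print(jvm_config)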

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-historical.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-historical.xml b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-historical.xml
new file mode 100644
index 0000000..5ff30ce
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-historical.xml
@@ -0,0 +1,94 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>druid.service</name>
+    <value>druid/historical</value>
+    <description>The druid.service name of historical node.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.port</name>
+    <value>8083</value>
+    <description>The port on which the historical nodes will accept connections.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.server.http.numThreads</name>
+    <value>50</value>
+    <description>Number of threads for HTTP requests.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.processing.numMergeBuffers</name>
+    <value>2</value>
+    <description>The number of direct memory buffers available for merging query results. The buffers are sized by druid.processing.buffer.sizeBytes.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.processing.buffer.sizeBytes</name>
+    <value>1073741824</value>
+    <value-attributes>
+      <type>long</type>
+      <minimum>0</minimum>
+      <unit>Bytes</unit>
+    </value-attributes>
+    <description>This specifies a buffer size for the storage of intermediate results. The computation engine in both
+      the Historical and Realtime nodes will use a scratch buffer of this size to do all of their intermediate
+      computations off-heap. Larger values allow for more aggregations in a single pass over the data while smaller
+      values can require more passes depending on the query that is being executed.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.processing.numThreads</name>
+    <value>10</value>
+    <description>The number of processing threads to have available for parallel processing of segments.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.segmentCache.locations</name>
+    <value>[{"path":"/apps/druid/segmentCache","maxSize":300000000000}]</value>
+    <description>Segments assigned to a Historical node are first stored on the local file system (in a disk cache) and
+      then served by the Historical node. These locations define where that local cache resides.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.server.maxSize</name>
+    <value>300000000000</value>
+    <description>The maximum number of bytes-worth of segments that the node wants assigned to it. This is not a limit
+      that Historical nodes actually enforce; it is just a value published to the Coordinator node so it can plan
+      accordingly.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.segmentCache.infoDir</name>
+    <value>/apps/druid/segmentCache/info_dir</value>
+    <description>Historical nodes keep track of the segments they are serving so that when the process is restarted they
+      can reload the same segments without waiting for the Coordinator to reassign. This path defines where this
+      metadata is kept. Directory will be created if needed.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
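
A note on sizing: Druid expects a node's direct memory to cover (druid.processing.numThreads + druid.processing.numMergeBuffers + 1) buffers of druid.processing.buffer.sizeBytes each, which is why druid.historical.jvm.direct.memory in druid-env.xml declares depends-on relationships on druid.processing.buffer.sizeBytes and druid.processing.numThreads above. A quick check against the defaults:

    buffer_size_bytes = 1073741824   # druid.processing.buffer.sizeBytes
    num_threads = 10                 # druid.processing.numThreads
    num_merge_buffers = 2            # druid.processing.numMergeBuffers

    # Druid's rule of thumb:
    # MaxDirectMemorySize >= (numThreads + numMergeBuffers + 1) * sizeBytes
    min_direct_mb = (num_threads + num_merge_buffers + 1) * buffer_size_bytes // (1024 * 1024)
    print(min_direct_mb)  # 13312 MB of direct memory needed at these settings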

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-log4j.xml b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-log4j.xml
new file mode 100644
index 0000000..bcb731a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-log4j.xml
@@ -0,0 +1,84 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <property>
+    <name>druid_log_level</name>
+    <value>info</value>
+    <description>Log level for io.druid logging</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>root_log_level</name>
+    <value>WARN</value>
+    <description>Log level for root logging</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>metamx_log_level</name>
+    <value>info</value>
+    <description>Log level for com.metamx logging</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>druid-log4j template</display-name>
+    <description>Custom log4j.properties</description>
+    <value><![CDATA[<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+    <Configuration>
+        <Appenders>
+            <Console name="Console" target="SYSTEM_OUT">
+                <PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/>
+            </Console>
+        </Appenders>
+        <Loggers>
+            <Logger name="com.metamx" level="{{metamx_log_level}}"/>
+            <Logger name="io.druid" level="{{druid_log_level}}"/>
+            <Root level="{{root_log_level}}">
+                <AppenderRef ref="Console"/>
+            </Root>
+        </Loggers>
+    </Configuration>
+      ]]></value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-logrotate.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-logrotate.xml b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-logrotate.xml
new file mode 100644
index 0000000..b7308ce
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-logrotate.xml
@@ -0,0 +1,68 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <property>
+    <name>druid_log_maxfilesize</name>
+    <value>256</value>
+    <description>The maximum size a log file may reach before it is rotated</description>
+    <display-name>Druid Log: backup file size</display-name>
+    <value-attributes>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid_log_maxbackupindex</name>
+    <value>7</value>
+    <description>The number of backup files</description>
+    <display-name>Druid Log: # of backup files</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>druid logrotate template</display-name>
+    <description>Custom logrotate file</description>
+    <value><![CDATA[
+    {{druid_log_dir}}/*.log {
+        copytruncate
+        rotate {{druid_log_maxbackupindex}}
+        daily
+        nocompress
+        missingok
+        notifempty
+        create 660 druid users
+        dateext
+        dateformat -%Y-%m-%d-%s
+        size {{druid_log_maxfilesize}}M
+        }
+      ]]></value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
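
Rendered with the defaults above (druid_log_dir=/var/log/druid, maxbackupindex=7, maxfilesize=256), the template expands roughly to the following; a sketch of the placeholder substitution, not the exact file Ambari writes:

    /var/log/druid/*.log {
        copytruncate
        rotate 7
        daily
        nocompress
        missingok
        notifempty
        create 660 druid users
        dateext
        dateformat -%Y-%m-%d-%s
        size 256M
        }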

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-middlemanager.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-middlemanager.xml b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-middlemanager.xml
new file mode 100644
index 0000000..08280ad
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-middlemanager.xml
@@ -0,0 +1,122 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>druid.service</name>
+    <value>druid/middlemanager</value>
+    <description>The druid.service name of middlemanager node.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.port</name>
+    <value>8091</value>
+    <description>The port on which the middlemanager nodes will accept connections.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.indexer.runner.startPort</name>
+    <value>8100</value>
+    <description>The port that peons begin running on.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.worker.capacity</name>
+    <value>3</value>
+    <description>Maximum number of tasks the middlemanager can accept and run concurrently.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.indexer.runner.javaOpts</name>
+    <value>-server -Xmx2g -Duser.timezone=UTC -Dfile.encoding=UTF-8 -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager -Dhdp.version={{stack_version}} -Dhadoop.mapreduce.job.classloader=true</value>
+    <description>
+      A string of -X Java options to pass to the peon's JVM.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.indexer.task.baseTaskDir</name>
+    <value>/apps/druid/tasks</value>
+    <description>
+      Base temporary working directory for druid tasks.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.server.http.numThreads</name>
+    <value>50</value>
+    <description>
+      Number of threads for HTTP requests.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.processing.buffer.sizeBytes</name>
+    <value>256000000</value>
+    <value-attributes>
+      <type>long</type>
+      <minimum>0</minimum>
+      <unit>Bytes</unit>
+    </value-attributes>
+    <description>
+      This specifies a buffer size for the storage of intermediate results. The computation engine in both the
+      Historical and Realtime nodes will use a scratch buffer of this size to do all of their intermediate computations
+      off-heap. Larger values allow for more aggregations in a single pass over the data while smaller values can
+      require more passes depending on the query that is being executed.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.processing.numThreads</name>
+    <value>2</value>
+    <description>
+      The number of processing threads to have available for parallel processing of segments.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.indexer.task.hadoopWorkingPath</name>
+    <value>/tmp/druid-indexing</value>
+    <description>
+      Temporary working directory for Hadoop tasks
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.indexer.fork.property.hadoop.mapreduce.reduce.java.opts</name>
+    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
+    <description>
+      Default java properties for Reducer containers
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>druid.indexer.fork.property.hadoop.mapreduce.map.java.opts</name>
+    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
+    <description>
+      Default java properties for Map containers
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+</configuration>
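
One implication of druid.indexer.runner.startPort and druid.worker.capacity worth spelling out: each concurrently running peon claims its own port starting at startPort, so the middlemanager host needs a small contiguous port range free. An illustration of the port math (not Ambari logic):

    start_port = 8100   # druid.indexer.runner.startPort
    capacity = 3        # druid.worker.capacity

    peon_ports = [start_port + i for i in range(capacity)]
    print(peon_ports)   # [8100, 8101, 8102]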

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-overlord.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-overlord.xml b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-overlord.xml
new file mode 100644
index 0000000..57d1c63
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-overlord.xml
@@ -0,0 +1,52 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>druid.service</name>
+    <value>druid/overlord</value>
+    <description>The druid.service name of overlord node.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.port</name>
+    <value>8090</value>
+    <description>The port on which the overlord will accept connections.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.indexer.runner.type</name>
+    <value>remote</value>
+    <description>Choices "local" or "remote". Indicates whether tasks should be run locally or in a distributed
+      environment.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.indexer.storage.type</name>
+    <value>metadata</value>
+    <description>Choices are "local" or "metadata". Indicates whether incoming tasks should be stored locally (in heap)
+      or in metadata storage. Storing incoming tasks in metadata storage allows for tasks to be resumed if the overlord
+      should fail.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-router.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-router.xml b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-router.xml
new file mode 100644
index 0000000..d544315
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-router.xml
@@ -0,0 +1,59 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>druid.service</name>
+    <value>druid/router</value>
+    <description>The druid.service name of router node.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.port</name>
+    <value>8888</value>
+    <description>The port on which the router will accept connections.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.router.http.numConnections</name>
+    <value>20</value>
+    <description>
+      Size of connection pool for the router to connect to historical and real-time nodes. If there are more
+      queries than this number that all need to speak to the same node, then they will queue up.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.server.http.numThreads</name>
+    <value>50</value>
+    <description>Number of threads for HTTP requests.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.router.tierToBrokerMap</name>
+    <value>{"_default_tier":"druid/broker"}</value>
+    <description>
+      Used to route queries for a certain tier of data to their appropriate broker. An ordered JSON map of
+      tiers to broker names. The priority of brokers is based on the ordering.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
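
Since broker priority follows the ordering of druid.router.tierToBrokerMap, tooling that parses the value must preserve key order. An illustrative check in Python (the extra "hot" tier is made up):

    import json

    raw = '{"hot":"druid/broker-hot","_default_tier":"druid/broker"}'
    # object_pairs_hook keeps the JSON key order explicit
    tier_map = json.loads(raw, object_pairs_hook=list)
    print(tier_map)  # [('hot', 'druid/broker-hot'), ('_default_tier', 'druid/broker')]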

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/metainfo.xml b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/metainfo.xml
new file mode 100644
index 0000000..53dee2b
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/metainfo.xml
@@ -0,0 +1,223 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>DRUID</name>
+      <displayName>Druid</displayName>
+      <comment>A fast column-oriented distributed data store.</comment>
+      <version>0.10.1</version>
+      <components>
+        <component>
+          <name>DRUID_COORDINATOR</name>
+          <displayName>Druid Coordinator</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/coordinator.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configuration-dependencies>
+            <config-type>druid-coordinator</config-type>
+          </configuration-dependencies>
+        </component>
+        <component>
+          <name>DRUID_OVERLORD</name>
+          <displayName>Druid Overlord</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/overlord.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configuration-dependencies>
+            <config-type>druid-overlord</config-type>
+          </configuration-dependencies>
+        </component>
+        <component>
+          <name>DRUID_HISTORICAL</name>
+          <displayName>Druid Historical</displayName>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/historical.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configuration-dependencies>
+            <config-type>druid-historical</config-type>
+          </configuration-dependencies>
+        </component>
+        <component>
+          <name>DRUID_BROKER</name>
+          <displayName>Druid Broker</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/broker.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configuration-dependencies>
+            <config-type>druid-broker</config-type>
+          </configuration-dependencies>
+        </component>
+        <component>
+          <name>DRUID_MIDDLEMANAGER</name>
+          <displayName>Druid MiddleManager</displayName>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/middlemanager.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configuration-dependencies>
+            <config-type>druid-middlemanager</config-type>
+          </configuration-dependencies>
+        </component>
+        <component>
+          <name>DRUID_ROUTER</name>
+          <displayName>Druid Router</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/router.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configuration-dependencies>
+            <config-type>druid-router</config-type>
+          </configuration-dependencies>
+        </component>
+      </components>
+
+      <themes>
+        <theme>
+          <fileName>theme.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat7,amazon2015,redhat6,suse11,suse12</osFamily>
+          <packages>
+            <package>
+              <name>druid_${stack_version}</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
+          <packages>
+            <package>
+              <name>druid-${stack_version}</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+      <requiredServices>
+        <service>ZOOKEEPER</service>
+      </requiredServices>
+      <configuration-dependencies>
+        <config-type>druid-common</config-type>
+        <config-type>druid-env</config-type>
+        <config-type>druid-log4j</config-type>
+        <config-type>druid-logrotate</config-type>
+        <config-type>zoo.cfg</config-type>
+      </configuration-dependencies>
+
+      <quickLinksConfigurations>
+        <quickLinksConfiguration>
+          <fileName>quicklinks.json</fileName>
+          <default>true</default>
+        </quickLinksConfiguration>
+      </quickLinksConfigurations>
+
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/broker.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/broker.py b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/broker.py
new file mode 100644
index 0000000..bd170cb
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/broker.py
@@ -0,0 +1,28 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from druid_node import DruidBase
+
+
+class DruidBroker(DruidBase):
+  def __init__(self):
+    DruidBase.__init__(self, nodeType="broker")
+
+
+if __name__ == "__main__":
+  DruidBroker().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/coordinator.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/coordinator.py b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/coordinator.py
new file mode 100644
index 0000000..a86fa40
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/coordinator.py
@@ -0,0 +1,28 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from druid_node import DruidBase
+
+
+class DruidCoordinator(DruidBase):
+  def __init__(self):
+    DruidBase.__init__(self, nodeType="coordinator")
+
+
+if __name__ == "__main__":
+  DruidCoordinator().execute()
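
The remaining node scripts referenced from metainfo.xml (historical.py, middlemanager.py, router.py, overlord.py) follow the same two-line pattern as broker.py and coordinator.py above; for example, historical.py would look like this sketch:

    from druid_node import DruidBase


    class DruidHistorical(DruidBase):
      def __init__(self):
        DruidBase.__init__(self, nodeType="historical")


    if __name__ == "__main__":
      DruidHistorical().execute()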

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/druid.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/druid.py b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/druid.py
new file mode 100644
index 0000000..ec98c3c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/druid.py
@@ -0,0 +1,307 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import json
+import os
+from resource_management import Fail
+from resource_management.libraries.resources.properties_file import PropertiesFile
+from resource_management.core.resources.system import Directory, Execute, File
+from resource_management.core.source import DownloadSource
+from resource_management.core.source import InlineTemplate
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.show_logs import show_logs
+from resource_management.core.logger import Logger
+
+
+def druid(upgrade_type=None, nodeType=None):
+  import params
+  ensure_base_directories()
+
+  # Environment Variables
+  File(format("{params.druid_conf_dir}/druid-env.sh"),
+       owner=params.druid_user,
+       content=InlineTemplate(params.druid_env_sh_template),
+       mode = 0700
+       )
+
+  # common config
+  druid_common_config = mutable_config_dict(params.config['configurations']['druid-common'])
+  # User cannot override below configs
+  druid_common_config['druid.host'] = params.hostname
+  druid_common_config['druid.extensions.directory'] = params.druid_extensions_dir
+  druid_common_config['druid.extensions.hadoopDependenciesDir'] = params.druid_hadoop_dependencies_dir
+  druid_common_config['druid.selectors.indexing.serviceName'] = params.config['configurations']['druid-overlord'][
+    'druid.service']
+  druid_common_config['druid.selectors.coordinator.serviceName'] = \
+    params.config['configurations']['druid-coordinator']['druid.service']
+  druid_common_config['druid.extensions.loadList'] = json.dumps(eval(params.druid_extensions_load_list) +
+                                                     eval(params.druid_security_extensions_load_list))
+
+  # Delete the user and password when Derby is the metadata store, otherwise Derby will fail.
+  if 'derby' == druid_common_config['druid.metadata.storage.type']:
+    del druid_common_config['druid.metadata.storage.connector.user']
+    del druid_common_config['druid.metadata.storage.connector.password']
+
+  druid_env_config = mutable_config_dict(params.config['configurations']['druid-env'])
+
+  PropertiesFile("common.runtime.properties",
+                 dir=params.druid_common_conf_dir,
+                 properties=druid_common_config,
+                 owner=params.druid_user,
+                 group=params.user_group,
+                 mode = 0600
+                 )
+  Logger.info("Created common.runtime.properties")
+
+  File(format("{params.druid_common_conf_dir}/druid-log4j.xml"),
+       mode=0644,
+       owner=params.druid_user,
+       group=params.user_group,
+       content=InlineTemplate(params.log4j_props)
+       )
+  Logger.info("Created log4j file")
+
+  File("/etc/logrotate.d/druid",
+       mode=0644,
+       owner='root',
+       group='root',
+       content=InlineTemplate(params.logrotate_props)
+       )
+
+  Logger.info("Created log rotate file")
+
+  # node specific configs
+  for node_type in ['coordinator', 'overlord', 'historical', 'broker', 'middleManager', 'router']:
+    node_config_dir = format('{params.druid_conf_dir}/{node_type}')
+    node_type_lowercase = node_type.lower()
+
+    # Write runtime.properties file
+    node_config = mutable_config_dict(params.config['configurations'][format('druid-{node_type_lowercase}')])
+    PropertiesFile("runtime.properties",
+                   dir=node_config_dir,
+                   properties=node_config,
+                   owner=params.druid_user,
+                   group=params.user_group,
+                   mode = 0600
+                   )
+    Logger.info(format("Created druid-{node_type_lowercase} runtime.properties"))
+
+    # Write jvm configs
+    File(format('{node_config_dir}/jvm.config'),
+         owner=params.druid_user,
+         group=params.user_group,
+         content=InlineTemplate(
+           "-server \n-Xms{{node_heap_memory}}m \n-Xmx{{node_heap_memory}}m \n-XX:MaxDirectMemorySize={{node_direct_memory}}m \n-Dlog4j.configurationFile={{log4j_config_file}} \n-Dlog4j.debug \n{{node_jvm_opts}}",
+           node_heap_memory=druid_env_config[format('druid.{node_type_lowercase}.jvm.heap.memory')],
+           log4j_config_file=format("{params.druid_common_conf_dir}/druid-log4j.xml"),
+           node_direct_memory=druid_env_config[
+             format('druid.{node_type_lowercase}.jvm.direct.memory')],
+           node_jvm_opts=druid_env_config[format('druid.{node_type_lowercase}.jvm.opts')])
+         )
+    Logger.info(format("Created druid-{node_type_lowercase} jvm.config"))
+    # Handle hadoop LZO jars if enabled and the node type is hadoop-related, e.g. Overlords and MiddleManagers
+    if node_type_lowercase in ['middlemanager', 'overlord'] and params.lzo_enabled and len(
+            params.lzo_packages) > 0:
+        try:
+            Logger.info(
+                format(
+                    "Copying hadoop lzo jars from {hadoop_lib_home} to {druid_hadoop_dependencies_dir}/hadoop-client/*/"))
+            Execute(
+                format('{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {druid_hadoop_dependencies_dir}/hadoop-client/*/'))
+        except Fail as ex:
+            Logger.info(format("No Hadoop LZO found at {hadoop_lib_home}/hadoop-lzo*.jar"))
+
+  # All druid nodes have dependency on hdfs_client
+  ensure_hadoop_directories()
+  download_database_connector_if_needed()
+  # Pull all required dependencies
+  pulldeps()
+
+
+def mutable_config_dict(config):
+  rv = {}
+  for key, value in config.iteritems():
+    rv[key] = value
+  return rv
+
+
+def ensure_hadoop_directories():
+  import params
+  if 'hdfs-site' not in params.config['configurations']:
+    # HDFS Not Installed nothing to do.
+    Logger.info("Skipping HDFS directory creation as HDFS not installed")
+    return
+
+  druid_common_config = params.config['configurations']['druid-common']
+  # final overlord config contains both common and overlord config
+  druid_middlemanager_config = params.config['configurations']['druid-middlemanager']
+
+  # If user is using HDFS as deep storage create HDFS Directory for storing segments
+  deep_storage = druid_common_config["druid.storage.type"]
+  storage_dir = druid_common_config["druid.storage.storageDirectory"]
+
+  if deep_storage == 'hdfs':
+    # create the home dir for druid
+    params.HdfsResource(format("/user/{params.druid_user}"),
+                        type="directory",
+                        action="create_on_execute",
+                        owner=params.druid_user,
+                        group='hadoop',
+                        recursive_chown=True,
+                        recursive_chmod=True
+                        )
+
+    # create the segment storage dir, users like hive from group hadoop need to write to this directory
+    create_hadoop_directory(storage_dir, mode=0775)
+
+  # Create HadoopIndexTask hadoopWorkingPath
+  hadoop_working_path = druid_middlemanager_config['druid.indexer.task.hadoopWorkingPath']
+  if hadoop_working_path is not None:
+    if hadoop_working_path.startswith(params.hdfs_tmp_dir):
+        params.HdfsResource(params.hdfs_tmp_dir,
+                            type="directory",
+                            action="create_on_execute",
+                            owner=params.hdfs_user,
+                            mode=0777,
+                            )
+    create_hadoop_directory(hadoop_working_path, mode=0775)
+
+  # If HDFS is used for storing logs, create Index Task log directory
+  indexer_logs_type = druid_common_config['druid.indexer.logs.type']
+  indexer_logs_directory = druid_common_config['druid.indexer.logs.directory']
+  if indexer_logs_type == 'hdfs' and indexer_logs_directory is not None:
+    create_hadoop_directory(indexer_logs_directory)
+
+
+def create_hadoop_directory(hadoop_dir, mode=0755):
+  import params
+  params.HdfsResource(hadoop_dir,
+                      type="directory",
+                      action="create_on_execute",
+                      owner=params.druid_user,
+                      group='hadoop',
+                      mode=mode
+                      )
+  Logger.info(format("Created Hadoop Directory [{hadoop_dir}], with mode [{mode}]"))
+
+
+def ensure_base_directories():
+  import params
+  Directory(
+    [params.druid_log_dir, params.druid_pid_dir],
+    mode=0755,
+    owner=params.druid_user,
+    group=params.user_group,
+    create_parents=True,
+    recursive_ownership=True,
+  )
+
+  Directory(
+    [params.druid_conf_dir, params.druid_common_conf_dir, params.druid_coordinator_conf_dir,
+     params.druid_broker_conf_dir, params.druid_middlemanager_conf_dir, params.druid_historical_conf_dir,
+     params.druid_overlord_conf_dir, params.druid_router_conf_dir, params.druid_segment_infoDir,
+     params.druid_tasks_dir],
+    mode=0700,
+    cd_access='a',
+    owner=params.druid_user,
+    group=params.user_group,
+    create_parents=True,
+    recursive_ownership=True,
+  )
+
+  segment_cache_locations = json.loads(params.druid_segment_cache_locations)
+  for segment_cache_location in segment_cache_locations:
+    Directory(
+      segment_cache_location["path"],
+      mode=0700,
+      owner=params.druid_user,
+      group=params.user_group,
+      create_parents=True,
+      recursive_ownership=True,
+      cd_access='a'
+    )
+
+
+
+def get_daemon_cmd(params=None, node_type=None, command=None):
+  return format('source {params.druid_conf_dir}/druid-env.sh ; {params.druid_home}/bin/node.sh {node_type} {command}')
+
+
+def getPid(params=None, nodeType=None):
+  return format('{params.druid_pid_dir}/{nodeType}.pid')
+
+
+def pulldeps():
+  import params
+  extensions_list = eval(params.druid_extensions)
+  extensions_string = " -c ".join(extensions_list)
+  repository_list = eval(params.druid_repo_list)
+  repository_string = " -r ".join(repository_list)
+  if len(extensions_list) > 0:
+    try:
+      # Make sure druid user has permissions to write dependencies
+      Directory(
+        [params.druid_extensions_dir, params.druid_hadoop_dependencies_dir],
+        mode=0755,
+        cd_access='a',
+        owner=params.druid_user,
+        group=params.user_group,
+        create_parents=True,
+        recursive_ownership=True,
+      )
+      pull_deps_command = format(
+        "source {params.druid_conf_dir}/druid-env.sh ; java -classpath '{params.druid_home}/lib/*' -Ddruid.extensions.loadList=[] "
+        "-Ddruid.extensions.directory={params.druid_extensions_dir} -Ddruid.extensions.hadoopDependenciesDir={params.druid_hadoop_dependencies_dir} "
+        "io.druid.cli.Main tools pull-deps -c {extensions_string} --no-default-hadoop")
+
+      if len(repository_list) > 0:
+        pull_deps_command = format("{pull_deps_command} -r {repository_string}")
+
+      Execute(pull_deps_command,
+              user=params.druid_user
+              )
+      Logger.info(format("Pull Dependencies Complete"))
+    except:
+      show_logs(params.druid_log_dir, params.druid_user)
+      raise
+
+
+def download_database_connector_if_needed():
+  """
+  Downloads the database connector to use when connecting to the metadata storage
+  """
+  import params
+  if params.metadata_storage_type != 'mysql' or not params.jdbc_driver_jar:
+    return
+
+  File(params.check_db_connection_jar,
+       content = DownloadSource(format("{jdk_location}{check_db_connection_jar_name}"))
+       )
+
+  target_jar_with_directory = params.connector_download_dir + os.path.sep + params.jdbc_driver_jar
+
+  if not os.path.exists(target_jar_with_directory):
+    File(params.downloaded_custom_connector,
+         content=DownloadSource(params.connector_curl_source))
+
+    Execute(('cp', '--remove-destination', params.downloaded_custom_connector, target_jar_with_directory),
+            path=["/bin", "/usr/bin/"],
+            sudo=True)
+
+    File(target_jar_with_directory, owner=params.druid_user,
+         group=params.user_group)
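
For clarity on the flag assembly in pulldeps() above, joining extension names with " -c " and prepending the literal "-c" in the command string yields one flag per extension (the extension names here are examples):

    extensions_list = ["druid-hdfs-storage", "ambari-metrics-emitter"]
    extensions_string = " -c ".join(extensions_list)
    print("pull-deps -c " + extensions_string + " --no-default-hadoop")
    # pull-deps -c druid-hdfs-storage -c ambari-metrics-emitter --no-default-hadoop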

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/druid_node.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/druid_node.py b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/druid_node.py
new file mode 100644
index 0000000..8053dcb
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/druid_node.py
@@ -0,0 +1,114 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+from resource_management.core import sudo
+from resource_management import Script
+from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.show_logs import show_logs
+from druid import druid, get_daemon_cmd, getPid
+
+
+class DruidBase(Script):
+  def __init__(self, nodeType=None):
+    Script.__init__(self)
+    self.nodeType = nodeType
+
+  def install(self, env):
+    self.install_packages(env)
+
+  def configure(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    druid(upgrade_type=upgrade_type, nodeType=self.nodeType)
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    node_type_lower = self.nodeType.lower()
+    Logger.info(format("Executing druid-{node_type_lower} Upgrade pre-restart"))
+    import params
+
+    env.set_params(params)
+
+    if params.stack_version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.stack_version):
+      stack_select.select_packages(params.stack_version)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env, upgrade_type=upgrade_type)
+    daemon_cmd = get_daemon_cmd(params, self.nodeType, "start")
+    # Verify Database connection on Druid start
+    if params.metadata_storage_type == 'mysql':
+      if not params.jdbc_driver_jar or not os.path.isfile(params.connector_download_dir + os.path.sep + params.jdbc_driver_jar):
+        path_to_jdbc = params.connector_download_dir + os.path.sep + "*"
+        error_message = format("Error! Could not find a MySQL JDBC driver, so the database connection check may fail. "
+                               "Please run 'ambari-server setup --jdbc-db=mysql --jdbc-driver={path_to_jdbc}' on the Ambari server host.")
+        Logger.error(error_message)
+      else:
+        path_to_jdbc = params.connector_download_dir + os.path.sep + params.jdbc_driver_jar
+      db_connection_check_command = format("{params.java8_home}/bin/java -cp {params.check_db_connection_jar}:{path_to_jdbc} org.apache.ambari.server.DBConnectionVerification '{params.metadata_storage_url}' {params.metadata_storage_user} {params.metadata_storage_password!p} com.mysql.jdbc.Driver")
+    else:
+      db_connection_check_command = None
+
+    if db_connection_check_command:
+      sudo.chmod(params.check_db_connection_jar, 0755)
+      Execute( db_connection_check_command,
+               tries=5,
+               try_sleep=10,
+               user=params.druid_user
+               )
+
+    try:
+      Execute(daemon_cmd,
+              user=params.druid_user
+              )
+    except:
+      show_logs(params.druid_log_dir, params.druid_user)
+      raise
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    daemon_cmd = get_daemon_cmd(params, self.nodeType, "stop")
+    try:
+      Execute(daemon_cmd,
+              user=params.druid_user
+              )
+    except:
+      show_logs(params.druid_log_dir, params.druid_user)
+      raise
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = getPid(status_params, self.nodeType)
+    check_process_status(pid_file)
+
+  def get_log_folder(self):
+    import params
+    return params.druid_log_dir
+
+  def get_user(self):
+    import params
+    return params.druid_user
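
The start and stop paths above delegate to get_daemon_cmd(), which only
renders a node.sh invocation; Execute() then runs it as the druid user. A
minimal sketch of the resulting command line, assuming hypothetical conf
and home directories:

    druid_conf_dir = "/usr/hdp/current/druid-overlord/conf"  # hypothetical
    druid_home = "/usr/hdp/current/druid-overlord"           # hypothetical
    daemon_cmd = "source %s/druid-env.sh ; %s/bin/node.sh %s %s" % (
        druid_conf_dir, druid_home, "overlord", "start")
    print(daemon_cmd)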

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/historical.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/historical.py b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/historical.py
new file mode 100644
index 0000000..22390a6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/historical.py
@@ -0,0 +1,28 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from druid_node import DruidBase
+
+
+class DruidHistorical(DruidBase):
+  def __init__(self):
+    DruidBase.__init__(self, nodeType="historical")
+
+
+if __name__ == "__main__":
+  DruidHistorical().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/middlemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/middlemanager.py b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/middlemanager.py
new file mode 100644
index 0000000..20df89c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/middlemanager.py
@@ -0,0 +1,28 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from druid_node import DruidBase
+
+
+class DruidMiddleManager(DruidBase):
+  def __init__(self):
+    DruidBase.__init__(self, nodeType="middleManager")
+
+
+if __name__ == "__main__":
+  DruidMiddleManager().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/overlord.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/overlord.py b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/overlord.py
new file mode 100644
index 0000000..e4d7fcc
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/overlord.py
@@ -0,0 +1,28 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from druid_node import DruidBase
+
+
+class DruidOverlord(DruidBase):
+  def __init__(self):
+    DruidBase.__init__(self, nodeType="overlord")
+
+
+if __name__ == "__main__":
+  DruidOverlord().execute()


[14/50] [abbrv] ambari git commit: AMBARI-22164: Update services API to use service_name, service_type instead of service_display_name, service_name (jluniya)

Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
index 8548cf3..56d352e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
@@ -1157,9 +1157,9 @@ public class HostImpl implements Host {
     HostEntity hostEntity = getHostEntity();
 
     for (HostComponentStateEntity componentState : hostEntity.getHostComponentStateEntities()) {
-      String serviceDisplayName = componentState.getServiceComponentDesiredStateEntity().getClusterServiceEntity().getServiceName();
-      ComponentInfo component = ambariMetaInfo.getComponent(stackId.getStackName(), stackId.getStackVersion(),
-                                                            serviceDisplayName, componentState.getComponentName());
+      String serviceType = componentState.getServiceComponentDesiredStateEntity().getClusterServiceEntity().getServiceType();
+      ComponentInfo component = ambariMetaInfo.getComponent(
+        stackId.getStackName(), stackId.getStackVersion(), serviceType, componentState.getComponentName());
 
       if (component.isVersionAdvertised()) {
         return true;

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/state/repository/VersionDefinitionXml.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/repository/VersionDefinitionXml.java b/ambari-server/src/main/java/org/apache/ambari/server/state/repository/VersionDefinitionXml.java
index ba035ab..a19238e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/repository/VersionDefinitionXml.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/repository/VersionDefinitionXml.java
@@ -302,7 +302,7 @@ public class VersionDefinitionXml {
         continue;
       }
 
-      ServiceVersionSummary summary = new ServiceVersionSummary(service.getServiceDisplayName());
+      ServiceVersionSummary summary = new ServiceVersionSummary(service.getDisplayName());
       summaries.put(service.getName(), summary);
 
       String serviceVersion = service.getDesiredRepositoryVersion().getVersion();

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
index 54cbc3d..2697b40 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
@@ -812,7 +812,7 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
 
     // publish the service component installed event
     ServiceComponentInstalledEvent event = new ServiceComponentInstalledEvent(getClusterId(),
-        stackId.getStackName(), stackId.getStackVersion(), getServiceName(), getServiceDisplayName(),
+        stackId.getStackName(), stackId.getStackVersion(), getServiceName(), getServiceType(),
         getServiceGroupName(), getServiceComponentName(), getHostName(), isRecoveryEnabled());
 
     eventPublisher.publish(event);
@@ -1068,8 +1068,8 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
   }
 
   @Override
-  public String getServiceDisplayName() {
-    return serviceComponent.getServiceDisplayName();
+  public String getServiceType() {
+    return serviceComponent.getServiceType();
   }
 
   @Override
@@ -1175,7 +1175,7 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
 
     String clusterName = serviceComponent.getClusterName();
     Long clusterId = serviceComponent.getClusterId();
-    String serviceName = serviceComponent.getServiceDisplayName();
+    String serviceName = serviceComponent.getServiceName();
     Cluster cluster = null;
     Service service = null;
     try {
@@ -1211,7 +1211,7 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
     }
 
     ServiceComponentHostResponse r = new ServiceComponentHostResponse(clusterId, clusterName, service.getServiceGroupId(),
-            service.getServiceGroupName(), service.getServiceId(), service.getName(), service.getServiceDisplayName(),
+            service.getServiceGroupName(), service.getServiceId(), service.getName(), service.getServiceType(),
             hostComponentId, serviceComponentName, displayName, hostName, publicHostName, state, getVersion(),
             desiredState, desiredStackId, desiredRepositoryVersion, componentAdminState);
 
@@ -1307,14 +1307,14 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
       String stackVersion = stackId.getStackVersion();
       String stackName = stackId.getStackName();
       String serviceName = getServiceName();
-      String serviceDisplayName = getServiceDisplayName();
+      String serviceType = getServiceType();
       String serviceGroupName  = getServiceGroupName();
       String componentName = getServiceComponentName();
       String hostName = getHostName();
       boolean recoveryEnabled = isRecoveryEnabled();
 
       ServiceComponentUninstalledEvent event = new ServiceComponentUninstalledEvent(
-          clusterId, stackName, stackVersion, serviceName, serviceDisplayName, serviceGroupName, componentName,
+          clusterId, stackName, stackVersion, serviceName, serviceType, serviceGroupName, componentName,
           hostName, recoveryEnabled);
 
       eventPublisher.publish(event);

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostSummary.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostSummary.java b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostSummary.java
index 4615ff6..7edabc6 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostSummary.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostSummary.java
@@ -24,6 +24,7 @@ import java.util.HashSet;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.orm.entities.ClusterServiceEntity;
 import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
 import org.apache.ambari.server.orm.entities.HostEntity;
 import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
@@ -67,9 +68,10 @@ public class ServiceComponentHostSummary {
     String stackVersion = repositoryVersion.getStackVersion();
 
     for (HostComponentStateEntity hostComponentStateEntity : allHostComponents) {
-      String serviceDisplayName  = hostComponentStateEntity.getServiceComponentDesiredStateEntity().
-        getClusterServiceEntity().getServiceDisplayName();
-      ComponentInfo compInfo = ambariMetaInfo.getComponent(stackName, stackVersion, serviceDisplayName,
+      ClusterServiceEntity serviceEntity = hostComponentStateEntity.getServiceComponentDesiredStateEntity().getClusterServiceEntity();
+      String serviceName = serviceEntity.getServiceName();
+      String serviceType  = serviceEntity.getServiceType();
+      ComponentInfo compInfo = ambariMetaInfo.getComponent(stackName, stackVersion, serviceType,
                                                            hostComponentStateEntity.getComponentName());
 
       if (!compInfo.isVersionAdvertised()) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
index 51bd31f..b8c0a42 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
@@ -162,7 +162,7 @@ CREATE TABLE servicegroups (
 CREATE TABLE clusterservices (
   id BIGINT NOT NULL,
   service_name VARCHAR(255) NOT NULL,
-  service_display_name VARCHAR(255) NOT NULL,
+  service_type VARCHAR(255) NOT NULL,
   cluster_id BIGINT NOT NULL,
   service_group_id BIGINT NOT NULL,
   service_enabled INTEGER NOT NULL,

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
index 2fa9f56..c56e486 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
@@ -181,7 +181,7 @@ CREATE TABLE servicegroups (
 CREATE TABLE clusterservices (
   id BIGINT NOT NULL,
   service_name VARCHAR(255) NOT NULL,
-  service_display_name VARCHAR(255) NOT NULL,
+  service_type VARCHAR(255) NOT NULL,
   cluster_id BIGINT NOT NULL,
   service_group_id BIGINT NOT NULL,
   service_enabled INTEGER NOT NULL,

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
index 9443266..8917d48 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
@@ -163,7 +163,7 @@ CREATE TABLE servicegroups (
 CREATE TABLE clusterservices (
   id NUMBER(19) NOT NULL,
   service_name VARCHAR2(255) NOT NULL,
-  service_display_name VARCHAR(255) NOT NULL,
+  service_type VARCHAR(255) NOT NULL,
   cluster_id NUMBER(19) NOT NULL,
   service_group_id NUMBER(19) NOT NULL,
   service_enabled NUMBER(10) NOT NULL,

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
index eda83fa..f48bcd6 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
@@ -162,7 +162,7 @@ CREATE TABLE servicegroups (
 CREATE TABLE clusterservices (
   id BIGINT NOT NULL,
   service_name VARCHAR(255) NOT NULL,
-  service_display_name VARCHAR(255) NOT NULL,
+  service_type VARCHAR(255) NOT NULL,
   cluster_id BIGINT NOT NULL,
   service_group_id BIGINT NOT NULL,
   service_enabled INTEGER NOT NULL,

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
index 3da609e..e7c5c72 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
@@ -161,7 +161,7 @@ CREATE TABLE servicegroups (
 CREATE TABLE clusterservices (
   id NUMERIC(19) NOT NULL,
   service_name VARCHAR(255) NOT NULL,
-  service_display_name VARCHAR(255) NOT NULL,
+  service_type VARCHAR(255) NOT NULL,
   cluster_id NUMERIC(19) NOT NULL,
   service_group_id NUMERIC(19) NOT NULL,
   service_enabled INTEGER NOT NULL,

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
index 0d09854..f87af4c 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
@@ -175,7 +175,7 @@ CREATE TABLE servicegroups (
 CREATE TABLE clusterservices (
   id BIGINT NOT NULL,
   service_name VARCHAR(255) NOT NULL,
-  service_display_name VARCHAR(255) NOT NULL,
+  service_type VARCHAR(255) NOT NULL,
   cluster_id BIGINT NOT NULL,
   service_group_id BIGINT NOT NULL,
   service_enabled INT NOT NULL,

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/resources/key_properties.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/key_properties.json b/ambari-server/src/main/resources/key_properties.json
index 17d7cc4..73eb039 100644
--- a/ambari-server/src/main/resources/key_properties.json
+++ b/ambari-server/src/main/resources/key_properties.json
@@ -10,7 +10,7 @@
     "Cluster": "HostRoles/cluster_name",
     "ServiceGroup": "HostRoles/service_group_name",
     "Host": "HostRoles/host_name",
-    "Service": "HostRoles/service_display_name",
+    "Service": "HostRoles/service_name",
     "HostComponent": "HostRoles/component_name",
     "Component": "HostRoles/component_name"
   },

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-server/src/main/resources/properties.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/properties.json b/ambari-server/src/main/resources/properties.json
index 2f8bb7c..3e73217 100644
--- a/ambari-server/src/main/resources/properties.json
+++ b/ambari-server/src/main/resources/properties.json
@@ -61,7 +61,7 @@
         "HostRoles/maintenance_state",
         "HostRoles/service_id",
         "HostRoles/service_name",
-        "HostRoles/service_display_name",
+        "HostRoles/service_type",
         "HostRoles/upgrade_state",
         "_"
     ],

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-web/app/controllers/global/update_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/global/update_controller.js b/ambari-web/app/controllers/global/update_controller.js
index ace608f..0758686 100644
--- a/ambari-web/app/controllers/global/update_controller.js
+++ b/ambari-web/app/controllers/global/update_controller.js
@@ -457,7 +457,7 @@ App.UpdateController = Em.Controller.extend({
       realUrl = '/components/?' + flumeHandlerParam + atsHandlerParam + haComponents +
         'ServiceComponentInfo/category=MASTER&fields=' +
         'ServiceComponentInfo/service_name,' +
-        'ServiceComponentInfo/service_display_name,' +
+        'ServiceComponentInfo/service_type,' +
         'host_components/HostRoles/display_name,' +
         'host_components/HostRoles/host_name,' +
         'host_components/HostRoles/public_host_name,' +

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-web/app/controllers/main/admin/kerberos/step3_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/kerberos/step3_controller.js b/ambari-web/app/controllers/main/admin/kerberos/step3_controller.js
index ee6153b..15be4f9 100644
--- a/ambari-web/app/controllers/main/admin/kerberos/step3_controller.js
+++ b/ambari-web/app/controllers/main/admin/kerberos/step3_controller.js
@@ -45,7 +45,7 @@ App.KerberosWizardStep3Controller = App.KerberosProgressPageController.extend({
           data: {
             context: Em.I18n.t('requestInfo.kerberosService'),
             ServiceInfo: {"state": "INSTALLED"},
-            urlParams: "ServiceInfo/state=INSTALLED&ServiceInfo/service_display_name=KERBEROS"
+            urlParams: "ServiceInfo/state=INSTALLED&ServiceInfo/service_name=KERBEROS"
           },
           success: 'startPolling',
           error: 'onTaskError'

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-web/app/controllers/main/service/add_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service/add_controller.js b/ambari-web/app/controllers/main/service/add_controller.js
index 0c54e30..c11bcf2 100644
--- a/ambari-web/app/controllers/main/service/add_controller.js
+++ b/ambari-web/app/controllers/main/service/add_controller.js
@@ -428,7 +428,7 @@ App.AddServiceController = App.WizardController.extend(App.AddSecurityConfigs, {
     return {
       "context": Em.I18n.t('requestInfo.installServices'),
       "ServiceInfo": {"state": "INSTALLED"},
-      "urlParams": "ServiceInfo/service_display_name.in(" + selectedServices.join(',') + ")"
+      "urlParams": "ServiceInfo/service_name.in(" + selectedServices.join(',') + ")"
     };
   },
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-web/app/controllers/wizard/step8_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard/step8_controller.js b/ambari-web/app/controllers/wizard/step8_controller.js
index d21bcc5..dfbd58f 100644
--- a/ambari-web/app/controllers/wizard/step8_controller.js
+++ b/ambari-web/app/controllers/wizard/step8_controller.js
@@ -1034,8 +1034,8 @@ App.WizardStep8Controller = Em.Controller.extend(App.AddSecurityConfigs, App.wiz
       selectedStack = App.Stack.find().findProperty('isSelected', true);
     }
     return this.get('selectedServices').map(service => selectedStack ?
-      {"ServiceInfo": { "service_name": service.get('serviceName'), "service_display_name": service.get('serviceName'), "service_group_name": App.get('defaultServiceGroupName'), "desired_repository_version_id": selectedStack.get('versionInfoId') }} :
-      {"ServiceInfo": { "service_name": service.get('serviceName'), "service_display_name": service.get('serviceName'), "service_group_name": App.get('defaultServiceGroupName'), }});
+      {"ServiceInfo": { "service_name": service.get('serviceName'), "service_type": service.get('serviceName'), "service_group_name": App.get('defaultServiceGroupName'), "desired_repository_version_id": selectedStack.get('versionInfoId') }} :
+      {"ServiceInfo": { "service_name": service.get('serviceName'), "service_type": service.get('serviceName'), "service_group_name": App.get('defaultServiceGroupName'), }});
   },
 
   /**
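
With the rename, the request body built by this step now carries
service_type alongside service_name. A sketch of one element of that
payload, with hypothetical service group and version id values:

    payload = [{"ServiceInfo": {"service_name": "HDFS",
                                "service_type": "HDFS",
                                "service_group_name": "CORE",          # hypothetical
                                "desired_repository_version_id": 1}}]  # hypothetical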

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-web/app/controllers/wizard/step9_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard/step9_controller.js b/ambari-web/app/controllers/wizard/step9_controller.js
index e096c4f..9f27f65 100644
--- a/ambari-web/app/controllers/wizard/step9_controller.js
+++ b/ambari-web/app/controllers/wizard/step9_controller.js
@@ -492,7 +492,7 @@ App.WizardStep9Controller = Em.Controller.extend(App.ReloadPopupMixin, {
         data = {
           "context": Em.I18n.t("requestInfo.startAddedServices"),
           "ServiceInfo": { "state": "STARTED" },
-          "urlParams": "ServiceInfo/state=INSTALLED&ServiceInfo/service_display_name.in(" + servicesList.join(",") + ")&params/run_smoke_test=true&params/reconfigure_client=false"
+          "urlParams": "ServiceInfo/state=INSTALLED&ServiceInfo/service_name.in(" + servicesList.join(",") + ")&params/run_smoke_test=true&params/reconfigure_client=false"
         };
         break;
       default:

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-web/app/mappers/service_mapper.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mappers/service_mapper.js b/ambari-web/app/mappers/service_mapper.js
index 38d2724..24130dd 100644
--- a/ambari-web/app/mappers/service_mapper.js
+++ b/ambari-web/app/mappers/service_mapper.js
@@ -21,8 +21,8 @@ var misc = require('utils/misc');
 App.serviceMapper = App.QuickDataMapper.create({
   model: App.Service,
   config: {
-    id: 'ServiceInfo.service_display_name',
-    service_name: 'ServiceInfo.service_display_name',
+    id: 'ServiceInfo.service_name',
+    service_name: 'ServiceInfo.service_name',
     work_status: 'ServiceInfo.state',
     desired_repository_version_id: 'ServiceInfo.desired_repository_version_id'
   },
@@ -33,15 +33,15 @@ App.serviceMapper = App.QuickDataMapper.create({
     var self = this;
     var passiveStateMap = this.get('passiveStateMap');
     json.items.forEach(function (service) {
-      var cachedService = App.cache['services'].findProperty('ServiceInfo.service_display_name', service.ServiceInfo.service_display_name);
+      var cachedService = App.cache['services'].findProperty('ServiceInfo.service_name', service.ServiceInfo.service_name);
       if (cachedService) {
         // restore service workStatus
-        App.Service.find(cachedService.ServiceInfo.service_display_name).set('workStatus', service.ServiceInfo.state);
+        App.Service.find(cachedService.ServiceInfo.service_name).set('workStatus', service.ServiceInfo.state);
         cachedService.ServiceInfo.state = service.ServiceInfo.state;
       } else {
         var serviceData = {
           ServiceInfo: {
-            service_display_name: service.ServiceInfo.service_display_name,
+            service_name: service.ServiceInfo.service_name,
             state: service.ServiceInfo.state,
             desired_repository_version_id: service.ServiceInfo.desired_repository_version_id
           },
@@ -50,7 +50,7 @@ App.serviceMapper = App.QuickDataMapper.create({
         };
         App.cache['services'].push(serviceData);
       }
-      passiveStateMap[service.ServiceInfo.service_display_name] = service.ServiceInfo.maintenance_state;
+      passiveStateMap[service.ServiceInfo.service_name] = service.ServiceInfo.maintenance_state;
     });
 
     if (!this.get('initialAppLoad')) {
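
The QuickDataMapper config above maps model fields to dotted JSON paths,
so after the rename both the record id and service_name resolve from
ServiceInfo.service_name. A small sketch of that resolution against a
hypothetical API item:

    config = {"id": "ServiceInfo.service_name",
              "service_name": "ServiceInfo.service_name",
              "work_status": "ServiceInfo.state"}
    item = {"ServiceInfo": {"service_name": "HDFS", "state": "STARTED"}}

    def resolve(path, obj):
        for key in path.split("."):
            obj = obj[key]
        return obj

    print(dict((field, resolve(path, item)) for field, path in config.items()))
    # {'id': 'HDFS', 'service_name': 'HDFS', 'work_status': 'STARTED'}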

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-web/app/mappers/service_metrics_mapper.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mappers/service_metrics_mapper.js b/ambari-web/app/mappers/service_metrics_mapper.js
index 87cd2a0..9f79905 100644
--- a/ambari-web/app/mappers/service_metrics_mapper.js
+++ b/ambari-web/app/mappers/service_metrics_mapper.js
@@ -25,8 +25,8 @@ App.serviceMetricsMapper = App.QuickDataMapper.create({
 
   model: App.Service,
   config: {
-    id: 'ServiceInfo.service_display_name',
-    service_name: 'ServiceInfo.service_display_name',
+    id: 'ServiceInfo.service_name',
+    service_name: 'ServiceInfo.service_name',
     work_status: 'ServiceInfo.state',
     passive_state: 'ServiceInfo.passive_state',
     $rand: Math.random(),
@@ -195,7 +195,7 @@ App.serviceMetricsMapper = App.QuickDataMapper.create({
 
       json.items.forEach(function (component) {
         var serviceName = component.ServiceComponentInfo.service_name;
-        var service = services.findProperty('ServiceInfo.service_display_name', serviceName);
+        var service = services.findProperty('ServiceInfo.service_name', serviceName);
         if (service) {
           service.components.push(component);
         }
@@ -224,7 +224,7 @@ App.serviceMetricsMapper = App.QuickDataMapper.create({
           if (hostComponent.get('isLoaded')) {
             this.deleteRecord(hostComponent);
           }
-          var serviceCache = services.findProperty('ServiceInfo.service_display_name', hostComponent.get('service.serviceName'));
+          var serviceCache = services.findProperty('ServiceInfo.service_name', hostComponent.get('service.serviceName'));
           if (serviceCache) {
             serviceCache.host_components = serviceCache.host_components.without(hostComponent.get('id'));
           }
@@ -236,7 +236,7 @@ App.serviceMetricsMapper = App.QuickDataMapper.create({
 
       //parse service metrics from components
       services.forEach(function (item) {
-        hostComponents.filterProperty('service_id', item.ServiceInfo.service_display_name).mapProperty('id').forEach(function (hostComponent) {
+        hostComponents.filterProperty('service_id', item.ServiceInfo.service_name).mapProperty('id').forEach(function (hostComponent) {
           if (!item.host_components.contains(hostComponent)) {
             item.host_components.push(hostComponent);
           }
@@ -245,7 +245,7 @@ App.serviceMetricsMapper = App.QuickDataMapper.create({
 
         var extendedModelInfo = this.mapExtendedModel(item);
         if (extendedModelInfo) {
-          extendedModelInfo.passive_state = App.Service.find(item.ServiceInfo.service_display_name).get('passiveState');
+          extendedModelInfo.passive_state = App.Service.find(item.ServiceInfo.service_name).get('passiveState');
           result.push(extendedModelInfo);
         }
       }, this);
@@ -288,33 +288,33 @@ App.serviceMetricsMapper = App.QuickDataMapper.create({
    */
   mapExtendedModel: function(item) {
     var finalJson = false;
-    if (item && item.ServiceInfo && item.ServiceInfo.service_display_name == "HDFS") {
+    if (item && item.ServiceInfo && item.ServiceInfo.service_name == "HDFS") {
       finalJson = this.hdfsMapper(item);
       finalJson.rand = Math.random();
       App.store.safeLoad(App.HDFSService, finalJson);
-    } else if (item && item.ServiceInfo && item.ServiceInfo.service_display_name == "HBASE") {
+    } else if (item && item.ServiceInfo && item.ServiceInfo.service_name == "HBASE") {
       finalJson = this.hbaseMapper(item);
       finalJson.rand = Math.random();
       App.store.safeLoad(App.HBaseService, finalJson);
-    } else if (item && item.ServiceInfo && item.ServiceInfo.service_display_name == "FLUME") {
+    } else if (item && item.ServiceInfo && item.ServiceInfo.service_name == "FLUME") {
       finalJson = this.flumeMapper(item);
       finalJson.rand = Math.random();
       App.store.safeLoadMany(App.FlumeAgent, finalJson.agentJsons);
       App.store.safeLoad(App.FlumeService, finalJson);
-    } else if (item && item.ServiceInfo && item.ServiceInfo.service_display_name == "YARN") {
+    } else if (item && item.ServiceInfo && item.ServiceInfo.service_name == "YARN") {
       finalJson = this.yarnMapper(item);
       finalJson.rand = Math.random();
       App.store.safeLoad(App.YARNService, finalJson);
-    } else if (item && item.ServiceInfo && item.ServiceInfo.service_display_name == "MAPREDUCE2") {
+    } else if (item && item.ServiceInfo && item.ServiceInfo.service_name == "MAPREDUCE2") {
       finalJson = this.mapreduce2Mapper(item);
       finalJson.rand = Math.random();
       App.store.safeLoad(App.MapReduce2Service, finalJson);
-    } else if (item && item.ServiceInfo && item.ServiceInfo.service_display_name == "STORM") {
+    } else if (item && item.ServiceInfo && item.ServiceInfo.service_name == "STORM") {
       finalJson = this.stormMapper(item);
       finalJson.rand = Math.random();
       this.mapQuickLinks(finalJson, item);
       App.store.safeLoad(App.StormService, finalJson);
-    } else if (item && item.ServiceInfo && item.ServiceInfo.service_display_name == "RANGER") {
+    } else if (item && item.ServiceInfo && item.ServiceInfo.service_name == "RANGER") {
       finalJson = this.rangerMapper(item);
       finalJson.rand = Math.random();
       App.store.safeLoad(App.RangerService, finalJson);
@@ -339,13 +339,13 @@ App.serviceMetricsMapper = App.QuickDataMapper.create({
     // set tooltip for client-only services
     var clientOnlyServiceNames = App.get('services.clientOnly');
     clientOnlyServiceNames.forEach(function (serviceName) {
-      var service = services.findProperty('ServiceInfo.service_display_name', serviceName);
+      var service = services.findProperty('ServiceInfo.service_name', serviceName);
       if (service) {
         service.tool_tip_content = Em.I18n.t('services.service.summary.clientOnlyService.ToolTip');
       }
     });
     hostComponents.forEach(function (hostComponent) {
-      var service = services.findProperty('ServiceInfo.service_display_name', hostComponent.service_id);
+      var service = services.findProperty('ServiceInfo.service_name', hostComponent.service_id);
       if (hostComponent) {
         // set advanced nameNode display name for HA, Active NameNode or Standby NameNode
         // this is useful on three places: 1) HDFS health status hover tooltip, 2) HDFS service summary 3) NameNode component on host detail page
@@ -411,8 +411,8 @@ App.serviceMetricsMapper = App.QuickDataMapper.create({
       AMBARI_METRICS: [37],
       LOGSEARCH: [38]
     };
-    if (quickLinks[item.ServiceInfo.service_display_name])
-      finalJson.quick_links = quickLinks[item.ServiceInfo.service_display_name];
+    if (quickLinks[item.ServiceInfo.service_name])
+      finalJson.quick_links = quickLinks[item.ServiceInfo.service_name];
   },
 
   hdfsMapper: function (item) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-web/app/mixins/common/configs/configs_saver.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins/common/configs/configs_saver.js b/ambari-web/app/mixins/common/configs/configs_saver.js
index 19a4950..6b69ca1 100644
--- a/ambari-web/app/mixins/common/configs/configs_saver.js
+++ b/ambari-web/app/mixins/common/configs/configs_saver.js
@@ -750,7 +750,7 @@ App.ConfigsSaverMixin = Em.Mixin.create({
       };
 
       if (this.get('content.serviceName') === 'HDFS') {
-        options.urlParams += '&ServiceComponentInfo/service_display_name.in(HDFS)'
+        options.urlParams += '&ServiceComponentInfo/service_name.in(HDFS)'
       }
     } else {
       options = {

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-web/app/mixins/wizard/wizardProgressPageController.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins/wizard/wizardProgressPageController.js b/ambari-web/app/mixins/wizard/wizardProgressPageController.js
index 7e3aa1f..3dcde74 100644
--- a/ambari-web/app/mixins/wizard/wizardProgressPageController.js
+++ b/ambari-web/app/mixins/wizard/wizardProgressPageController.js
@@ -457,7 +457,7 @@ App.wizardProgressPageControllerMixin = Em.Mixin.create(App.InstallComponent, {
         }).join(',');
       }
       data.context = "Stop required services";
-      data.urlParams = "ServiceInfo/service_display_name.in(" + servicesList + ")";
+      data.urlParams = "ServiceInfo/service_name.in(" + servicesList + ")";
     }
     return App.ajax.send({
       name: 'common.services.update',
@@ -497,7 +497,7 @@ App.wizardProgressPageControllerMixin = Em.Mixin.create(App.InstallComponent, {
         }).join(',');
       }
       data.context = "Start required services";
-      data.urlParams = "ServiceInfo/service_display_name.in(" + servicesList + ")";
+      data.urlParams = "ServiceInfo/service_name.in(" + servicesList + ")";
     } else {
       data.context = "Start all services";
     }

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba1ec6dd/ambari-web/app/utils/ajax/ajax.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/ajax/ajax.js b/ambari-web/app/utils/ajax/ajax.js
index ad859f4..91748ed 100644
--- a/ambari-web/app/utils/ajax/ajax.js
+++ b/ambari-web/app/utils/ajax/ajax.js
@@ -1425,7 +1425,7 @@ var urls = {
     'mock': ''
   },
   'common.create_component': {
-    'real': '/clusters/{clusterName}/servicegroups/{defaultServiceGroupName}/services?ServiceInfo/service_display_name={serviceName}',
+    'real': '/clusters/{clusterName}/servicegroups/{defaultServiceGroupName}/services?ServiceInfo/service_name={serviceName}',
     'mock': '',
     'type': 'POST',
     'format': function (data) {
@@ -2596,7 +2596,7 @@ var urls = {
     'mock': ''
   },
   'components.get_category': {
-    'real': '/clusters/{clusterName}/components?fields=ServiceComponentInfo/component_name,ServiceComponentInfo/service_name,ServiceComponentInfo/service_display_name,ServiceComponentInfo/category,ServiceComponentInfo/recovery_enabled,ServiceComponentInfo/total_count&minimal_response=true',
+    'real': '/clusters/{clusterName}/components?fields=ServiceComponentInfo/component_name,ServiceComponentInfo/service_name,ServiceComponentInfo/category,ServiceComponentInfo/recovery_enabled,ServiceComponentInfo/total_count&minimal_response=true',
     'mock': ''
   },
   'components.update': {
@@ -2666,7 +2666,7 @@ var urls = {
     'mock': ''
   },
   'host_components.with_services_names': {
-    'real': '/clusters/{clusterName}/host_components?fields=component/ServiceComponentInfo/service_name,ServiceComponentInfo/service_display_name,HostRoles/host_name&minimal_response=true',
+    'real': '/clusters/{clusterName}/host_components?fields=component/ServiceComponentInfo/service_name,ServiceComponentInfo/service_name,HostRoles/host_name&minimal_response=true',
     'mock': ''
   },
   'components.get_installed': {
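
For API consumers, the net effect of this commit is that predicates and
field lists use service_name (and service_type) where they previously
used service_display_name. A hedged sketch of the renamed predicate in a
raw request, with a hypothetical server and cluster, and authentication
omitted:

    import urllib2  # Python 2, matching the agent-side scripts in this repo

    url = ("http://ambari.example.com:8080/api/v1/clusters/c1/services"
           "?ServiceInfo/service_name.in(HDFS,YARN)&fields=ServiceInfo/state")
    req = urllib2.Request(url, headers={"X-Requested-By": "ambari"})
    print(urllib2.urlopen(req).read())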


[47/50] [abbrv] ambari git commit: AMBARI-22176. Remove duplicate kerberos setting for superset (Slim Bouguerra via rlevas)

Posted by ja...@apache.org.
AMBARI-22176. Remove duplicate kerberos setting for superset (Slim Bouguerra via rlevas)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a3a8afcd
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a3a8afcd
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a3a8afcd

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: a3a8afcd62e4c32b9972b2fcdd4744f7d6728e2a
Parents: 24c3589
Author: Slim Bouguerra <sb...@hortonworks.com>
Authored: Tue Oct 10 13:14:19 2017 -0400
Committer: Robert Levas <rl...@hortonworks.com>
Committed: Tue Oct 10 13:14:19 2017 -0400

----------------------------------------------------------------------
 .../stacks/HDP/2.6/services/DRUID/kerberos.json | 30 --------------------
 1 file changed, 30 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a3a8afcd/ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/kerberos.json
index 464e420..198c351 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/kerberos.json
@@ -35,27 +35,6 @@
           }
         },
         {
-          "name": "superset",
-          "principal": {
-            "value": "${druid-env/druid_user}@${realm}",
-            "type": "user",
-            "configuration": "druid-superset/KERBEROS_PRINCIPAL",
-            "local_username": "${druid-env/druid_user}"
-          },
-          "keytab": {
-            "file": "${keytab_dir}/superset.headless.keytab",
-            "owner": {
-              "name": "${druid-env/druid_user}",
-              "access": "r"
-            },
-            "group": {
-              "name": "${cluster-env/user_group}",
-              "access": "r"
-            },
-            "configuration": "druid-superset/KERBEROS_KEYTAB"
-          }
-        },
-        {
           "name": "druid_smokeuser",
           "reference": "/smokeuser"
         }
@@ -105,15 +84,6 @@
               "reference": "/druid"
             }
           ]
-        },
-        {
-          "name": "DRUID_SUPERSET",
-          "identities": [
-            {
-              "name": "druid_druid_superset_druid",
-              "reference": "/druid"
-            }
-          ]
         }
       ],
       "configurations": [


[39/50] [abbrv] ambari git commit: AMBARI-22160. hadooplzo package installation failed on devdeploys (aonishuk)

Posted by ja...@apache.org.
AMBARI-22160. hadooplzo package installation failed on devdeploys (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e037a8d7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e037a8d7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e037a8d7

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: e037a8d7194ac97da9f746e52eb53cf15ba2415f
Parents: 3b8e807
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Tue Oct 10 14:30:13 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Tue Oct 10 14:30:13 2017 +0300

----------------------------------------------------------------------
 .../libraries/script/script.py                  | 45 ++++++++++++++------
 .../stacks/2.0.6/HBASE/test_hbase_master.py     |  2 +
 .../src/test/python/stacks/utils/RMFTestCase.py |  4 +-
 3 files changed, 37 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e037a8d7/ambari-common/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index d5b4469..4282213 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -501,6 +501,7 @@ class Script(object):
       Script.stack_version_from_distro_select = pkg_provider.get_installed_package_version(
               stack_tools.get_stack_tool_package(stack_tools.STACK_SELECTOR_NAME))
 
+
     return Script.stack_version_from_distro_select
 
 
@@ -525,22 +526,20 @@ class Script(object):
     """
     This function replaces ${stack_version} placeholder with actual version.  If the package
     version is passed from the server, use that as an absolute truth.
-    
+
     :param name name of the package
     :param repo_version actual version of the repo currently installing
     """
-    stack_version_package_formatted = ""
+    if not STACK_VERSION_PLACEHOLDER in name:
+      return name
 
-    if not repo_version:
-      repo_version = self.get_stack_version_before_packages_installed()
+    stack_version_package_formatted = ""
 
     package_delimiter = '-' if OSCheck.is_ubuntu_family() else '_'
 
     # repositoryFile is the truth
     # package_version should be made to the form W_X_Y_Z_nnnn
     package_version = default("repositoryFile/repoVersion", None)
-    if package_version is not None:
-      package_version = package_version.replace('.', package_delimiter).replace('-', package_delimiter)
 
     # TODO remove legacy checks
     if package_version is None:
@@ -550,6 +549,17 @@ class Script(object):
     if package_version is None:
       package_version = default("hostLevelParams/package_version", None)
 
+    package_version = None  # deliberately ignore the legacy values and resolve via the repository file below
+    if (package_version is None or '-' not in package_version) and default('/repositoryFile', None):
+      self.load_available_packages()
+      package_name = self.get_package_from_available(name, self.available_packages_in_repos)
+      if package_name is None:
+        raise Fail("Cannot match package for regexp name {0}. Available packages: {1}".format(name, self.available_packages_in_repos))
+      return package_name
+
+    if package_version is not None:
+      package_version = package_version.replace('.', package_delimiter).replace('-', package_delimiter)
+
     # The cluster effective version comes down when the version is known after the initial
     # install.  In that case we should not be guessing which version when invoking INSTALL, but
     # use the supplied version to build the package_version
@@ -568,6 +578,7 @@ class Script(object):
 
     # Wildcards cause a lot of troubles with installing packages, if the version contains wildcards we try to specify it.
     if not package_version or '*' in package_version:
+      repo_version = self.get_stack_version_before_packages_installed()
       stack_version_package_formatted = repo_version.replace('.', package_delimiter).replace('-', package_delimiter) if STACK_VERSION_PLACEHOLDER in name else name
 
     package_name = name.replace(STACK_VERSION_PLACEHOLDER, stack_version_package_formatted)
@@ -760,6 +771,19 @@ class Script(object):
     """
     self.install_packages(env)
 
+  def load_available_packages(self):
+    if self.available_packages_in_repos:
+      return self.available_packages_in_repos
+
+    pkg_provider = get_provider("Package")
+    try:
+      # cache the result on the instance so repeated lookups hit the repos only once
+      self.available_packages_in_repos = pkg_provider.get_available_packages_in_repos(self.get_config()['repositoryFile']['repositories'])
+    except Exception as err:
+      Logger.exception("Unable to load available packages")
+      self.available_packages_in_repos = []
+
+
   def install_packages(self, env):
     """
    List of packages that are required by service is received from the server
@@ -782,17 +806,11 @@ class Script(object):
       package_list_str = config['hostLevelParams']['package_list']
       agent_stack_retry_on_unavailability = bool(config['hostLevelParams']['agent_stack_retry_on_unavailability'])
       agent_stack_retry_count = int(config['hostLevelParams']['agent_stack_retry_count'])
-      pkg_provider = get_provider("Package")
-      try:
-        available_packages_in_repos = pkg_provider.get_available_packages_in_repos(config['repositoryFile']['repositories'])
-      except Exception as err:
-        Logger.exception("Unable to load available packages")
-        available_packages_in_repos = []
       if isinstance(package_list_str, basestring) and len(package_list_str) > 0:
         package_list = json.loads(package_list_str)
         for package in package_list:
           if self.check_package_condition(package):
-            name = self.get_package_from_available(package['name'], available_packages_in_repos)
+            name = self.format_package_name(package['name'])
             # HACK: On Windows, only install ambari-metrics packages using Choco Package Installer
             # TODO: Update this once choco packages for hadoop are created. This is because, service metainfo.xml support
             # <osFamily>any<osFamily> which would cause installation failure on Windows.
@@ -1092,5 +1110,6 @@ class Script(object):
 
 
   def __init__(self):
+    self.available_packages_in_repos = []
     if Script.instance is not None:
       raise Fail("An instantiation already exists! Use, get_instance() method.")

http://git-wip-us.apache.org/repos/asf/ambari/blob/e037a8d7/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
index 2224d31..e32393d 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
@@ -95,8 +95,10 @@ class TestHBaseMaster(RMFTestCase):
                          try_install=True,
                          os_type=('Redhat', '6.4', 'Final'),
                          checked_call_mocks = [(0, "OK.", "")],
+                         available_packages_in_repos = ['hbase_2_3_0_1_1234'],
                          )
 
+
       # only assert that the correct package is trying to be installed
       self.assertResourceCalled('Package', 'hbase_2_3_0_1_1234',
                                 retry_count=5,

http://git-wip-us.apache.org/repos/asf/ambari/blob/e037a8d7/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
index bff8642..ae33a2a 100644
--- a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
+++ b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
@@ -80,7 +80,8 @@ class RMFTestCase(TestCase):
                     mocks_dict={},
                     try_install=False,
                     command_args=[],
-                    log_out_files=False):
+                    log_out_files=False,
+                    available_packages_in_repos = []):
 
     norm_path = os.path.normpath(path)
 
@@ -125,6 +126,7 @@ class RMFTestCase(TestCase):
         Script.instance = None
         script_class_inst = RMFTestCase._get_attr(script_module, classname)()
         script_class_inst.log_out_files = log_out_files
+        script_class_inst.available_packages_in_repos = available_packages_in_repos
         method = RMFTestCase._get_attr(script_class_inst, command)
     except IOError, err:
       raise RuntimeError("Cannot load class %s from %s: %s" % (classname, norm_path, err.message))


[50/50] [abbrv] ambari git commit: Fix build break due to merge (jluniya)

Posted by ja...@apache.org.
Fix build break due to merge (jluniya)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/5ddb07cd
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/5ddb07cd
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/5ddb07cd

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: 5ddb07cdb376413855a17363f710256290048fa7
Parents: 045d9bf
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Tue Oct 10 23:42:23 2017 -0700
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Tue Oct 10 23:42:23 2017 -0700

----------------------------------------------------------------------
 .../ambari/server/state/svccomphost/ServiceComponentHostImpl.java   | 1 -
 1 file changed, 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/5ddb07cd/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
index 9e8460e..6190a03 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
@@ -1225,7 +1225,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
     }
 
     try {
-      Cluster cluster = clusters.getCluster(clusterName);
       ServiceComponent serviceComponent = cluster.getService(serviceName).getServiceComponent(serviceComponentName);
       ServiceComponentHost sch = serviceComponent.getServiceComponentHost(hostName);
       String refreshConfigsCommand = helper.getRefreshConfigsCommand(cluster,sch);


[44/50] [abbrv] ambari git commit: AMBARI-21406. Refresh configurations without restart command (magyari_sandor)

Posted by ja...@apache.org.
AMBARI-21406. Refresh configurations without restart command (magyari_sandor)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/57682942
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/57682942
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/57682942

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: 57682942b7368a8de3f0a76f65e45b13c1626deb
Parents: 8908d3e
Author: Sandor Magyari <sm...@hortonworks.com>
Authored: Fri Aug 25 14:08:55 2017 +0200
Committer: Sandor Magyari <sm...@hortonworks.com>
Committed: Tue Oct 10 16:46:30 2017 +0200

----------------------------------------------------------------------
 .../libraries/script/script.py                  |  23 ++-
 .../AmbariCustomCommandExecutionHelper.java     |  10 ++
 .../AmbariManagementControllerImpl.java         |   4 +-
 .../ServiceComponentHostResponse.java           |  15 ++
 .../internal/HostComponentResourceProvider.java |   4 +
 .../ambari/server/metadata/ActionMetadata.java  |   1 +
 .../apache/ambari/server/stack/StackModule.java |  29 +++-
 .../ambari/server/state/ConfigHelper.java       | 164 ++++++++++++++++++-
 .../ambari/server/state/PropertyInfo.java       |  29 ++++
 .../ambari/server/state/RefreshCommand.java     |  52 ++++++
 .../state/RefreshCommandConfiguration.java      |  71 ++++++++
 .../apache/ambari/server/state/StackInfo.java   |  10 ++
 .../svccomphost/ServiceComponentHostImpl.java   |  10 ++
 .../HDFS/2.1.0.2.0/configuration/core-site.xml  |  12 ++
 .../HDFS/2.1.0.2.0/configuration/hdfs-site.xml  |   3 +
 .../HDFS/2.1.0.2.0/package/scripts/datanode.py  |  13 +-
 .../HDFS/2.1.0.2.0/package/scripts/hdfs.py      |  52 +++++-
 .../2.1.0.2.0/package/scripts/hdfs_client.py    |   5 +
 .../2.1.0.2.0/package/scripts/hdfs_namenode.py  |  21 +++
 .../HDFS/2.1.0.2.0/package/scripts/namenode.py  |  21 ++-
 .../HDFS/2.1.0.2.0/package/scripts/snamenode.py |  10 ++
 .../HDFS/3.0.0.3.0/configuration/hdfs-site.xml  |   6 +
 .../HDFS/3.0.0.3.0/package/scripts/datanode.py  |  13 +-
 .../HDFS/3.0.0.3.0/package/scripts/hdfs.py      |  52 +++++-
 .../3.0.0.3.0/package/scripts/hdfs_client.py    |   5 +
 .../3.0.0.3.0/package/scripts/hdfs_namenode.py  |  20 +++
 .../HDFS/3.0.0.3.0/package/scripts/namenode.py  |  21 ++-
 .../HDFS/3.0.0.3.0/package/scripts/snamenode.py |  10 ++
 .../src/main/resources/configuration-schema.xsd |  12 ++
 .../src/main/resources/properties.json          |   1 +
 .../services/HDFS/configuration/hdfs-site.xml   |   3 +
 .../ambari/server/state/ConfigHelperTest.java   |  76 ++++++++-
 .../ambari/server/state/PropertyInfoTest.java   |  20 +++
 .../python/stacks/2.0.6/HDFS/test_datanode.py   |  17 ++
 .../python/stacks/2.0.6/HDFS/test_namenode.py   |  33 ++++
 .../services/HDFS/configuration/hdfs-site.xml   |   8 +
 36 files changed, 835 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-common/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index bf8c0dc..12e6f98 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -1006,12 +1006,33 @@ class Script(object):
 
   def configure(self, env, upgrade_type=None, config_dir=None):
     """
-    To be overridden by subclasses
+    To be overridden by subclasses (may invoke save_configs)
     :param upgrade_type: only valid during RU/EU, otherwise will be None
     :param config_dir: for some clients during RU, the location to save configs to, otherwise None
     """
     self.fail_with_error('configure method isn\'t implemented')
 
+  def save_configs(self, env):
+    """
+    To be overridden by subclasses
+    Creates / updates configuration files
+    """
+    self.fail_with_error('save_configs method isn\'t implemented')
+
+  def reconfigure(self, env):
+    """
+    Default implementation of RECONFIGURE action which may be overridden by subclasses
+    """
+    Logger.info("Refresh config files ...")
+    self.save_configs(env)
+
+    config = self.get_config()
+    if "reconfigureAction" in config["commandParams"] and config["commandParams"]["reconfigureAction"] is not None:
+      reconfigure_action = config["commandParams"]["reconfigureAction"]
+      Logger.info("Call %s" % reconfigure_action)
+      method = self.choose_method_to_execute(reconfigure_action)
+      method(env)
+
   def generate_configs_get_template_file_content(self, filename, dicts):
     config = self.get_config()
     content = ''
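
To make the new dispatch concrete, a minimal sketch of a component script that
cooperates with reconfigure(); the DummyDaemon class and its log messages are
illustrative, only the hook names come from the diff above:

  from resource_management.libraries.script.script import Script
  from resource_management.core.logger import Logger

  class DummyDaemon(Script):
    def save_configs(self, env):
      # always called first by reconfigure(): rewrite this component's config files
      Logger.info("rewriting config files")

    def reloadproxyusers(self, env):
      # invoked when commandParams["reconfigureAction"] == "reloadproxyusers"
      Logger.info("refreshing proxy users")

  if __name__ == "__main__":
    DummyDaemon().execute()

reconfigure() always runs save_configs(env) and then, if the command carries a
reconfigureAction, resolves that name with choose_method_to_execute() and calls
the resulting method.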

http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
index d0dd7e0..e12477e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
@@ -89,6 +89,7 @@ import org.apache.ambari.server.state.HostState;
 import org.apache.ambari.server.state.MaintenanceState;
 import org.apache.ambari.server.state.PropertyInfo;
 import org.apache.ambari.server.state.PropertyInfo.PropertyType;
+import org.apache.ambari.server.state.RefreshCommandConfiguration;
 import org.apache.ambari.server.state.RepositoryInfo;
 import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponent;
@@ -507,6 +508,15 @@ public class AmbariCustomCommandExecutionHelper {
       StageUtils.useAmbariJdkInCommandParams(commandParams, configs);
       roleParams.put(COMPONENT_CATEGORY, componentInfo.getCategory());
 
+      // set reconfigureAction in case of a RECONFIGURE command if there are any
+      if (commandName.equals("RECONFIGURE")) {
+        String refreshConfigsCommand = configHelper.getRefreshConfigsCommand(cluster, hostName, serviceName, componentName);
+        if (refreshConfigsCommand != null && !refreshConfigsCommand.equals(RefreshCommandConfiguration.REFRESH_CONFIGS)) {
+          LOG.info("Refreshing configs for {}/{} with command: {}", componentName, hostName, refreshConfigsCommand);
+          commandParams.put("reconfigureAction", refreshConfigsCommand);
+        }
+      }
+
       execCmd.setCommandParams(commandParams);
       execCmd.setRoleParams(roleParams);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 5642575..8c4888c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -4795,7 +4795,9 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
       properties = ambariMetaInfo.getServiceProperties(stackName, stackVersion, serviceName);
     }
     for (PropertyInfo property: properties) {
-      response.add(property.convertToResponse());
+      if (property.shouldBeConfigured()) {
+        response.add(property.convertToResponse());
+      }
     }
 
     return response;

http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java
index 7b75e06..bc67117 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java
@@ -40,6 +40,7 @@ public class ServiceComponentHostResponse {
   private String desiredRepositoryVersion;
   private String desiredState;
   private boolean staleConfig = false;
+  private boolean reloadConfig = false;
   private String adminState = null;
   private String maintenanceState = null;
   private UpgradeState upgradeState = UpgradeState.NONE;
@@ -283,6 +284,20 @@ public class ServiceComponentHostResponse {
   }
 
   /**
+   * @return true if configs are reloadable without a RESTART command
+   */
+  public boolean isReloadConfig() {
+    return reloadConfig;
+  }
+
+  /**
+   * @param reloadConfig true if configs can be reloaded without a RESTART command
+   */
+  public void setReloadConfig(boolean reloadConfig) {
+    this.reloadConfig = reloadConfig;
+  }
+
+  /**
    * @return the maintenance state
    */
   public String getMaintenanceState() {

http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
index 48e15eb..6708560 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
@@ -106,6 +106,8 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
       = PropertyHelper.getPropertyId("HostRoles", "actual_configs");
   public static final String HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID
       = PropertyHelper.getPropertyId("HostRoles", "stale_configs");
+  public static final String HOST_COMPONENT_RELOAD_CONFIGS_PROPERTY_ID
+      = PropertyHelper.getPropertyId("HostRoles", "reload_configs");
   public static final String HOST_COMPONENT_DESIRED_ADMIN_STATE_PROPERTY_ID
       = PropertyHelper.getPropertyId("HostRoles", "desired_admin_state");
   public static final String HOST_COMPONENT_MAINTENANCE_STATE_PROPERTY_ID
@@ -244,6 +246,8 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
               response.getActualConfigs(), requestedIds);
       setResourceProperty(resource, HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID,
               response.isStaleConfig(), requestedIds);
+      setResourceProperty(resource, HOST_COMPONENT_RELOAD_CONFIGS_PROPERTY_ID,
+              response.isReloadConfig(), requestedIds);
       setResourceProperty(resource, HOST_COMPONENT_UPGRADE_STATE_PROPERTY_ID,
               response.getUpgradeState(), requestedIds);
       setResourceProperty(resource, HOST_COMPONENT_DESIRED_REPOSITORY_VERSION,
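
As a usage note, the new flag can be queried next to stale_configs through the
REST API; a hedged Python sketch (server address, cluster name and credentials
are hypothetical):

  import requests

  resp = requests.get(
      "http://ambari.example.com:8080/api/v1/clusters/c1/host_components",
      params={"fields": "HostRoles/reload_configs,HostRoles/stale_configs"},
      auth=("admin", "admin"))
  # each item carries the per-host-component reload_configs / stale_configs flags
  for item in resp.json().get("items", []):
      print(item["HostRoles"])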

http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/java/org/apache/ambari/server/metadata/ActionMetadata.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/metadata/ActionMetadata.java b/ambari-server/src/main/java/org/apache/ambari/server/metadata/ActionMetadata.java
index e0bfdcf..33dc0e8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/metadata/ActionMetadata.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/metadata/ActionMetadata.java
@@ -65,6 +65,7 @@ public class ActionMetadata {
     defaultHostComponentCommands.add("CONFIGURE");
     defaultHostComponentCommands.add("CONFIGURE_FUNCTION");
     defaultHostComponentCommands.add("DISABLE_SECURITY");
+    defaultHostComponentCommands.add("RECONFIGURE");
   }
 
   private void fillServiceClients() {

http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
index e88bbf2..520764d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
@@ -38,6 +38,7 @@ import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.ExtensionInfo;
 import org.apache.ambari.server.state.PropertyDependencyInfo;
 import org.apache.ambari.server.state.PropertyInfo;
+import org.apache.ambari.server.state.RefreshCommand;
 import org.apache.ambari.server.state.RepositoryInfo;
 import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.StackInfo;
@@ -579,6 +580,7 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
       }
       // Read the service and available configs for this stack
       populateServices();
+
       if (!stackInfo.isValid()) {
         setValid(false);
         addErrors(stackInfo.getErrors());
@@ -627,7 +629,7 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
     for (ServiceInfo serviceInfo : serviceInfos) {
       ServiceModule serviceModule = new ServiceModule(stackContext, serviceInfo, serviceDirectory);
       serviceModules.add(serviceModule);
-      if (!serviceModule.isValid()){
+      if (!serviceModule.isValid()) {
         stackInfo.setValid(false);
         setValid(false);
         stackInfo.addErrors(serviceModule.getErrors());
@@ -769,7 +771,11 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
     // relationship into map. Since we do not have the reverse {@link PropertyInfo},
     // we have to loop through service-configs again later.
     for (ServiceModule serviceModule : serviceModules.values()) {
+
+      Map<String, Map<String, String>> componentRefreshCommandsMap = new HashMap<>();
+
       for (PropertyInfo pi : serviceModule.getModuleInfo().getProperties()) {
+
         for (PropertyDependencyInfo pdi : pi.getDependsOnProperties()) {
           String type = ConfigHelper.fileNameToConfigType(pi.getFilename());
           String name = pi.getName();
@@ -784,7 +790,28 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
             dependedByMap.put(pdi, newDependenciesSet);
           }
         }
+
+        // set refresh commands
+        if (pi.getSupportedRefreshCommands() != null && pi.getSupportedRefreshCommands().size() > 0) {
+          String type = ConfigHelper.fileNameToConfigType(pi.getFilename());
+          String propertyName = type + "/" + pi.getName();
+
+          Map<String, String> refreshCommandPropertyMap = componentRefreshCommandsMap.get(propertyName);
+
+          for (RefreshCommand refreshCommand : pi.getSupportedRefreshCommands()) {
+            String componentName = refreshCommand.getComponentName();
+            if (refreshCommandPropertyMap == null) {
+              refreshCommandPropertyMap = new HashMap<>();
+              componentRefreshCommandsMap.put(propertyName, refreshCommandPropertyMap);
+            }
+            refreshCommandPropertyMap.put(componentName, refreshCommand.getCommand());
+          }
+
+        }
+
       }
+
+      stackInfo.getRefreshCommandConfiguration().addRefreshCommands(componentRefreshCommandsMap);
     }
 
     // Go through all service-configs again and set their 'depended-by' if necessary.
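
The structure StackModule accumulates here can be pictured as a nested map from
property key to per-component command; modeled as a Python dict, with entries
taken from the stack definitions in this commit:

  # "<config-type>/<property-name>" -> {component name -> refresh command}
  component_refresh_commands = {
      "hdfs-site/dfs.heartbeat.interval": {"NAMENODE": "reload_configs"},
      "core-site/hadoop.proxyuser.*":     {"NAMENODE": "reloadproxyusers"},
  }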

http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
index bb7fcbe..eade914 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
@@ -17,12 +17,15 @@
  */
 package org.apache.ambari.server.state;
 
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
@@ -41,6 +44,7 @@ import org.apache.ambari.server.state.PropertyInfo.PropertyType;
 import org.apache.ambari.server.state.configgroup.ConfigGroup;
 import org.apache.ambari.server.utils.SecretReference;
 import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang.math.NumberUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -72,6 +76,8 @@ public class ConfigHelper {
    */
   private final Cache<Integer, Boolean> staleConfigsCache;
 
+  private final Cache<Integer, String> refreshConfigCommandCache;
+
   private static final Logger LOG =
       LoggerFactory.getLogger(ConfigHelper.class);
 
@@ -113,6 +119,9 @@ public class ConfigHelper {
     STALE_CONFIGS_CACHE_EXPIRATION_TIME = configuration.staleConfigCacheExpiration();
     staleConfigsCache = CacheBuilder.newBuilder().
         expireAfterWrite(STALE_CONFIGS_CACHE_EXPIRATION_TIME, TimeUnit.SECONDS).build();
+
+    refreshConfigCommandCache = CacheBuilder.newBuilder().
+            expireAfterWrite(STALE_CONFIGS_CACHE_EXPIRATION_TIME, TimeUnit.SECONDS).build();
   }
 
   /**
@@ -1302,6 +1311,8 @@ public class ConfigHelper {
 
     StackId stackId = sch.getServiceComponent().getDesiredStackId();
 
+    StackInfo stackInfo = ambariMetaInfo.getStack(stackId);
+
     ServiceInfo serviceInfo = ambariMetaInfo.getService(stackId.getStackName(),
             stackId.getStackVersion(), sch.getServiceName());
 
@@ -1316,8 +1327,10 @@ public class ConfigHelper {
     // ---- merge values, determine changed keys, check stack: stale
 
     Iterator<Entry<String, Map<String, String>>> it = desired.entrySet().iterator();
+    List<String> changedProperties = new LinkedList<>();
 
-    while (it.hasNext() && !stale) {
+    while (it.hasNext()) {
+      boolean staleEntry = false;
       Entry<String, Map<String, String>> desiredEntry = it.next();
 
       String type = desiredEntry.getKey();
@@ -1325,29 +1338,108 @@ public class ConfigHelper {
 
       if (!actual.containsKey(type)) {
         // desired is set, but actual is not
-        if (!serviceInfo.hasConfigDependency(type)) {
-          stale = componentInfo != null && componentInfo.hasConfigType(type);
-        } else {
-          stale = true;
-        }
+        staleEntry = (serviceInfo.hasConfigDependency(type) || componentInfo.hasConfigType(type));
       } else {
         // desired and actual both define the type
         HostConfig hc = actual.get(type);
         Map<String, String> actualTags = buildTags(hc);
 
         if (!isTagChanged(tags, actualTags, hasGroupSpecificConfigsForType(cluster, sch.getHostName(), type))) {
-          stale = false;
+          staleEntry = false;
         } else {
-          stale = serviceInfo.hasConfigDependency(type) || componentInfo.hasConfigType(type);
+          staleEntry = (serviceInfo.hasConfigDependency(type) || componentInfo.hasConfigType(type));
+          if (staleEntry) {
+            Collection<String> changedKeys = findChangedKeys(cluster, type, tags.values(), actualTags.values());
+            changedProperties.addAll(changedKeys);
+          }
         }
       }
+      stale = stale | staleEntry;
     }
+
+    String refreshCommand = calculateRefreshCommand(stackInfo.getRefreshCommandConfiguration(), sch, changedProperties);
+
     if (STALE_CONFIGS_CACHE_ENABLED) {
       staleConfigsCache.put(staleHash, stale);
+      if (refreshCommand != null) {
+        refreshConfigCommandCache.put(staleHash, refreshCommand);
+      }
     }
+
+    // gather all changed properties and see if we can find a common refreshConfigs command for this component
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Changed properties {} ({}) {} :  COMMAND: {}", stale, sch.getServiceComponentName(), sch.getHostName(), refreshCommand);
+      for (String p : changedProperties) {
+        LOG.debug(p);
+      }
+    }
+
     return stale;
   }
 
+  public String getRefreshConfigsCommand(Cluster cluster, String hostName, String serviceName, String componentName) throws AmbariException {
+    ServiceComponent serviceComponent = cluster.getService(serviceName).getServiceComponent(componentName);
+    ServiceComponentHost sch = serviceComponent.getServiceComponentHost(hostName);
+    return getRefreshConfigsCommand(cluster, sch);
+  }
+
+  public String getRefreshConfigsCommand(Cluster cluster, ServiceComponentHost sch) throws AmbariException {
+    String refreshCommand = null;
+
+    Map<String, HostConfig> actual = sch.getActualConfigs();
+    if (STALE_CONFIGS_CACHE_ENABLED) {
+      Map<String, Map<String, String>> desired = getEffectiveDesiredTags(cluster, sch.getHostName(),
+              cluster.getDesiredConfigs());
+      int staleHash = Objects.hashCode(actual.hashCode(),
+              desired.hashCode(),
+              sch.getHostName(),
+              sch.getServiceComponentName(),
+              sch.getServiceName());
+      refreshCommand = refreshConfigCommandCache.getIfPresent(staleHash);
+    }
+    return refreshCommand;
+  }
+
+
+  /**
+   * Calculates refresh command for a set of changed properties as follows:
+   *  - if any property has no refresh command, return null
+   *  - in case of multiple refresh commands: since REFRESH_CONFIGS is implied by every other command anyway,
+   *  it can be overridden by RELOAD_CONFIGS or any custom command; however, if two different non-default
+   *  commands occur, return null, as it is not possible to refresh all properties with a single command.
+   *
+   *  examples:
+   *     {REFRESH_CONFIGS, REFRESH_CONFIGS, RELOAD_CONFIGS} ==> RELOAD_CONFIGS
+   *     {REFRESH_CONFIGS, RELOADPROXYUSERS, RELOAD_CONFIGS} ==> null
+   *
+   * @param refreshCommandConfiguration the refresh command definitions of the stack
+   * @param sch the service component host whose configuration changed
+   * @param changedProperties the changed property keys, in "<config-type>/<property-name>" form
+   * @return the single refresh command that covers all changed properties, or null if there is none
+   */
+  private String calculateRefreshCommand(RefreshCommandConfiguration refreshCommandConfiguration,
+                                         ServiceComponentHost sch, List<String> changedProperties) {
+
+    String finalRefreshCommand = null;
+    for (String propertyName : changedProperties) {
+      String refreshCommand = refreshCommandConfiguration.getRefreshCommandForComponent(sch, propertyName);
+      if (refreshCommand == null) {
+        return null;
+      }
+      if (finalRefreshCommand == null) {
+        finalRefreshCommand = refreshCommand;
+      }
+      if (!finalRefreshCommand.equals(refreshCommand)) {
+        if (finalRefreshCommand.equals(RefreshCommandConfiguration.REFRESH_CONFIGS)) {
+          finalRefreshCommand = refreshCommand;
+        } else if (!refreshCommand.equals(RefreshCommandConfiguration.REFRESH_CONFIGS)) {
+          return null;
+        }
+      }
+    }
+    return finalRefreshCommand;
+  }
+
   /**
    * Determines if the hostname has group specific configs for the type specified
    *
@@ -1374,6 +1466,62 @@ public class ConfigHelper {
   }
 
   /**
+   * @return the keys that have changed values
+   */
+  private Collection<String> findChangedKeys(Cluster cluster, String type,
+                                             Collection<String> desiredTags, Collection<String> actualTags) {
+
+    Map<String, String> desiredValues = new HashMap<>();
+    Map<String, String> actualValues = new HashMap<>();
+
+    for (String tag : desiredTags) {
+      Config config = cluster.getConfig(type, tag);
+      if (null != config) {
+        desiredValues.putAll(config.getProperties());
+      }
+    }
+
+    for (String tag : actualTags) {
+      Config config = cluster.getConfig(type, tag);
+      if (null != config) {
+        actualValues.putAll(config.getProperties());
+      }
+    }
+
+    List<String> keys = new ArrayList<>();
+
+    for (Entry<String, String> entry : desiredValues.entrySet()) {
+      String key = entry.getKey();
+      String value = entry.getValue();
+
+      if (!actualValues.containsKey(key) || !valuesAreEqual(actualValues.get(key), value)) {
+        keys.add(type + "/" + key);
+      }
+    }
+
+    return keys;
+  }
+
+  /**
+   * Compares the values numerically (as doubles) when both parse as numbers, otherwise as strings.
+   * @param actualValue the currently applied value
+   * @param newValue the desired value
+   * @return true if the two values are equal
+   */
+  private boolean valuesAreEqual(String actualValue, String newValue) {
+    boolean actualValueIsNumber = NumberUtils.isNumber(actualValue);
+    boolean newValueIsNumber = NumberUtils.isNumber(newValue);
+    if (actualValueIsNumber && newValueIsNumber) {
+      Double ab = Double.parseDouble(actualValue);
+      Double bb = Double.parseDouble(newValue);
+      return ab.equals(bb);
+    } else if (!actualValueIsNumber && !newValueIsNumber) {
+      return actualValue.equals(newValue);
+    }
+    return false;
+  }
+
+  /**
    * @return the map of tags for a desired config
    */
   private Map<String, String> buildTags(HostConfig hc) {
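
A pure-Python model of the merge rule in calculateRefreshCommand above may help
to see the algorithm at a glance; merge_refresh_commands and its inputs are
illustrative only:

  REFRESH_CONFIGS = "refresh_configs"

  def merge_refresh_commands(commands):
      final = None
      for cmd in commands:
          if cmd is None:
              return None      # some property cannot be refreshed at all
          if final is None or final == REFRESH_CONFIGS:
              final = cmd      # any command may override the default
          elif cmd != REFRESH_CONFIGS and cmd != final:
              return None      # two incompatible non-default commands
      return final

  # the two examples from the javadoc above:
  assert merge_refresh_commands(
      ["refresh_configs", "refresh_configs", "reload_configs"]) == "reload_configs"
  assert merge_refresh_commands(
      ["refresh_configs", "reloadproxyusers", "reload_configs"]) is None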

http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
index 63c850e..31fcb9d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
@@ -90,6 +90,11 @@ public class PropertyInfo {
   private Set<PropertyDependencyInfo> usedByProperties =
           new HashSet<>();
 
+  @XmlElementWrapper(name="supported-refresh-commands")
+  @XmlElement(name="refresh-command")
+  private Set<RefreshCommand> supportedRefreshCommands = new HashSet<>();
+
+
   //This method is called after all the properties (except IDREF) are unmarshalled for this object,
   //but before this object is set to the parent object.
   void afterUnmarshal(Unmarshaller unmarshaller, Object parent) {
@@ -209,6 +214,30 @@ public class PropertyInfo {
     this.requireInput = requireInput;
   }
 
+  public List<Element> getPropertyAttributes() {
+    return propertyAttributes;
+  }
+
+  public void setPropertyAttributes(List<Element> propertyAttributes) {
+    this.propertyAttributes = propertyAttributes;
+  }
+
+  public Set<RefreshCommand> getSupportedRefreshCommands() {
+    return supportedRefreshCommands;
+  }
+
+  public void setSupportedRefreshCommands(Set<RefreshCommand> supportedRefreshCommands) {
+    this.supportedRefreshCommands = supportedRefreshCommands;
+  }
+
+  /**
+   * Wildcard properties should not be included in stack configurations.
+   * @return true if this property should be included in configuration responses
+   */
+  public boolean shouldBeConfigured() {
+    return !getName().contains("*");
+  }
+
   @Override
   public int hashCode() {
     final int prime = 31;

http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/java/org/apache/ambari/server/state/RefreshCommand.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/RefreshCommand.java b/ambari-server/src/main/java/org/apache/ambari/server/state/RefreshCommand.java
new file mode 100644
index 0000000..e09a875
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/RefreshCommand.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.state;
+
+import javax.xml.bind.annotation.XmlAttribute;
+
+/**
+ * Represents a RefreshCommand defined for a component and a property.
+ */
+public class RefreshCommand {
+
+  @XmlAttribute(name="componentName", required = true)
+  private String componentName;
+
+  /**
+   * Default command is reload_configs.
+   */
+  @XmlAttribute(name="command", required = false)
+  private String command = RefreshCommandConfiguration.RELOAD_CONFIGS;
+
+  public RefreshCommand() {
+  }
+
+  public RefreshCommand(String componentName, String command) {
+    this.componentName = componentName;
+    this.command = command;
+  }
+
+  public String getComponentName() {
+    return componentName;
+  }
+
+  public String getCommand() {
+    return command;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/java/org/apache/ambari/server/state/RefreshCommandConfiguration.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/RefreshCommandConfiguration.java b/ambari-server/src/main/java/org/apache/ambari/server/state/RefreshCommandConfiguration.java
new file mode 100644
index 0000000..5999c6c
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/RefreshCommandConfiguration.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.state;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class RefreshCommandConfiguration {
+
+  public static final String RELOAD_CONFIGS = "reload_configs";
+  public static final String REFRESH_CONFIGS = "refresh_configs";
+
+  private Map<String, Map<String, String>> propertyComponentCommandMap;
+
+  public RefreshCommandConfiguration() {
+  }
+
+  private String findKey(String propertyName) {
+    for (String keyName : propertyComponentCommandMap.keySet()) {
+      if (propertyName.startsWith(keyName)) {
+        return keyName;
+      }
+    }
+    return null;
+  }
+
+  /**
+   * If no command is defined for a component, the default command is REFRESH_CONFIGS when the component is a
+   * client, or when only one command is defined for another component. This is because if RELOAD_CONFIGS is
+   * defined for NAMENODE, then presumably the other dependent components need just a refresh.
+   */
+  public String getRefreshCommandForComponent(ServiceComponentHost sch, String propertyName) {
+    if (sch.isClientComponent()) {
+      return REFRESH_CONFIGS;
+    }
+    String keyName = findKey(propertyName);
+    Map<String, String> componentCommandMap = propertyComponentCommandMap.get(keyName);
+    if (componentCommandMap != null) {
+      String commandForComponent = componentCommandMap.get(sch.getServiceComponentName());
+      if (commandForComponent != null) {
+        return commandForComponent;
+      } else if(componentCommandMap.size() == 1) {
+        return REFRESH_CONFIGS;
+      }
+    }
+    return null;
+  }
+
+  public void addRefreshCommands(Map<String, Map<String, String>> refreshCommands) {
+    if (propertyComponentCommandMap == null) {
+      propertyComponentCommandMap = new HashMap<>();
+    }
+    propertyComponentCommandMap.putAll(refreshCommands);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
index c32e907..70d5926 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
@@ -90,6 +90,8 @@ public class StackInfo implements Comparable<StackInfo>, Validable {
   * */
   private List<String> servicesWithNoConfigs = new ArrayList<>();
 
+  private RefreshCommandConfiguration refreshCommandConfiguration = new RefreshCommandConfiguration();
+
   public String getMinJdk() {
     return minJdk;
   }
@@ -604,4 +606,12 @@ public class StackInfo implements Comparable<StackInfo>, Validable {
   public VersionDefinitionXml getLatestVersionDefinition() {
     return latestVersion;
   }
+
+  public RefreshCommandConfiguration getRefreshCommandConfiguration() {
+    return refreshCommandConfiguration;
+  }
+
+  public void setRefreshCommandConfiguration(RefreshCommandConfiguration refreshCommandConfiguration) {
+    this.refreshCommandConfiguration = refreshCommandConfiguration;
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
index f490ff0..3b8f6da 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
@@ -1181,6 +1181,16 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
       LOG.error("Could not determine stale config", e);
     }
 
+    try {
+      Cluster cluster = clusters.getCluster(clusterName);
+      ServiceComponent serviceComponent = cluster.getService(serviceName).getServiceComponent(serviceComponentName);
+      ServiceComponentHost sch = serviceComponent.getServiceComponentHost(hostName);
+      String refreshConfigsCommand = helper.getRefreshConfigsCommand(cluster,sch);
+      r.setReloadConfig(refreshConfigsCommand != null);
+    } catch (Exception e) {
+      LOG.error("Could not determine reload config flag", e);
+    }
+
     return r;
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/core-site.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/core-site.xml
index 5c6f043..d39ea78 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/core-site.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/core-site.xml
@@ -185,4 +185,16 @@ DEFAULT
     </description>
     <on-ambari-upgrade add="true"/>
   </property>
+  <property>
+    <name>hadoop.proxyuser.*</name>
+    <value/>
+    <description>
+      This * property is not configured; it is used only to define refresh commands for all properties
+      prefixed with hadoop.proxyuser.
+    </description>
+    <supported-refresh-commands>
+      <refresh-command componentName="NAMENODE" command="reloadproxyusers" />
+    </supported-refresh-commands>
+    <on-ambari-upgrade add="false"/>
+  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
index 7fdc227..d97a52e 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
@@ -184,6 +184,9 @@
     <value>3</value>
     <description>Determines datanode heartbeat interval in seconds.</description>
     <on-ambari-upgrade add="true"/>
+    <supported-refresh-commands>
+      <refresh-command componentName="NAMENODE" command="reload_configs" />
+    </supported-refresh-commands>
   </property>
   <property>
     <name>dfs.namenode.safemode.threshold-pct</name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
index 0aa0bc0..c0abb15 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
@@ -31,7 +31,7 @@ from resource_management.libraries.functions.decorator import retry
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, FILE_TYPE_XML
 from resource_management.core.logger import Logger
-from hdfs import hdfs
+from hdfs import hdfs, reconfig
 from ambari_commons.os_family_impl import OsFamilyImpl
 from ambari_commons import OSConst
 from utils import get_hdfs_binary
@@ -57,6 +57,17 @@ class DataNode(Script):
     hdfs("datanode")
     datanode(action="configure")
 
+  def save_configs(self, env):
+    import params
+    env.set_params(params)
+    hdfs("datanode")
+
+  def reload_configs(self, env):
+    import params
+    env.set_params(params)
+    Logger.info("RELOAD CONFIGS")
+    reconfig("datanode", params.dfs_dn_ipc_address)
+
   def start(self, env, upgrade_type=None):
     import params
     env.set_params(params)

http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
index 07c7616..4022986 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
@@ -20,12 +20,16 @@ Ambari Agent
 """
 
 from resource_management.libraries.script.script import Script
-from resource_management.core.resources.system import Directory, File, Link
+from resource_management.core.resources.system import Execute, Directory, File, Link
 from resource_management.core.resources import Package
 from resource_management.core.source import Template
 from resource_management.core.resources.service import ServiceConfig
 from resource_management.libraries.resources.xml_config import XmlConfig
+
 from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
+from resource_management.core.exceptions import Fail
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions.format import format
 import os
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from ambari_commons import OSConst
@@ -157,6 +161,52 @@ def install_snappy():
        to=params.so_src_x64,
   )
 
+class ConfigStatusParser():
+    def __init__(self):
+        self.reconfig_successful = False
+
+    def handle_new_line(self, line, is_stderr):
+        if is_stderr:
+            return
+
+        if line.startswith('SUCCESS: Changed property'):
+            self.reconfig_successful = True
+
+        Logger.info('[reconfig] %s' % (line))
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def reconfig(componentName, componentAddress):
+    import params
+
+    if params.security_enabled:
+        Execute(params.nn_kinit_cmd,
+                user=params.hdfs_user
+                )
+
+    nn_reconfig_cmd = format('hdfs --config {hadoop_conf_dir} dfsadmin -reconfig {componentName} {componentAddress} start')
+
+    Execute (nn_reconfig_cmd,
+             user=params.hdfs_user,
+             logoutput=True,
+             path=params.hadoop_bin_dir
+             )
+
+    nn_reconfig_cmd = format('hdfs --config {hadoop_conf_dir} dfsadmin -reconfig {componentName} {componentAddress} status')
+    config_status_parser = ConfigStatusParser()
+    Execute (nn_reconfig_cmd,
+             user=params.hdfs_user,
+             logoutput=False,
+             path=params.hadoop_bin_dir,
+             on_new_line=config_status_parser.handle_new_line
+             )
+
+
+    if not config_status_parser.reconfig_successful:
+        Logger.info('Reconfiguration failed')
+        raise Fail('Reconfiguration failed!')
+
+    Logger.info('Reconfiguration successfully completed.')
+
 @OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
 def hdfs(component=None):
   import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
index 0896f30..f2e96c3 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
@@ -42,6 +42,11 @@ class HdfsClient(Script):
     env.set_params(params)
     hdfs()
 
+  def save_configs(self, env):
+    import params
+    env.set_params(params)
+    hdfs()
+
   def start(self, env, upgrade_type=None):
     import params
     env.set_params(params)

http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
index cac6e9c..2224f72 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
@@ -430,6 +430,27 @@ def is_namenode_formatted(params):
 
   return False
 
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def refreshProxyUsers():
+  import params
+
+  if params.security_enabled:
+    Execute(params.nn_kinit_cmd,
+            user=params.hdfs_user
+            )
+
+  if params.dfs_ha_enabled:
+    # due to a bug in hdfs, refreshNodes will not run on both namenodes so we
+    # need to execute each command scoped to a particular namenode
+    nn_refresh_cmd = format('dfsadmin -fs hdfs://{namenode_rpc} -refreshSuperUserGroupsConfiguration')
+  else:
+    nn_refresh_cmd = format('dfsadmin -fs {namenode_address} -refreshSuperUserGroupsConfiguration')
+  ExecuteHadoop(nn_refresh_cmd,
+                user=params.hdfs_user,
+                conf_dir=params.hadoop_conf_dir,
+                bin_dir=params.hadoop_bin_dir)
+
 @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
 def decommission():
   import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
index 50bf1e0..291da05 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
@@ -46,8 +46,8 @@ from ambari_commons import OSConst
 
 
 import namenode_upgrade
-from hdfs_namenode import namenode, wait_for_safemode_off
-from hdfs import hdfs
+from hdfs_namenode import namenode, wait_for_safemode_off, refreshProxyUsers
+from hdfs import hdfs, reconfig
 import hdfs_rebalance
 from utils import initiate_safe_zkfc_failover, get_hdfs_binary, get_dfsadmin_base_command
 
@@ -86,6 +86,23 @@ class NameNode(Script):
     hdfs_binary = self.get_hdfs_binary()
     namenode(action="configure", hdfs_binary=hdfs_binary, env=env)
 
+  def save_configs(self, env):
+    import params
+    env.set_params(params)
+    hdfs()
+
+  def reload_configs(self, env):
+    import params
+    env.set_params(params)
+    Logger.info("RELOAD CONFIGS")
+    reconfig("namenode", params.namenode_address)
+
+  def reloadproxyusers(self, env):
+    import params
+    env.set_params(params)
+    Logger.info("RELOAD HDFS PROXY USERS")
+    refreshProxyUsers()
+
   def start(self, env, upgrade_type=None):
     import params
     env.set_params(params)

http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
index 4977e1c..3d387b4 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
@@ -44,6 +44,16 @@ class SNameNode(Script):
     hdfs("secondarynamenode")
     snamenode(action="configure")
 
+  def save_configs(self, env):
+    import params
+    env.set_params(params)
+    hdfs("secondarynamenode")
+
+  def reload_configs(self, env):
+    import params
+    env.set_params(params)
+    Logger.info("RELOAD CONFIGS")
+
   def start(self, env, upgrade_type=None):
     import params
     env.set_params(params)

http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-site.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-site.xml
index 5c28527..940f87c 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-site.xml
@@ -181,6 +181,9 @@
     <value>3</value>
     <description>Determines datanode heartbeat interval in seconds.</description>
     <on-ambari-upgrade add="false"/>
+    <supported-refresh-commands>
+      <refresh-command componentName="NAMENODE" command="reload_configs" />
+    </supported-refresh-commands>
   </property>
   <property>
     <name>dfs.namenode.safemode.threshold-pct</name>
@@ -637,5 +640,8 @@
     <name>hadoop.caller.context.enabled</name>
     <value>true</value>
     <on-ambari-upgrade add="false"/>
+    <supported-refresh-commands>
+      <refresh-command componentName="NAMENODE" command="reload_configs" />
+    </supported-refresh-commands>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/datanode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/datanode.py
index d8fb361..a843374 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/datanode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/datanode.py
@@ -25,7 +25,7 @@ from resource_management.libraries.functions.stack_features import check_stack_f
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, FILE_TYPE_XML
 from resource_management.core.logger import Logger
-from hdfs import hdfs
+from hdfs import hdfs, reconfig
 from ambari_commons.os_family_impl import OsFamilyImpl
 from ambari_commons import OSConst
 from utils import get_hdfs_binary
@@ -50,6 +50,17 @@ class DataNode(Script):
     hdfs("datanode")
     datanode(action="configure")
 
+  def save_configs(self, env):
+    import params
+    env.set_params(params)
+    hdfs("datanode")
+
+  def reload_configs(self, env):
+    import params
+    env.set_params(params)
+    Logger.info("RELOAD CONFIGS")
+    reconfig("datanode", params.dfs_dn_ipc_address)
+
   def start(self, env, upgrade_type=None):
     import params
     env.set_params(params)

http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
index 07c7616..4022986 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
@@ -20,12 +20,16 @@ Ambari Agent
 """
 
 from resource_management.libraries.script.script import Script
-from resource_management.core.resources.system import Directory, File, Link
+from resource_management.core.resources.system import Execute, Directory, File, Link
 from resource_management.core.resources import Package
 from resource_management.core.source import Template
 from resource_management.core.resources.service import ServiceConfig
 from resource_management.libraries.resources.xml_config import XmlConfig
+
 from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
+from resource_management.core.exceptions import Fail
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions.format import format
 import os
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from ambari_commons import OSConst
@@ -157,6 +161,52 @@ def install_snappy():
        to=params.so_src_x64,
   )
 
+class ConfigStatusParser():
+    def __init__(self):
+        self.reconfig_successful = False
+
+    def handle_new_line(self, line, is_stderr):
+        if is_stderr:
+            return
+
+        if line.startswith('SUCCESS: Changed property'):
+            self.reconfig_successful = True
+
+        Logger.info('[reconfig] %s' % (line))
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def reconfig(componentName, componentAddress):
+    import params
+
+    if params.security_enabled:
+        Execute(params.nn_kinit_cmd,
+                user=params.hdfs_user
+                )
+
+    nn_reconfig_cmd = format('hdfs --config {hadoop_conf_dir} dfsadmin -reconfig {componentName} {componentAddress} start')
+
+    Execute (nn_reconfig_cmd,
+             user=params.hdfs_user,
+             logoutput=True,
+             path=params.hadoop_bin_dir
+             )
+
+    nn_reconfig_cmd = format('hdfs --config {hadoop_conf_dir} dfsadmin -reconfig {componentName} {componentAddress} status')
+    config_status_parser = ConfigStatusParser()
+    Execute (nn_reconfig_cmd,
+             user=params.hdfs_user,
+             logoutput=False,
+             path=params.hadoop_bin_dir,
+             on_new_line=config_status_parser.handle_new_line
+             )
+
+
+    if not config_status_parser.reconfig_successful:
+        Logger.info('Reconfiguration failed')
+        raise Fail('Reconfiguration failed!')
+
+    Logger.info('Reconfiguration successfully completed.')
+
 @OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
 def hdfs(component=None):
   import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_client.py
index 0896f30..f2e96c3 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_client.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_client.py
@@ -42,6 +42,11 @@ class HdfsClient(Script):
     env.set_params(params)
     hdfs()
 
+  def save_configs(self, env):
+    import params
+    env.set_params(params)
+    hdfs()
+
   def start(self, env, upgrade_type=None):
     import params
     env.set_params(params)

http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
index 5a1f368..94cd66c 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
@@ -460,6 +460,26 @@ def decommission():
                   conf_dir=conf_dir,
                   bin_dir=params.hadoop_bin_dir)
 
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def refreshProxyUsers():
+  import params
+
+  if params.security_enabled:
+    Execute(params.nn_kinit_cmd,
+            user=params.hdfs_user
+            )
+
+  if params.dfs_ha_enabled:
+    # due to a bug in hdfs, the refresh will not run on both namenodes, so we
+    # need to execute the command scoped to a particular namenode
+    nn_refresh_cmd = format('dfsadmin -fs hdfs://{namenode_rpc} -refreshSuperUserGroupsConfiguration')
+  else:
+    nn_refresh_cmd = format('dfsadmin -fs {namenode_address} -refreshSuperUserGroupsConfiguration')
+  ExecuteHadoop(nn_refresh_cmd,
+                user=params.hdfs_user,
+                conf_dir=params.hadoop_conf_dir,
+                bin_dir=params.hadoop_bin_dir)
+
 @OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
 def decommission():
   import params

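As a quick illustration of the HA branch above, the two command shapes differ
only in how the target filesystem is addressed. A sketch with hypothetical
host and nameservice values (not part of the commit):

    dfs_ha_enabled = True
    namenode_rpc = 'ns1'                                # HA nameservice id (assumed)
    namenode_address = 'hdfs://nn01.example.com:8020'   # non-HA address (assumed)

    if dfs_ha_enabled:
        nn_refresh_cmd = 'dfsadmin -fs hdfs://%s -refreshSuperUserGroupsConfiguration' % namenode_rpc
    else:
        nn_refresh_cmd = 'dfsadmin -fs %s -refreshSuperUserGroupsConfiguration' % namenode_address

    print(nn_refresh_cmd)  # dfsadmin -fs hdfs://ns1 -refreshSuperUserGroupsConfiguration
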
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py
index 7a0e784..ffdafb8 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py
@@ -46,8 +46,8 @@ from ambari_commons import OSConst
 
 
 import namenode_upgrade
-from hdfs_namenode import namenode, wait_for_safemode_off
-from hdfs import hdfs
+from hdfs_namenode import namenode, wait_for_safemode_off, refreshProxyUsers
+from hdfs import hdfs, reconfig
 import hdfs_rebalance
 from utils import initiate_safe_zkfc_failover, get_hdfs_binary, get_dfsadmin_base_command
 
@@ -86,6 +86,23 @@ class NameNode(Script):
     hdfs_binary = self.get_hdfs_binary()
     namenode(action="configure", hdfs_binary=hdfs_binary, env=env)
 
+  def save_configs(self, env):
+    import params
+    env.set_params(params)
+    hdfs()
+
+  def reload_configs(self, env):
+    import params
+    env.set_params(params)
+    Logger.info("RELOAD CONFIGS")
+    reconfig("namenode", params.namenode_address)
+
+  def reloadproxyusers(self, env):
+    import params
+    env.set_params(params)
+    Logger.info("RELOAD HDFS PROXY USERS")
+    refreshProxyUsers()
+
   def start(self, env, upgrade_type=None):
     import params
     env.set_params(params)

http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/snamenode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/snamenode.py
index f5ff3e1..b0ed533 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/snamenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/snamenode.py
@@ -44,6 +44,16 @@ class SNameNode(Script):
     hdfs("secondarynamenode")
     snamenode(action="configure")
 
+  def save_configs(self, env):
+    import params
+    env.set_params(params)
+    hdfs("secondarynamenode")
+
+  def reload_configs(self, env):
+    import params
+    env.set_params(params)
+    Logger.info("RELOAD CONFIGS")
+
   def start(self, env, upgrade_type=None):
     import params
     env.set_params(params)

http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/configuration-schema.xsd
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/configuration-schema.xsd b/ambari-server/src/main/resources/configuration-schema.xsd
index 9350984..12b0217 100644
--- a/ambari-server/src/main/resources/configuration-schema.xsd
+++ b/ambari-server/src/main/resources/configuration-schema.xsd
@@ -41,6 +41,13 @@
       <xs:element name="deleted" type="xs:boolean" minOccurs="0"/>
       <xs:element name="final" type="xs:boolean" minOccurs="0"/>
       <xs:element name="on-ambari-upgrade" type="propertyUpgradeBehavior" minOccurs="1"/>
+      <xs:element name="supported-refresh-commands" minOccurs="0">
+        <xs:complexType>
+          <xs:sequence>
+            <xs:element name="refresh-command" type="refreshCommands" minOccurs="1" maxOccurs="unbounded"/>
+          </xs:sequence>
+        </xs:complexType>
+      </xs:element>
       <xs:element name="on-stack-upgrade" type="propertyStackUpgradeBehavior" minOccurs="0"/>
       <xs:element name="property-type" minOccurs="0">
         <xs:simpleType>
@@ -84,6 +91,11 @@
     <xs:attribute name="merge" type="xs:boolean" use="optional" default="true"/>
   </xs:complexType>
 
+  <xs:complexType name="refreshCommands">
+    <xs:attribute name="componentName" type="xs:string" use="required"/>
+    <xs:attribute name="command" type="xs:string" use="optional"/>
+  </xs:complexType>
+
   <xs:complexType name="valueAttributesInfo">
     <xs:all>
       <xs:element name="type" type="xs:string" minOccurs="0"/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/properties.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/properties.json b/ambari-server/src/main/resources/properties.json
index e42864f..1d12f83 100644
--- a/ambari-server/src/main/resources/properties.json
+++ b/ambari-server/src/main/resources/properties.json
@@ -53,6 +53,7 @@
         "HostRoles/actual_configs",
         "params/run_smoke_test",
         "HostRoles/stale_configs",
+        "HostRoles/reload_configs",
         "HostRoles/desired_admin_state",
         "HostRoles/maintenance_state",
         "HostRoles/service_name",

http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/hdfs-site.xml
index 86aa3ec..14fcf6a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/hdfs-site.xml
@@ -76,5 +76,8 @@
     <name>hadoop.caller.context.enabled</name>
     <value>true</value>
     <on-ambari-upgrade add="false"/>
+    <supported-refresh-commands>
+      <refresh-command componentName="NAMENODE" command="reload_configs" />
+    </supported-refresh-commands>
   </property>
 </configuration>

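The new <supported-refresh-commands> block (schema in the XSD change above,
usage here) maps a property to the component commands that can refresh it
without a restart. A hedged sketch of reading such a block with the Python
standard library — Ambari itself binds this XML via JAXB on the server side,
not with this code:

    import xml.etree.ElementTree as ET

    # Property definition adapted from the hdfs-site.xml change above.
    snippet = '''<property>
      <name>hadoop.caller.context.enabled</name>
      <value>true</value>
      <supported-refresh-commands>
        <refresh-command componentName="NAMENODE" command="reload_configs"/>
      </supported-refresh-commands>
    </property>'''

    prop = ET.fromstring(snippet)
    for rc in prop.iter('refresh-command'):
        print(rc.get('componentName'), rc.get('command'))
    # -> NAMENODE reload_configs
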
http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
index 38a38cc..8a0a782 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
@@ -157,6 +157,7 @@ public class ConfigHelperTest {
 
       cluster.addService("FLUME", repositoryVersion);
       cluster.addService("OOZIE", repositoryVersion);
+      cluster.addService("HDFS", repositoryVersion);
 
       final ClusterRequest clusterRequest2 =
           new ClusterRequest(cluster.getClusterId(), clusterName,
@@ -229,6 +230,45 @@ public class ConfigHelperTest {
       managementController.updateClusters(new HashSet<ClusterRequest>() {{
         add(clusterRequest5);
       }}, null);
+
+      // hdfs-site/hadoop.caller.context.enabled
+      ConfigurationRequest cr6 = new ConfigurationRequest();
+      cr6.setClusterName(clusterName);
+      cr6.setType("hdfs-site");
+      cr6.setVersionTag("version1");
+      cr6.setProperties(new HashMap<String, String>() {{
+        put("hadoop.caller.context.enabled", "true");
+      }});
+      cr6.setPropertiesAttributes(null);
+
+      final ClusterRequest clusterRequest6 =
+              new ClusterRequest(cluster.getClusterId(), clusterName,
+                      cluster.getDesiredStackVersion().getStackVersion(), null);
+
+      clusterRequest6.setDesiredConfig(Collections.singletonList(cr6));
+      managementController.updateClusters(new HashSet<ClusterRequest>() {{
+        add(clusterRequest6);
+      }}, null);
+
+      // hdfs-site/hadoop.caller.context.enabled
+      ConfigurationRequest cr7 = new ConfigurationRequest();
+      cr7.setClusterName(clusterName);
+      cr7.setType("hdfs-site");
+      cr7.setVersionTag("version2");
+      cr7.setProperties(new HashMap<String, String>() {{
+        put("hadoop.caller.context.enabled", "false");
+      }});
+      cr7.setPropertiesAttributes(null);
+
+      final ClusterRequest clusterRequest7 =
+              new ClusterRequest(cluster.getClusterId(), clusterName,
+                      cluster.getDesiredStackVersion().getStackVersion(), null);
+
+      clusterRequest7.setDesiredConfig(Collections.singletonList(cr7));
+      managementController.updateClusters(new HashSet<ClusterRequest>() {{
+        add(clusterRequest7);
+      }}, null);
+
     }
 
     @After
@@ -545,7 +585,7 @@ public class ConfigHelperTest {
               configHelper.getEffectiveDesiredTags(cluster, "h3"));
 
       Assert.assertNotNull(effectiveAttributes);
-      Assert.assertEquals(7, effectiveAttributes.size());
+      Assert.assertEquals(8, effectiveAttributes.size());
 
       Assert.assertTrue(effectiveAttributes.containsKey("global3"));
       Map<String, Map<String, String>> globalAttrs = effectiveAttributes.get("global3");
@@ -991,7 +1031,39 @@ public class ConfigHelperTest {
       Assert.assertTrue(configHelper.isStaleConfigs(sch, null));
 
       verify(sch);
-    }
+  }
+
+  @Test
+  public void testCalculateRefreshCommands() throws Exception {
+
+    Map<String, HostConfig> schReturn = new HashMap<>();
+    HostConfig hc = new HostConfig();
+    // Put a different version to check for change
+    hc.setDefaultVersionTag("version1");
+    schReturn.put("hdfs-site", hc);
+
+    ServiceComponent sc = createNiceMock(ServiceComponent.class);
+
+    // set up mocks
+    ServiceComponentHost sch = createNiceMock(ServiceComponentHost.class);
+    expect(sc.getDesiredStackId()).andReturn(cluster.getDesiredStackVersion()).anyTimes();
+
+    // set up expectations
+    expect(sch.getActualConfigs()).andReturn(schReturn).anyTimes();
+    expect(sch.getHostName()).andReturn("h1").anyTimes();
+    expect(sch.getClusterId()).andReturn(cluster.getClusterId()).anyTimes();
+    expect(sch.getServiceName()).andReturn("HDFS").anyTimes();
+    expect(sch.getServiceComponentName()).andReturn("NAMENODE").anyTimes();
+    expect(sch.getServiceComponent()).andReturn(sc).anyTimes();
+
+    replay(sc, sch);
+
+    Assert.assertTrue(configHelper.isStaleConfigs(sch, null));
+    String refreshConfigsCommand = configHelper.getRefreshConfigsCommand(cluster, sch);
+    Assert.assertEquals("reload_configs", refreshConfigsCommand);
+    verify(sch);
+  }
+
   }
 
   public static class RunWithCustomModule {

http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/test/java/org/apache/ambari/server/state/PropertyInfoTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/PropertyInfoTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/PropertyInfoTest.java
index 7a94ebf..4a04d0b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/PropertyInfoTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/PropertyInfoTest.java
@@ -137,6 +137,26 @@ public class PropertyInfoTest {
   }
 
   @Test
+  public void testBehaviorWithSupportedRefreshCommandsTags() throws JAXBException {
+    // given
+    String xml =
+    "<property>\n" +
+    " <name>prop_name</name>\n" +
+    " <value>prop_val</value>\n" +
+    " <supported-refresh-commands>\n" +
+    "   <refresh-command componentName=\"NAMENODE\" command=\"reload_configs\" />\n" +
+    " </supported-refresh-commands>\n" +
+    "</property>";
+
+    // when
+    PropertyInfo propertyInfo = propertyInfoFrom(xml);
+
+    // then
+    assertEquals(propertyInfo.getSupportedRefreshCommands().iterator().next().getCommand(), "reload_configs");
+    assertEquals(propertyInfo.getSupportedRefreshCommands().iterator().next().getComponentName(), "NAMENODE");
+  }
+
+  @Test
   public void testUnknownPropertyType() throws Exception {
     // Given
     String xml =

http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
index b1a4154..ef59e84 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
@@ -666,3 +666,20 @@ class TestDatanode(RMFTestCase):
     self.assertEquals(
       ('hdfs dfsadmin -fs hdfs://ns1 -D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo 0.0.0.0:8010'),
       mocks_dict['checked_call'].call_args_list[0][0][0])
+
+  def test_reload_configs(self):
+      with self.assertRaises(Fail):
+          self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
+                             classname = "DataNode",
+                             command = "reload_configs",
+                             config_file = "default.json",
+                             stack_version = self.STACK_VERSION,
+                             target = RMFTestCase.TARGET_COMMON_SERVICES
+                             )
+
+          # self.assertResourceCalled('Execute', "hdfs dfsadmin -fs hdfs://c6401.ambari.apache.org:8020 -reconfig namenode c6401.ambari.apache.org:8020 start",
+          #                       tries=115,
+          #                       try_sleep=10,
+          #                       user="hdfs",
+          #                       logoutput=True
+          #                       )
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
index 06e12f6..4e1124a 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
@@ -1745,6 +1745,39 @@ class TestNamenode(RMFTestCase):
     get_namenode_states_mock.return_value = active_namenodes, standby_namenodes, unknown_namenodes
     self.assertFalse(is_this_namenode_active())
 
+  def test_reloadproxyusers(self):
+      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
+                         classname = "NameNode",
+                         command = "reloadproxyusers",
+                         config_file = "default.json",
+                         stack_version = self.STACK_VERSION,
+                         target = RMFTestCase.TARGET_COMMON_SERVICES
+                         )
+
+      self.assertResourceCalled('ExecuteHadoop', 'dfsadmin -fs hdfs://c6401.ambari.apache.org:8020 -refreshSuperUserGroupsConfiguration',
+                                user = 'hdfs',
+                                conf_dir = '/etc/hadoop/conf',
+                                bin_dir = '/usr/bin')
+      self.assertNoMoreResources()
+
+  def test_reload_configs(self):
+      with self.assertRaises(Fail):
+          self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
+                             classname = "NameNode",
+                             command = "reload_configs",
+                             config_file = "default.json",
+                             stack_version = self.STACK_VERSION,
+                             target = RMFTestCase.TARGET_COMMON_SERVICES
+                             )
+
+      # self.assertResourceCalled('Execute', "hdfs dfsadmin -fs hdfs://c6401.ambari.apache.org:8020 -reconfig namenode c6401.ambari.apache.org:8020 start",
+      #                       tries=115,
+      #                       try_sleep=10,
+      #                       user="hdfs",
+      #                       logoutput=True
+      #                       )
+
+
 
 class Popen_Mock:
   return_value = 1

http://git-wip-us.apache.org/repos/asf/ambari/blob/57682942/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml
index 28657eb..9e52a33 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml
@@ -444,4 +444,12 @@ don't exist, they will be created with this permission.</description>
     </description>
     <on-ambari-upgrade add="true"/>
   </property>
+  <property>
+    <name>hadoop.caller.context.enabled</name>
+    <value>true</value>
+    <on-ambari-upgrade add="false"/>
+    <supported-refresh-commands>
+      <refresh-command componentName="NAMENODE" command="reload_configs" />
+    </supported-refresh-commands>
+  </property>
 </configuration>


[06/50] [abbrv] ambari git commit: AMBARI-14714. add new v2 interfaces for blueprint

Posted by ja...@apache.org.
AMBARI-14714. add new v2 interfaces for blueprint


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9c8ed58d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9c8ed58d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9c8ed58d

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: 9c8ed58d11a904392bebb89b42049eed10932a17
Parents: 9306544
Author: Sandor Magyari <sm...@hortonworks.com>
Authored: Fri Oct 6 18:20:28 2017 +0200
Committer: Sandor Magyari <sm...@hortonworks.com>
Committed: Fri Oct 6 18:21:47 2017 +0200

----------------------------------------------------------------------
 .../ambari/server/controller/StackV2.java       | 816 +++++++++++++++++++
 .../ambari/server/topology/BlueprintImplV2.java | 134 +++
 .../ambari/server/topology/BlueprintV2.java     | 179 ++++
 .../ambari/server/topology/ComponentV2.java     |  72 ++
 .../ambari/server/topology/HostGroupV2.java     | 124 +++
 5 files changed, 1325 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9c8ed58d/ambari-server/src/main/java/org/apache/ambari/server/controller/StackV2.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/StackV2.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/StackV2.java
new file mode 100644
index 0000000..3e34951
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/StackV2.java
@@ -0,0 +1,816 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.orm.entities.StackEntity;
+import org.apache.ambari.server.state.*;
+import org.apache.ambari.server.topology.Cardinality;
+import org.apache.ambari.server.topology.Configuration;
+
+import java.util.*;
+
+/**
+ * Encapsulates stack information.
+ */
+public class StackV2 {
+  /**
+   * Stack name
+   */
+  private String name;
+
+  /**
+   * Stack version
+   */
+  private String version;
+
+  /**
+   * Repo version
+   */
+  private String repoVersion;
+
+  /**
+   * Map of service name to components
+   */
+  private Map<String, Collection<String>> serviceComponents =
+    new HashMap<>();
+
+  /**
+   * Map of component to service
+   */
+  private Map<String, String> componentService = new HashMap<>();
+
+  /**
+   * Map of component to dependencies
+   */
+  private Map<String, Collection<DependencyInfo>> dependencies =
+    new HashMap<>();
+
+  /**
+   * Map of dependency to conditional service
+   */
+  private Map<DependencyInfo, String> dependencyConditionalServiceMap =
+    new HashMap<>();
+
+  /**
+   * Map of database component name to configuration property which indicates whether
+   * the database is to be managed or if it is an external non-managed instance.
+   * If the value of the config property starts with 'New', the database is determined
+   * to be managed, otherwise it is non-managed.
+   */
+  private Map<String, String> dbDependencyInfo = new HashMap<>();
+
+  /**
+   * Map of component to required cardinality
+   */
+  private Map<String, String> cardinalityRequirements = new HashMap<>();
+
+  //todo: instead of all these maps from component -> * ,
+  //todo: we should use a Component object with all of these attributes
+  private Set<String> masterComponents = new HashSet<>();
+
+  /**
+   * Map of component to auto-deploy information
+   */
+  private Map<String, AutoDeployInfo> componentAutoDeployInfo =
+    new HashMap<>();
+
+  /**
+   * Map of service to config type properties
+   */
+  private Map<String, Map<String, Map<String, ConfigProperty>>> serviceConfigurations =
+    new HashMap<>();
+
+  /**
+   * Map of service to required type properties
+   */
+  private Map<String, Map<String, Map<String, ConfigProperty>>> requiredServiceConfigurations =
+    new HashMap<>();
+
+  /**
+   * Map of service to config type properties
+   */
+  private Map<String, Map<String, ConfigProperty>> stackConfigurations =
+    new HashMap<>();
+
+  /**
+   * Map of service to set of excluded config types
+   */
+  private Map<String, Set<String>> excludedConfigurationTypes =
+    new HashMap<>();
+
+  /**
+   * Ambari Management Controller, used to obtain Stack definitions
+   */
+  private final AmbariManagementController controller;
+
+
+  /**
+   * Constructor.
+   *
+   * @param stack
+   *          the stack (not {@code null}).
+   * @param ambariManagementController
+   *          the management controller (not {@code null}).
+   * @throws AmbariException
+   */
+  public StackV2(StackEntity stack, AmbariManagementController ambariManagementController) throws AmbariException {
+    this(stack.getStackName(), stack.getStackVersion(), ambariManagementController);
+  }
+
+  /**
+   * Constructor.
+   *
+   * @param name     stack name
+   * @param version  stack version
+   *
+   * @throws AmbariException an exception occurred getting stack information
+   *                         for the specified name and version
+   */
+  //todo: don't pass management controller in constructor
+  public StackV2(String name, String version, AmbariManagementController controller) throws AmbariException {
+    this.name = name;
+    this.version = version;
+    this.controller = controller;
+
+    Set<StackServiceResponse> stackServices = controller.getStackServices(
+        Collections.singleton(new StackServiceRequest(name, version, null)));
+
+    for (StackServiceResponse stackService : stackServices) {
+      String serviceName = stackService.getServiceName();
+      parseComponents(serviceName);
+      parseExcludedConfigurations(stackService);
+      parseConfigurations(stackService);
+      registerConditionalDependencies();
+    }
+
+    //todo: already done for each service
+    parseStackConfigurations();
+  }
+
+  /**
+   * Obtain stack name.
+   *
+   * @return stack name
+   */
+  public String getName() {
+    return name;
+  }
+
+  /**
+   * Obtain stack version.
+   *
+   * @return stack version
+   */
+  public String getVersion() {
+    return version;
+  }
+
+  /**
+   * Obtain repo version.
+   * @return repo version
+   */
+  public String getRepoVersion() { return repoVersion; }
+
+  Map<DependencyInfo, String> getDependencyConditionalServiceMap() {
+    return dependencyConditionalServiceMap;
+  }
+
+  /**
+   * Get services contained in the stack.
+   *
+   * @return collection of all services for the stack
+   */
+  public Collection<String> getServices() {
+    return serviceComponents.keySet();
+  }
+
+  /**
+   * Get components contained in the stack for the specified service.
+   *
+   * @param service  service name
+   *
+   * @return collection of component names for the specified service
+   */
+  public Collection<String> getComponents(String service) {
+    return serviceComponents.get(service);
+  }
+
+  /**
+   * Get all service components
+   *
+   * @return map of service to associated components
+   */
+  public Map<String, Collection<String>> getComponents() {
+    Map<String, Collection<String>> serviceComponents = new HashMap<>();
+    for (String service : getServices()) {
+      Collection<String> components = new HashSet<>();
+      components.addAll(getComponents(service));
+      serviceComponents.put(service, components);
+    }
+    return serviceComponents;
+  }
+
+  /**
+   * Get info for the specified component.
+   *
+   * @param component  component name
+   *
+   * @return component information for the requested component
+   *         or null if the component doesn't exist in the stack
+   */
+  public ComponentInfo getComponentInfo(String component) {
+    ComponentInfo componentInfo = null;
+    String service = getServiceForComponent(component);
+    if (service != null) {
+      try {
+        componentInfo = controller.getAmbariMetaInfo().getComponent(
+            getName(), getVersion(), service, component);
+      } catch (AmbariException e) {
+        // just return null if component doesn't exist
+      }
+    }
+    return componentInfo;
+  }
+
+  /**
+   * Get all configuration types, including excluded types for the specified service.
+   *
+   * @param service  service name
+   *
+   * @return collection of all configuration types for the specified service
+   */
+  public Collection<String> getAllConfigurationTypes(String service) {
+    return serviceConfigurations.get(service).keySet();
+  }
+
+  /**
+   * Get configuration types for the specified service.
+   * This doesn't include any service excluded types.
+   *
+   * @param service  service name
+   *
+   * @return collection of all configuration types for the specified service
+   */
+  public Collection<String> getConfigurationTypes(String service) {
+    Set<String> serviceTypes = new HashSet<>(serviceConfigurations.get(service).keySet());
+    serviceTypes.removeAll(getExcludedConfigurationTypes(service));
+
+    return serviceTypes;
+  }
+
+  /**
+   * Get the set of excluded configuration types for this service.
+   *
+   * @param service service name
+   *
+   * @return Set of names of excluded config types. Will not return null.
+   */
+  public Set<String> getExcludedConfigurationTypes(String service) {
+    return excludedConfigurationTypes.containsKey(service) ?
+        excludedConfigurationTypes.get(service) :
+        Collections.emptySet();
+  }
+
+  /**
+   * Get config properties for the specified service and configuration type.
+   *
+   * @param service  service name
+   * @param type     configuration type
+   *
+   * @return map of property names to values for the specified service and configuration type
+   */
+  public Map<String, String> getConfigurationProperties(String service, String type) {
+    Map<String, String> configMap = new HashMap<>();
+    Map<String, ConfigProperty> configProperties = serviceConfigurations.get(service).get(type);
+    if (configProperties != null) {
+      for (Map.Entry<String, ConfigProperty> configProperty : configProperties.entrySet()) {
+        configMap.put(configProperty.getKey(), configProperty.getValue().getValue());
+      }
+    }
+    return configMap;
+  }
+
+  public Map<String, ConfigProperty> getConfigurationPropertiesWithMetadata(String service, String type) {
+    return serviceConfigurations.get(service).get(type);
+  }
+
+  /**
+   * Get all required config properties for the specified service.
+   *
+   * @param service  service name
+   *
+   * @return collection of all required properties for the given service
+   */
+  public Collection<ConfigProperty> getRequiredConfigurationProperties(String service) {
+    Collection<ConfigProperty> requiredConfigProperties = new HashSet<>();
+    Map<String, Map<String, ConfigProperty>> serviceProperties = requiredServiceConfigurations.get(service);
+    if (serviceProperties != null) {
+      for (Map.Entry<String, Map<String, ConfigProperty>> typePropertiesEntry : serviceProperties.entrySet()) {
+        requiredConfigProperties.addAll(typePropertiesEntry.getValue().values());
+      }
+    }
+    return requiredConfigProperties;
+  }
+
+  /**
+   * Get required config properties for the specified service which belong to the specified property type.
+   *
+   * @param service       service name
+   * @param propertyType  property type
+   *
+   * @return collection of required properties for the given service and property type
+   */
+  public Collection<ConfigProperty> getRequiredConfigurationProperties(String service, PropertyInfo.PropertyType propertyType) {
+    Collection<ConfigProperty> matchingProperties = new HashSet<>();
+    Map<String, Map<String, ConfigProperty>> requiredProperties = requiredServiceConfigurations.get(service);
+    if (requiredProperties != null) {
+      for (Map.Entry<String, Map<String, ConfigProperty>> typePropertiesEntry : requiredProperties.entrySet()) {
+        for (ConfigProperty configProperty : typePropertiesEntry.getValue().values()) {
+          if (configProperty.getPropertyTypes().contains(propertyType)) {
+            matchingProperties.add(configProperty);
+          }
+        }
+
+      }
+    }
+    return matchingProperties;
+  }
+
+  public boolean isPasswordProperty(String service, String type, String propertyName) {
+    return (serviceConfigurations.containsKey(service) &&
+            serviceConfigurations.get(service).containsKey(type) &&
+            serviceConfigurations.get(service).get(type).containsKey(propertyName) &&
+            serviceConfigurations.get(service).get(type).get(propertyName).getPropertyTypes().
+                contains(PropertyInfo.PropertyType.PASSWORD));
+  }
+
+  //todo
+  public Map<String, String> getStackConfigurationProperties(String type) {
+    Map<String, String> configMap = new HashMap<>();
+    Map<String, ConfigProperty> configProperties = stackConfigurations.get(type);
+    if (configProperties != null) {
+      for (Map.Entry<String, ConfigProperty> configProperty : configProperties.entrySet()) {
+        configMap.put(configProperty.getKey(), configProperty.getValue().getValue());
+      }
+    }
+    return configMap;
+  }
+
+  public boolean isKerberosPrincipalNameProperty(String service, String type, String propertyName) {
+    return (serviceConfigurations.containsKey(service) &&
+            serviceConfigurations.get(service).containsKey(type) &&
+            serviceConfigurations.get(service).get(type).containsKey(propertyName) &&
+            serviceConfigurations.get(service).get(type).get(propertyName).getPropertyTypes().
+                contains(PropertyInfo.PropertyType.KERBEROS_PRINCIPAL));
+  }
+  /**
+   * Get config attributes for the specified service and configuration type.
+   *
+   * @param service  service name
+   * @param type     configuration type
+   *
+   * @return  map of attribute names to map of property names to attribute values
+   *          for the specified service and configuration type
+   */
+  public Map<String, Map<String, String>> getConfigurationAttributes(String service, String type) {
+    Map<String, Map<String, String>> attributesMap = new HashMap<>();
+    Map<String, ConfigProperty> configProperties = serviceConfigurations.get(service).get(type);
+    if (configProperties != null) {
+      for (Map.Entry<String, ConfigProperty> configProperty : configProperties.entrySet()) {
+        String propertyName = configProperty.getKey();
+        Map<String, String> propertyAttributes = configProperty.getValue().getAttributes();
+        if (propertyAttributes != null) {
+          for (Map.Entry<String, String> propertyAttribute : propertyAttributes.entrySet()) {
+            String attributeName = propertyAttribute.getKey();
+            String attributeValue = propertyAttribute.getValue();
+            if (attributeValue != null) {
+              Map<String, String> attributes = attributesMap.get(attributeName);
+              if (attributes == null) {
+                  attributes = new HashMap<>();
+                  attributesMap.put(attributeName, attributes);
+              }
+              attributes.put(propertyName, attributeValue);
+            }
+          }
+        }
+      }
+    }
+    return attributesMap;
+  }
+
+  //todo:
+  public Map<String, Map<String, String>> getStackConfigurationAttributes(String type) {
+    Map<String, Map<String, String>> attributesMap = new HashMap<>();
+    Map<String, ConfigProperty> configProperties = stackConfigurations.get(type);
+    if (configProperties != null) {
+      for (Map.Entry<String, ConfigProperty> configProperty : configProperties.entrySet()) {
+        String propertyName = configProperty.getKey();
+        Map<String, String> propertyAttributes = configProperty.getValue().getAttributes();
+        if (propertyAttributes != null) {
+          for (Map.Entry<String, String> propertyAttribute : propertyAttributes.entrySet()) {
+            String attributeName = propertyAttribute.getKey();
+            String attributeValue = propertyAttribute.getValue();
+            Map<String, String> attributes = attributesMap.get(attributeName);
+            if (attributes == null) {
+              attributes = new HashMap<>();
+              attributesMap.put(attributeName, attributes);
+            }
+            attributes.put(propertyName, attributeValue);
+          }
+        }
+      }
+    }
+    return attributesMap;
+  }
+
+  /**
+   * Get the service for the specified component.
+   *
+   * @param component  component name
+   *
+   * @return service name that contains the specified component
+   */
+  public String getServiceForComponent(String component) {
+    return componentService.get(component);
+  }
+
+  /**
+   * Get the names of the services which contains the specified components.
+   *
+   * @param components collection of components
+   *
+   * @return collection of services which contain the specified components
+   */
+  public Collection<String> getServicesForComponents(Collection<String> components) {
+    Set<String> services = new HashSet<>();
+    for (String component : components) {
+      services.add(getServiceForComponent(component));
+    }
+
+    return services;
+  }
+
+  /**
+   * Obtain the service name which corresponds to the specified configuration.
+   *
+   * @param config  configuration type
+   *
+   * @return name of service which corresponds to the specified configuration type
+   */
+  public String getServiceForConfigType(String config) {
+    for (Map.Entry<String, Map<String, Map<String, ConfigProperty>>> entry : serviceConfigurations.entrySet()) {
+      Map<String, Map<String, ConfigProperty>> typeMap = entry.getValue();
+      String serviceName = entry.getKey();
+      if (typeMap.containsKey(config) && !getExcludedConfigurationTypes(serviceName).contains(config)) {
+        return serviceName;
+      }
+    }
+    throw new IllegalArgumentException(
+        "Specified configuration type is not associated with any service: " + config);
+  }
+
+  /**
+   * Return the dependencies specified for the given component.
+   *
+   * @param component  component to get dependency information for
+   *
+   * @return collection of dependency information for the specified component
+   */
+  //todo: full dependency graph
+  public Collection<DependencyInfo> getDependenciesForComponent(String component) {
+    return dependencies.containsKey(component) ? dependencies.get(component) :
+        Collections.emptySet();
+  }
+
+  /**
+   * Get the service, if any, that a component dependency is conditional on.
+   *
+   * @param dependency  dependency to get conditional service for
+   *
+   * @return conditional service for provided component or null if dependency
+   *         is not conditional on a service
+   */
+  public String getConditionalServiceForDependency(DependencyInfo dependency) {
+    return dependencyConditionalServiceMap.get(dependency);
+  }
+
+  public String getExternalComponentConfig(String component) {
+    return dbDependencyInfo.get(component);
+  }
+
+  /**
+   * Obtain the required cardinality for the specified component.
+   */
+  public Cardinality getCardinality(String component) {
+    return new Cardinality(cardinalityRequirements.get(component));
+  }
+
+  /**
+   * Obtain auto-deploy information for the specified component.
+   */
+  public AutoDeployInfo getAutoDeployInfo(String component) {
+    return componentAutoDeployInfo.get(component);
+  }
+
+  public boolean isMasterComponent(String component) {
+    return masterComponents.contains(component);
+  }
+
+  public Configuration getConfiguration(Collection<String> services) {
+    Map<String, Map<String, Map<String, String>>> attributes = new HashMap<>();
+    Map<String, Map<String, String>> properties = new HashMap<>();
+
+    for (String service : services) {
+      Collection<String> serviceConfigTypes = getConfigurationTypes(service);
+      for (String type : serviceConfigTypes) {
+        Map<String, String> typeProps = properties.get(type);
+        if (typeProps == null) {
+          typeProps = new HashMap<>();
+          properties.put(type, typeProps);
+        }
+        typeProps.putAll(getConfigurationProperties(service, type));
+
+        Map<String, Map<String, String>> stackTypeAttributes = getConfigurationAttributes(service, type);
+        if (!stackTypeAttributes.isEmpty()) {
+          if (! attributes.containsKey(type)) {
+            attributes.put(type, new HashMap<>());
+          }
+          Map<String, Map<String, String>> typeAttributes = attributes.get(type);
+          for (Map.Entry<String, Map<String, String>> attribute : stackTypeAttributes.entrySet()) {
+            String attributeName = attribute.getKey();
+            Map<String, String> attributeProps = typeAttributes.get(attributeName);
+            if (attributeProps == null) {
+              attributeProps = new HashMap<>();
+              typeAttributes.put(attributeName, attributeProps);
+            }
+            attributeProps.putAll(attribute.getValue());
+          }
+        }
+      }
+    }
+    return new Configuration(properties, attributes);
+  }
+
+  public Configuration getConfiguration() {
+    Map<String, Map<String, Map<String, String>>> stackAttributes = new HashMap<>();
+    Map<String, Map<String, String>> stackConfigs = new HashMap<>();
+
+    for (String service : getServices()) {
+      for (String type : getAllConfigurationTypes(service)) {
+        Map<String, String> typeProps = stackConfigs.get(type);
+        if (typeProps == null) {
+          typeProps = new HashMap<>();
+          stackConfigs.put(type, typeProps);
+        }
+        typeProps.putAll(getConfigurationProperties(service, type));
+
+        Map<String, Map<String, String>> stackTypeAttributes = getConfigurationAttributes(service, type);
+        if (!stackTypeAttributes.isEmpty()) {
+          if (! stackAttributes.containsKey(type)) {
+            stackAttributes.put(type, new HashMap<>());
+          }
+          Map<String, Map<String, String>> typeAttrs = stackAttributes.get(type);
+          for (Map.Entry<String, Map<String, String>> attribute : stackTypeAttributes.entrySet()) {
+            String attributeName = attribute.getKey();
+            Map<String, String> attributes = typeAttrs.get(attributeName);
+            if (attributes == null) {
+              attributes = new HashMap<>();
+              typeAttrs.put(attributeName, attributes);
+            }
+            attributes.putAll(attribute.getValue());
+          }
+        }
+      }
+    }
+    return new Configuration(stackConfigs, stackAttributes);
+  }
+
+  /**
+   * Parse components for the specified service from the stack definition.
+   *
+   * @param service  service name
+   *
+   * @throws AmbariException an exception occurred getting components from the stack definition
+   */
+  private void parseComponents(String service) throws AmbariException {
+    Collection<String> componentSet = new HashSet<>();
+
+    Set<StackServiceComponentResponse> components = controller.getStackComponents(
+        Collections.singleton(new StackServiceComponentRequest(name, version, service, null)));
+
+    // stack service components
+    for (StackServiceComponentResponse component : components) {
+      String componentName = component.getComponentName();
+      componentSet.add(componentName);
+      componentService.put(componentName, service);
+      String cardinality = component.getCardinality();
+      if (cardinality != null) {
+        cardinalityRequirements.put(componentName, cardinality);
+      }
+      AutoDeployInfo autoDeploy = component.getAutoDeploy();
+      if (autoDeploy != null) {
+        componentAutoDeployInfo.put(componentName, autoDeploy);
+      }
+
+      // populate component dependencies
+      //todo: remove usage of AmbariMetaInfo
+      Collection<DependencyInfo> componentDependencies = controller.getAmbariMetaInfo().getComponentDependencies(
+          name, version, service, componentName);
+
+      if (componentDependencies != null && ! componentDependencies.isEmpty()) {
+        dependencies.put(componentName, componentDependencies);
+      }
+      if (component.isMaster()) {
+        masterComponents.add(componentName);
+      }
+    }
+    serviceComponents.put(service, componentSet);
+  }
+
+  /**
+   * Parse configurations for the specified service from the stack definition.
+   *
+   * @param stackService  service to parse the stack configuration for
+   *
+   * @throws AmbariException an exception occurred getting configurations from the stack definition
+   */
+  private void parseConfigurations(StackServiceResponse stackService) throws AmbariException {
+    String service = stackService.getServiceName();
+    Map<String, Map<String, ConfigProperty>> mapServiceConfig = new HashMap<>();
+    Map<String, Map<String, ConfigProperty>> mapRequiredServiceConfig = new HashMap<>();
+
+
+    serviceConfigurations.put(service, mapServiceConfig);
+    requiredServiceConfigurations.put(service, mapRequiredServiceConfig);
+
+    Set<StackConfigurationResponse> serviceConfigs = controller.getStackConfigurations(
+        Collections.singleton(new StackConfigurationRequest(name, version, service, null)));
+    Set<StackConfigurationResponse> stackLevelConfigs = controller.getStackLevelConfigurations(
+        Collections.singleton(new StackLevelConfigurationRequest(name, version, null)));
+    serviceConfigs.addAll(stackLevelConfigs);
+
+    // shouldn't have any required properties in stack level configuration
+    for (StackConfigurationResponse config : serviceConfigs) {
+      ConfigProperty configProperty = new ConfigProperty(config);
+      String type = configProperty.getType();
+
+      Map<String, ConfigProperty> mapTypeConfig = mapServiceConfig.get(type);
+      if (mapTypeConfig == null) {
+        mapTypeConfig = new HashMap<>();
+        mapServiceConfig.put(type, mapTypeConfig);
+      }
+
+      mapTypeConfig.put(config.getPropertyName(), configProperty);
+      if (config.isRequired()) {
+        Map<String, ConfigProperty> requiredTypeConfig = mapRequiredServiceConfig.get(type);
+        if (requiredTypeConfig == null) {
+          requiredTypeConfig = new HashMap<>();
+          mapRequiredServiceConfig.put(type, requiredTypeConfig);
+        }
+        requiredTypeConfig.put(config.getPropertyName(), configProperty);
+      }
+    }
+
+    // So far we added only config types that have properties defined
+    // in stack service definition. Since there might be config types
+    // with no properties defined we need to add those separately
+    Set<String> configTypes = stackService.getConfigTypes().keySet();
+    for (String configType: configTypes) {
+      if (!mapServiceConfig.containsKey(configType)) {
+        mapServiceConfig.put(configType, Collections.emptyMap());
+      }
+    }
+  }
+
+  private void parseStackConfigurations() throws AmbariException {
+
+    Set<StackConfigurationResponse> stackLevelConfigs = controller.getStackLevelConfigurations(
+        Collections.singleton(new StackLevelConfigurationRequest(name, version, null)));
+
+    for (StackConfigurationResponse config : stackLevelConfigs) {
+      ConfigProperty configProperty = new ConfigProperty(config);
+      String type = configProperty.getType();
+
+      Map<String, ConfigProperty> mapTypeConfig = stackConfigurations.get(type);
+      if (mapTypeConfig == null) {
+        mapTypeConfig = new HashMap<>();
+        stackConfigurations.put(type, mapTypeConfig);
+      }
+
+      mapTypeConfig.put(config.getPropertyName(),
+          configProperty);
+    }
+  }
+
+  /**
+   * Obtain the excluded configuration types from the StackServiceResponse
+   *
+   * @param stackServiceResponse the response object associated with this stack service
+   */
+  private void parseExcludedConfigurations(StackServiceResponse stackServiceResponse) {
+    excludedConfigurationTypes.put(stackServiceResponse.getServiceName(), stackServiceResponse.getExcludedConfigTypes());
+  }
+
+  /**
+   * Register conditional dependencies.
+   */
+  //todo: This information should be specified in the stack definition.
+  void registerConditionalDependencies() {
+    dbDependencyInfo.put("MYSQL_SERVER", "global/hive_database");
+  }
+
+  /**
+   * Contains a configuration property's value and attributes.
+   */
+  public static class ConfigProperty {
+    private ValueAttributesInfo propertyValueAttributes = null;
+    private String name;
+    private String value;
+    private Map<String, String> attributes;
+    private Set<PropertyInfo.PropertyType> propertyTypes;
+    private String type;
+    private Set<PropertyDependencyInfo> dependsOnProperties =
+      Collections.emptySet();
+
+    public ConfigProperty(StackConfigurationResponse config) {
+      this.name = config.getPropertyName();
+      this.value = config.getPropertyValue();
+      this.attributes = config.getPropertyAttributes();
+      this.propertyTypes = config.getPropertyType();
+      this.type = normalizeType(config.getType());
+      this.dependsOnProperties = config.getDependsOnProperties();
+      this.propertyValueAttributes = config.getPropertyValueAttributes();
+    }
+
+    public ConfigProperty(String type, String name, String value) {
+      this.type = type;
+      this.name = name;
+      this.value = value;
+    }
+
+    public String getName() {
+      return name;
+    }
+
+    public String getValue() {
+      return value;
+    }
+
+    public void setValue(String value) {
+      this.value = value;
+    }
+
+    public String getType() {
+      return type;
+    }
+
+    public Set<PropertyInfo.PropertyType> getPropertyTypes() {
+      return propertyTypes;
+    }
+
+    public void setPropertyTypes(Set<PropertyInfo.PropertyType> propertyTypes) {
+      this.propertyTypes = propertyTypes;
+    }
+
+    public Map<String, String> getAttributes() {
+      return attributes;
+    }
+
+    public void setAttributes(Map<String, String> attributes) {
+      this.attributes = attributes;
+    }
+
+    Set<PropertyDependencyInfo> getDependsOnProperties() {
+      return this.dependsOnProperties;
+    }
+
+    private String normalizeType(String type) {
+      //strip .xml from type
+      if (type.endsWith(".xml")) {
+        type = type.substring(0, type.length() - 4);
+      }
+      return type;
+    }
+
+    public ValueAttributesInfo getPropertyValueAttributes() {
+      return propertyValueAttributes;
+    }
+  }
+}

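StackV2 is essentially a set of per-service lookup tables built once from the
stack definition. The merge performed in getConfiguration() nests attributes
as type -> attribute name -> property name -> value; a small sketch of that
shape with hypothetical values (not part of the commit):

    # properties: type -> property -> value
    # attributes: type -> attribute -> property -> value
    properties = {'hdfs-site': {'dfs.replication': '3'}}
    attributes = {}

    incoming = {'final': {'dfs.replication': 'true'}}  # attributes for one type
    type_attrs = attributes.setdefault('hdfs-site', {})
    for attr_name, props in incoming.items():
        type_attrs.setdefault(attr_name, {}).update(props)

    assert attributes == {'hdfs-site': {'final': {'dfs.replication': 'true'}}}
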
http://git-wip-us.apache.org/repos/asf/ambari/blob/9c8ed58d/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintImplV2.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintImplV2.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintImplV2.java
new file mode 100644
index 0000000..fa893b3b
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintImplV2.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed
+ * on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.topology;
+
+import org.apache.ambari.server.controller.StackV2;
+import org.apache.ambari.server.orm.entities.BlueprintEntity;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Blueprint implementation.
+ */
+public class BlueprintImplV2 implements BlueprintV2 {
+
+
+  @Override
+  public String getName() {
+    return null;
+  }
+
+  @Override
+  public HostGroupV2 getHostGroup(String name) {
+    return null;
+  }
+
+  @Override
+  public Map<String, HostGroupV2> getHostGroups() {
+    return null;
+  }
+
+  @Override
+  public Collection<StackV2> getStacks() {
+    return null;
+  }
+
+  @Override
+  public Collection<ServiceGroup> getServiceGroups() {
+    return null;
+  }
+
+  @Override
+  public Collection<Service> getAllServices() {
+    return null;
+  }
+
+  @Override
+  public Collection<ComponentV2> getComponents(Service service) {
+    return null;
+  }
+
+  @Override
+  public Collection<HostGroupV2> getHostGroupsForService(Service service) {
+    return null;
+  }
+
+  @Override
+  public Collection<HostGroupV2> getHostGroupsForComponent(ComponentV2 component) {
+    return null;
+  }
+
+  @Override
+  public Configuration getConfiguration() {
+    return null;
+  }
+
+  @Override
+  public Setting getSetting() {
+    return null;
+  }
+
+  @Override
+  public String getRecoveryEnabled(String serviceName, String componentName) {
+    return null;
+  }
+
+  @Override
+  public String getCredentialStoreEnabled(String serviceName) {
+    return null;
+  }
+
+  @Override
+  public boolean shouldSkipFailure() {
+    return false;
+  }
+
+  @Override
+  public SecurityConfiguration getSecurity() {
+    return null;
+  }
+
+  @Override
+  public void validateTopology() throws InvalidTopologyException {
+
+  }
+
+  @Override
+  public void validateRequiredProperties() throws InvalidTopologyException {
+
+  }
+
+  @Override
+  public boolean isValidConfigType(String configType) {
+    return false;
+  }
+
+  @Override
+  public BlueprintEntity toEntity() {
+    return null;
+  }
+
+  @Override
+  public List<RepositorySetting> getRepositorySettings() {
+    return null;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/9c8ed58d/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintV2.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintV2.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintV2.java
new file mode 100644
index 0000000..134a614
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintV2.java
@@ -0,0 +1,179 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.topology;
+
+import org.apache.ambari.server.controller.StackV2;
+import org.apache.ambari.server.orm.entities.BlueprintEntity;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Blueprint representation.
+ */
+public interface BlueprintV2 {
+
+  /**
+   * Get the name of the blueprint.
+   *
+   * @return blueprint name
+   */
+  String getName();
+
+  /**
+   * Get a hostgroup specified by name.
+   *
+   * @param name  name of the host group to get
+   *
+   * @return the host group with the given name or null
+   */
+  HostGroupV2 getHostGroup(String name);
+
+  /**
+   * Get the host groups contained in the blueprint.
+   * @return map of host group name to host group
+   */
+  Map<String, HostGroupV2> getHostGroups();
+
+  /**
+   * Get the stacks associated with the blueprint.
+   *
+   * @return associated stacks
+   */
+  Collection<StackV2> getStacks();
+
+
+  Collection<ServiceGroup> getServiceGroups();
+
+  /**
+   * Get all of the services represented in the blueprint.
+   *
+   * @return collection of all represented services
+   */
+  Collection<Service> getAllServices();
+
+  /**
+   * Get the components that are included in the blueprint for the specified service.
+   *
+   * @param service  the service
+   *
+   * @return collection of components for the service.  Will not return null.
+   */
+  Collection<ComponentV2> getComponents(Service service);
+
+
+  /**
+   * Get the host groups which contain components for the specified service.
+   *
+   * @param service  the service
+   *
+   * @return collection of host groups containing components for the specified service;
+   *         will not return null
+   */
+  Collection<HostGroupV2> getHostGroupsForService(Service service);
+
+  /**
+   * Get the host groups which contain the given component.
+   *
+   * @param component  the component
+   *
+   * @return collection of host groups containing the specified component; will not return null
+   */
+  Collection<HostGroupV2> getHostGroupsForComponent(ComponentV2 component);
+
+  /**
+   * Get the Blueprint cluster scoped configuration.
+   * The blueprint cluster scoped configuration has the stack
+   * configuration with the config types associated with the blueprint
+   * set as its parent.
+   *
+   * @return blueprint cluster scoped configuration
+   */
+  Configuration getConfiguration();
+
+  /**
+   * Get the Blueprint cluster scoped setting.
+   * The blueprint cluster scoped setting has the setting properties
+   * with the setting names associated with the blueprint.
+   *
+   * @return blueprint cluster scoped setting
+   */
+  Setting getSetting();
+
+
+  /**
+   * Get whether a component is enabled for auto start.
+   *
+   * @param serviceName - Service name.
+   * @param componentName - Component name.
+   *
+   * @return null if value is not specified; true or false if specified.
+   */
+  String getRecoveryEnabled(String serviceName, String componentName);
+
+  /**
+   * Get whether a service is enabled for credential store use.
+   *
+   * @param serviceName - Service name.
+   *
+   * @return null if value is not specified; true or false if specified.
+   */
+  String getCredentialStoreEnabled(String serviceName);
+
+  /**
+   * Check if auto skip failure is enabled.
+   * @return true if enabled, otherwise false.
+   */
+  boolean shouldSkipFailure();
+
+
+  SecurityConfiguration getSecurity();
+
+  /**
+   * Validate the blueprint topology.
+   *
+   * @throws InvalidTopologyException if the topology is invalid
+   */
+  void validateTopology() throws InvalidTopologyException;
+
+  /**
+   * Validate that the blueprint contains all of the required properties.
+   *
+   * @throws InvalidTopologyException if the blueprint doesn't contain all required properties
+   */
+  void validateRequiredProperties() throws InvalidTopologyException;
+
+  /**
+   * A config type is valid if at least one service in the blueprint is related to it,
+   * with the exception of cluster-env and global.
+   * @param configType the config type to check
+   * @return true if the config type is valid; false otherwise
+   */
+  boolean isValidConfigType(String configType);
+
+  /**
+   * Obtain the blueprint as an entity.
+   *
+   * @return entity representation of the blueprint
+   */
+  BlueprintEntity toEntity();
+
+  List<RepositorySetting> getRepositorySettings();
+}

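For orientation, here is a minimal read-only walk over the interface above. This is an illustrative sketch only, not part of the commit; it assumes the concrete BlueprintV2 instance comes from elsewhere and that Service prints readably:

import java.util.Collection;
import java.util.Map;

public final class BlueprintV2Walker {

  private BlueprintV2Walker() {
  }

  /** Prints each host group with its components, then the host-group span of every service. */
  public static void printTopology(BlueprintV2 blueprint) {
    for (Map.Entry<String, HostGroupV2> entry : blueprint.getHostGroups().entrySet()) {
      HostGroupV2 group = entry.getValue();
      System.out.println(group.getFullyQualifiedName() + " -> " + group.getComponentNames());
    }
    for (Service service : blueprint.getAllServices()) {
      Collection<HostGroupV2> groups = blueprint.getHostGroupsForService(service);
      System.out.println(service + " spans " + groups.size() + " host group(s)");
    }
  }
}
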
http://git-wip-us.apache.org/repos/asf/ambari/blob/9c8ed58d/ambari-server/src/main/java/org/apache/ambari/server/topology/ComponentV2.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/ComponentV2.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/ComponentV2.java
new file mode 100644
index 0000000..175fe99
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/ComponentV2.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.topology;
+
+
+import org.apache.ambari.server.controller.internal.ProvisionAction;
+
+public class ComponentV2 {
+
+  private final String name;
+
+  private final Service service;
+
+  private final ProvisionAction provisionAction;
+
+  private final Configuration configuration;
+
+
+  public ComponentV2(String name, Service service) {
+    this(name, service, null, null);
+  }
+
+  public ComponentV2(String name, Service service, ProvisionAction provisionAction, Configuration configuration) {
+    this.name = name;
+    this.service = service;
+    this.provisionAction = provisionAction;
+    this.configuration = configuration;
+  }
+
+  /**
+   * Gets the name of this component
+   *
+   * @return component name
+   */
+  public String getName() {
+    return this.name;
+  }
+
+  /**
+   * Gets the provision action associated with this component.
+   *
+   * @return the provision action for this component, which
+   *         may be null if the default action is to be used
+   */
+  public ProvisionAction getProvisionAction() {
+    return this.provisionAction;
+  }
+
+  public Service getService() {
+    return service;
+  }
+
+  public Configuration getConfiguration() {
+    return configuration;
+  }
+}

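A hypothetical construction of the class above (illustrative only, not part of the commit; the ProvisionAction constant INSTALL_AND_START is assumed, as used elsewhere in Ambari):

// Builds a component with an explicit provision action and no component-level
// configuration; a null configuration is treated as "inherit the defaults".
ComponentV2 buildNameNode(Service hdfsService) {
  return new ComponentV2("NAMENODE", hdfsService,
      ProvisionAction.INSTALL_AND_START, null);
}

// The two-argument constructor is equivalent to passing null for both the
// provision action and the configuration.
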
http://git-wip-us.apache.org/repos/asf/ambari/blob/9c8ed58d/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupV2.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupV2.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupV2.java
new file mode 100644
index 0000000..506fed0
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupV2.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.topology;
+
+import org.apache.ambari.server.controller.internal.ProvisionAction;
+
+import java.util.Collection;
+import java.util.regex.Pattern;
+
+/**
+ * Host Group representation.
+ */
+public interface HostGroupV2 {
+
+  /**
+   * Compiled regex for hostgroup token.
+   */
+  Pattern HOSTGROUP_REGEX = Pattern.compile("%HOSTGROUP::(\\S+?)%");
+  /**
+   * Get the name of the host group.
+   *
+   * @return the host group name
+   */
+  String getName();
+
+  /**
+   * Get the name of the associated blueprint
+   *
+   * @return associated blueprint name
+   */
+  String getBlueprintName();
+
+  /**
+   * Get the fully qualified host group name in the form of
+   * blueprintName:hostgroupName
+   *
+   * @return fully qualified host group name
+   */
+  String getFullyQualifiedName();
+
+  /**
+   * Get all of the host group components.
+   *
+   * @return collection of component instances
+   */
+  Collection<ComponentV2> getComponents();
+
+  /**
+   * Get all of the host group component names
+   *
+   * @return collection of component names as String
+   */
+  Collection<String> getComponentNames();
+
+  /**
+   * Get all host group component names for instances
+   *   that have the specified provision action association.
+   *
+   * @param provisionAction the provision action that must be associated
+   *                          with the component names returned
+   *
+   * @return collection of component names as String that are associated with
+   *           the specified provision action
+   */
+  Collection<String> getComponentNames(ProvisionAction provisionAction);
+
+  /**
+   * Get the host group components which belong to the specified service.
+   *
+   * @param service  the service
+   *
+   * @return collection of components for the specified service; will not return null
+   */
+  Collection<ComponentV2> getComponents(Service service);
+
+  /**
+   * Determine if the host group contains a master component.
+   *
+   * @return true if the host group contains a master component; false otherwise
+   */
+  boolean containsMasterComponent();
+
+  /**
+   * Get all of the services associated with the host group components.
+   *
+   * @return collection of service names
+   */
+  Collection<Service> getServices();
+
+  /**
+   * Get the configuration associated with the host group.
+   * The host group configuration has the blueprint cluster scoped
+   * configuration set as its parent.
+   *
+   * @return host group configuration
+   */
+  Configuration getConfiguration();
+
+  /**
+   * Get the cardinality value that was specified for the host group.
+   * This is simply metadata for the stack that a deployer can use;
+   * this information is not used by Ambari.
+   *
+   * @return the cardinality specified for the host group
+   */
+  String getCardinality();
+}
+

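The HOSTGROUP_REGEX constant above drives %HOSTGROUP::name% token resolution. A self-contained sketch of a match (the sample URI is made up for illustration):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class HostGroupTokenDemo {

  // Same pattern as HostGroupV2.HOSTGROUP_REGEX above.
  private static final Pattern HOSTGROUP_REGEX = Pattern.compile("%HOSTGROUP::(\\S+?)%");

  public static void main(String[] args) {
    Matcher matcher = HOSTGROUP_REGEX.matcher("hdfs://%HOSTGROUP::master_1%:8020");
    while (matcher.find()) {
      System.out.println(matcher.group(1));  // prints: master_1
    }
  }
}
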

[29/50] [abbrv] ambari git commit: Revert "AMBARI-22160. hadooplzo package installation failed on devdeploys (aonishuk)"

Posted by ja...@apache.org.
Revert "AMBARI-22160. hadooplzo package installation failed on devdeploys (aonishuk)"

This reverts commit fc80a1837cc613160e3c60cc3290b7e517b5cd45.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f1c4626b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f1c4626b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f1c4626b

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: f1c4626b9b4a6aafc48b71bd7d4e892362af1843
Parents: beef96d
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Mon Oct 9 12:22:35 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Mon Oct 9 12:22:35 2017 -0400

----------------------------------------------------------------------
 .../libraries/script/script.py                  |   44 +-
 .../resources/Ambari-DDL-AzureDB-CREATE.sql     | 2147 ------------------
 ambari-server/snippet/Snippet.java              |    8 -
 .../stacks/2.0.6/HBASE/test_hbase_master.py     |    2 -
 .../src/test/python/stacks/utils/RMFTestCase.py |    4 +-
 5 files changed, 15 insertions(+), 2190 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f1c4626b/ambari-common/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index cd8fce4..d5b4469 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -501,7 +501,6 @@ class Script(object):
       Script.stack_version_from_distro_select = pkg_provider.get_installed_package_version(
               stack_tools.get_stack_tool_package(stack_tools.STACK_SELECTOR_NAME))
 
-
     return Script.stack_version_from_distro_select
 
 
@@ -526,20 +525,22 @@ class Script(object):
     """
     This function replaces ${stack_version} placeholder with actual version.  If the package
     version is passed from the server, use that as an absolute truth.
-
+    
     :param name name of the package
     :param repo_version actual version of the repo currently installing
     """
-    if not STACK_VERSION_PLACEHOLDER in name:
-      return name
-
     stack_version_package_formatted = ""
 
+    if not repo_version:
+      repo_version = self.get_stack_version_before_packages_installed()
+
     package_delimiter = '-' if OSCheck.is_ubuntu_family() else '_'
 
     # repositoryFile is the truth
     # package_version should be made to the form W_X_Y_Z_nnnn
     package_version = default("repositoryFile/repoVersion", None)
+    if package_version is not None:
+      package_version = package_version.replace('.', package_delimiter).replace('-', package_delimiter)
 
     # TODO remove legacy checks
     if package_version is None:
@@ -549,16 +550,6 @@ class Script(object):
     if package_version is None:
       package_version = default("hostLevelParams/package_version", None)
 
-    if package_version is None or '-' not in package_version:
-        self.load_available_packages()
-        package_name = self.get_package_from_available(name, self.available_packages_in_repos)
-        if package_name is None:
-          raise Fail("Cannot match package for regexp name {0}. Available packages: {1}".format(name, self.available_packages_in_repos))
-        return package_name
-        
-    if package_version is not None:
-      package_version = package_version.replace('.', package_delimiter).replace('-', package_delimiter)
-
     # The cluster effective version comes down when the version is known after the initial
     # install.  In that case we should not be guessing which version when invoking INSTALL, but
     # use the supplied version to build the package_version
@@ -577,7 +568,6 @@ class Script(object):
 
     # Wildcards cause a lot of troubles with installing packages, if the version contains wildcards we try to specify it.
     if not package_version or '*' in package_version:
-      repo_version = self.get_stack_version_before_packages_installed()
       stack_version_package_formatted = repo_version.replace('.', package_delimiter).replace('-', package_delimiter) if STACK_VERSION_PLACEHOLDER in name else name
 
     package_name = name.replace(STACK_VERSION_PLACEHOLDER, stack_version_package_formatted)
@@ -770,17 +760,6 @@ class Script(object):
     """
     self.install_packages(env)
 
-  def load_available_packages(self):
-    if self.available_packages_in_repos:
-      return self.available_packages_in_repos
-
-    pkg_provider = get_provider("Package")
-    try:
-      self.available_packages_in_repos = pkg_provider.get_available_packages_in_repos(Script.config['repositoryFile']['repositories'])
-    except Exception as err:
-      Logger.exception("Unable to load available packages")
-      self.available_packages_in_repos = []
-
   def install_packages(self, env):
     """
    List of packages that are required by service is received from the server
@@ -800,14 +779,20 @@ class Script(object):
         return
       pass
     try:
+      package_list_str = config['hostLevelParams']['package_list']
       agent_stack_retry_on_unavailability = bool(config['hostLevelParams']['agent_stack_retry_on_unavailability'])
       agent_stack_retry_count = int(config['hostLevelParams']['agent_stack_retry_count'])
-      package_list_str = config['hostLevelParams']['package_list']
+      pkg_provider = get_provider("Package")
+      try:
+        available_packages_in_repos = pkg_provider.get_available_packages_in_repos(config['repositoryFile']['repositories'])
+      except Exception as err:
+        Logger.exception("Unable to load available packages")
+        available_packages_in_repos = []
       if isinstance(package_list_str, basestring) and len(package_list_str) > 0:
         package_list = json.loads(package_list_str)
         for package in package_list:
           if self.check_package_condition(package):
-            name = self.format_package_name(package['name'])
+            name = self.get_package_from_available(package['name'], available_packages_in_repos)
             # HACK: On Windows, only install ambari-metrics packages using Choco Package Installer
             # TODO: Update this once choco packages for hadoop are created. This is because, service metainfo.xml support
             # <osFamily>any<osFamily> which would cause installation failure on Windows.
@@ -1107,6 +1092,5 @@ class Script(object):
 
 
   def __init__(self):
-    self.available_packages_in_repos = []
     if Script.instance is not None:
       raise Fail("An instantiation already exists! Use, get_instance() method.")


[04/50] [abbrv] ambari git commit: AMBARI-22161. Unable to ignore service check failures during Upgrade (alexantonenko)

Posted by ja...@apache.org.
AMBARI-22161. Unable to ignore service check failures during Upgrade (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a9d26986
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a9d26986
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a9d26986

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: a9d26986d615cee43de6bddca12fd0a23b6526b8
Parents: 388cb41
Author: Alex Antonenko <aa...@hortonworks.com>
Authored: Fri Oct 6 17:10:48 2017 +0300
Committer: Alex Antonenko <aa...@hortonworks.com>
Committed: Fri Oct 6 17:10:48 2017 +0300

----------------------------------------------------------------------
 .../admin/stack_upgrade/upgrade_wizard_view.js  |  2 +-
 .../stack_upgrade/upgrade_wizard_view_test.js   | 28 +++++++++++++++-----
 2 files changed, 23 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a9d26986/ambari-web/app/views/main/admin/stack_upgrade/upgrade_wizard_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/admin/stack_upgrade/upgrade_wizard_view.js b/ambari-web/app/views/main/admin/stack_upgrade/upgrade_wizard_view.js
index e1689c2..87e2adf 100644
--- a/ambari-web/app/views/main/admin/stack_upgrade/upgrade_wizard_view.js
+++ b/ambari-web/app/views/main/admin/stack_upgrade/upgrade_wizard_view.js
@@ -126,7 +126,7 @@ App.upgradeWizardView = Em.View.extend({
     var associatedVersion = this.get('controller.upgradeData.Upgrade.associated_version');
     var version = associatedVersion && App.RepositoryVersion.find().findProperty('repositoryVersion', associatedVersion);
     var isPatchOrMaint = version && ( version.get('isPatch') || version.get('isMaint') );
-    return failedItem && failedItem.get('skippable') && !isPatchOrMaint;
+    return failedItem && failedItem.get('skippable') && !(this.get('isFinalizeItem') && isPatchOrMaint);
   }.property('failedItem'),
 
   /**

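Rendered as a plain predicate (a Java sketch for readability; the view itself is Ember JavaScript), the new rule is: a failed, skippable item may be skipped unless it is the finalize item of a patch or maintenance upgrade:

// 'skippable' stands for failedItem && failedItem.get('skippable') above.
boolean canSkipFailedItem(boolean skippable, boolean isFinalizeItem, boolean isPatchOrMaint) {
  return skippable && !(isFinalizeItem && isPatchOrMaint);
}
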
http://git-wip-us.apache.org/repos/asf/ambari/blob/a9d26986/ambari-web/test/views/main/admin/stack_upgrade/upgrade_wizard_view_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/admin/stack_upgrade/upgrade_wizard_view_test.js b/ambari-web/test/views/main/admin/stack_upgrade/upgrade_wizard_view_test.js
index 0107975..a739838 100644
--- a/ambari-web/test/views/main/admin/stack_upgrade/upgrade_wizard_view_test.js
+++ b/ambari-web/test/views/main/admin/stack_upgrade/upgrade_wizard_view_test.js
@@ -919,20 +919,36 @@ describe('App.upgradeWizardView', function () {
     beforeEach(function () {
       view.reopen({'failedItem': Em.Object.create({skippable: true}) });
       view.set('controller.upgradeData.Upgrade', {associated_version: '2.1.1'});
+      var findResult = [Em.Object.create({repositoryVersion: '2.1.1', isPatch: true})];
+      sinon.stub(App.RepositoryVersion, 'find', function(){
+        return findResult;
+      });
+    });
+
+    afterEach(function () {
+      App.RepositoryVersion.find.restore();
     })
     it("Should return true if can not find upgrade", function () {
       view.propertyDidChange('canSkipFailedItem');
-      expect(view.get('canSkipFailedItem')).to.be.true
+      expect(view.get('canSkipFailedItem')).to.be.true;
     });
 
-    it("Should return false if upgrade is patch or maint", function () {
-      var findResult = [Em.Object.create({repositoryVersion: '2.1.1', isPatch: true})];
-      sinon.stub(App.RepositoryVersion, 'find', function(){
-        return findResult;
+    it("Should return false if upgrade is patch or maint and item is final", function () {
+      view.reopen({
+        isFinalizeItem: true
       });
       view.propertyDidChange('canSkipFailedItem');
       expect(view.get('canSkipFailedItem')).to.be.false;
-      App.RepositoryVersion.find.restore();
+    });
+
+    it("Should return true if upgrade is patch or maint and item is not final", function () {
+
+      view.reopen({
+        isFinalizeItem: false
+      });
+      view.propertyDidChange('canSkipFailedItem');
+
+      expect(view.get('canSkipFailedItem')).to.be.true;
     });
 
   });


[42/50] [abbrv] ambari git commit: AMBARI-22160. hadooplzo package installation failed on devdeploys (aonishuk)

Posted by ja...@apache.org.
AMBARI-22160. hadooplzo package installation failed on devdeploys (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b1295362
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b1295362
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b1295362

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: b1295362b9e702dd37bbb3995437d0c4e311ce9f
Parents: cec9f73
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Tue Oct 10 16:09:28 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Tue Oct 10 16:09:28 2017 +0300

----------------------------------------------------------------------
 .../libraries/script/script.py                  | 45 ++++++++++++++------
 .../HDFS/2.1.0.2.0/package/scripts/hdfs.py      | 10 +++--
 .../2.1.0.2.0/package/scripts/install_params.py |  6 ---
 .../2.1.0.2.0/package/scripts/params_linux.py   |  2 -
 .../HDFS/3.0.0.3.0/package/scripts/hdfs.py      | 10 +++--
 .../3.0.0.3.0/package/scripts/install_params.py |  6 ---
 .../3.0.0.3.0/package/scripts/params_linux.py   |  2 -
 .../OOZIE/4.0.0.2.0/package/scripts/oozie.py    |  6 ++-
 .../4.0.0.2.0/package/scripts/params_linux.py   |  3 --
 .../OOZIE/4.2.0.3.0/package/scripts/oozie.py    |  5 ++-
 .../4.2.0.3.0/package/scripts/params_linux.py   |  3 --
 .../stacks/2.0.6/HBASE/test_hbase_master.py     |  2 +
 .../src/test/python/stacks/utils/RMFTestCase.py |  4 +-
 13 files changed, 56 insertions(+), 48 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b1295362/ambari-common/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index d5b4469..bf8c0dc 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -501,6 +501,7 @@ class Script(object):
       Script.stack_version_from_distro_select = pkg_provider.get_installed_package_version(
               stack_tools.get_stack_tool_package(stack_tools.STACK_SELECTOR_NAME))
 
+
     return Script.stack_version_from_distro_select
 
 
@@ -525,22 +526,20 @@ class Script(object):
     """
     This function replaces ${stack_version} placeholder with actual version.  If the package
     version is passed from the server, use that as an absolute truth.
-    
+
     :param name name of the package
     :param repo_version actual version of the repo currently installing
     """
-    stack_version_package_formatted = ""
+    if not STACK_VERSION_PLACEHOLDER in name:
+      return name
 
-    if not repo_version:
-      repo_version = self.get_stack_version_before_packages_installed()
+    stack_version_package_formatted = ""
 
     package_delimiter = '-' if OSCheck.is_ubuntu_family() else '_'
 
     # repositoryFile is the truth
     # package_version should be made to the form W_X_Y_Z_nnnn
     package_version = default("repositoryFile/repoVersion", None)
-    if package_version is not None:
-      package_version = package_version.replace('.', package_delimiter).replace('-', package_delimiter)
 
     # TODO remove legacy checks
     if package_version is None:
@@ -550,6 +549,17 @@ class Script(object):
     if package_version is None:
       package_version = default("hostLevelParams/package_version", None)
 
+    package_version = None
+    if (package_version is None or '-' not in package_version) and default('/repositoryFile', None):
+      self.load_available_packages()
+      package_name = self.get_package_from_available(name, self.available_packages_in_repos)
+      if package_name is None:
+        raise Fail("Cannot match package for regexp name {0}. Available packages: {1}".format(name, self.available_packages_in_repos))
+      return package_name
+
+    if package_version is not None:
+      package_version = package_version.replace('.', package_delimiter).replace('-', package_delimiter)
+
     # The cluster effective version comes down when the version is known after the initial
     # install.  In that case we should not be guessing which version when invoking INSTALL, but
     # use the supplied version to build the package_version
@@ -568,6 +578,7 @@ class Script(object):
 
     # Wildcards cause a lot of troubles with installing packages, if the version contains wildcards we try to specify it.
     if not package_version or '*' in package_version:
+      repo_version = self.get_stack_version_before_packages_installed()
       stack_version_package_formatted = repo_version.replace('.', package_delimiter).replace('-', package_delimiter) if STACK_VERSION_PLACEHOLDER in name else name
 
     package_name = name.replace(STACK_VERSION_PLACEHOLDER, stack_version_package_formatted)
@@ -760,6 +771,19 @@ class Script(object):
     """
     self.install_packages(env)
 
+  def load_available_packages(self):
+    if self.available_packages_in_repos:
+      return self.available_packages_in_repos
+
+
+    pkg_provider = get_provider("Package")   
+    try:
+      self.available_packages_in_repos = pkg_provider.get_available_packages_in_repos(self.get_config()['repositoryFile']['repositories'])
+    except Exception as err:
+      Logger.exception("Unable to load available packages")
+      self.available_packages_in_repos = []
+
+
   def install_packages(self, env):
     """
    List of packages that are required by service is received from the server
@@ -782,17 +806,11 @@ class Script(object):
       package_list_str = config['hostLevelParams']['package_list']
       agent_stack_retry_on_unavailability = bool(config['hostLevelParams']['agent_stack_retry_on_unavailability'])
       agent_stack_retry_count = int(config['hostLevelParams']['agent_stack_retry_count'])
-      pkg_provider = get_provider("Package")
-      try:
-        available_packages_in_repos = pkg_provider.get_available_packages_in_repos(config['repositoryFile']['repositories'])
-      except Exception as err:
-        Logger.exception("Unable to load available packages")
-        available_packages_in_repos = []
       if isinstance(package_list_str, basestring) and len(package_list_str) > 0:
         package_list = json.loads(package_list_str)
         for package in package_list:
           if self.check_package_condition(package):
-            name = self.get_package_from_available(package['name'], available_packages_in_repos)
+            name = self.format_package_name(package['name'])
             # HACK: On Windows, only install ambari-metrics packages using Choco Package Installer
             # TODO: Update this once choco packages for hadoop are created. This is because, service metainfo.xml support
             # <osFamily>any<osFamily> which would cause installation failure on Windows.
@@ -1092,5 +1110,6 @@ class Script(object):
 
 
   def __init__(self):
+    self.available_packages_in_repos = []
     if Script.instance is not None:
       raise Fail("An instantiation already exists! Use, get_instance() method.")

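For reference, the ${stack_version} substitution that format_package_name performs, as a Java sketch (illustrative only; the delimiter rule, '-' on Ubuntu families and '_' elsewhere, comes from the Python above, and the sample values are made up):

public class PackageNameFormatter {

  static final String STACK_VERSION_PLACEHOLDER = "${stack_version}";

  static String formatPackageName(String name, String repoVersion, boolean isUbuntuFamily) {
    if (!name.contains(STACK_VERSION_PLACEHOLDER)) {
      return name;  // nothing to substitute
    }
    String delimiter = isUbuntuFamily ? "-" : "_";
    // e.g. "2.6.3.0-235" becomes "2_6_3_0_235" on non-Ubuntu families
    String formatted = repoVersion.replace(".", delimiter).replace("-", delimiter);
    return name.replace(STACK_VERSION_PLACEHOLDER, formatted);
  }

  public static void main(String[] args) {
    System.out.println(formatPackageName("hadooplzo_${stack_version}", "2.6.3.0-235", false));
    // prints: hadooplzo_2_6_3_0_235
  }
}
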
http://git-wip-us.apache.org/repos/asf/ambari/blob/b1295362/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
index e054209..07c7616 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
@@ -25,6 +25,7 @@ from resource_management.core.resources import Package
 from resource_management.core.source import Template
 from resource_management.core.resources.service import ServiceConfig
 from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
 import os
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from ambari_commons import OSConst
@@ -138,10 +139,11 @@ def hdfs(name=None):
        content=Template("slaves.j2")
   )
   
-  if params.lzo_enabled and len(params.lzo_packages) > 0:
-      Package(params.lzo_packages,
-              retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
-              retry_count=params.agent_stack_retry_count)
+  if params.lzo_enabled:
+    lzo_packages = get_lzo_packages(params.stack_version_unformatted)
+    Package(lzo_packages,
+            retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
+            retry_count=params.agent_stack_retry_count)
       
 def install_snappy():
   import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1295362/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/install_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/install_params.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/install_params.py
index fe488c3..235f231 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/install_params.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/install_params.py
@@ -23,7 +23,6 @@ if OSCheck.is_windows_family():
   exclude_packages = []
 else:
   from resource_management.libraries.functions.default import default
-  from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
   from resource_management.libraries.script.script import Script
 
   _config = Script.get_config()
@@ -32,8 +31,3 @@ else:
   # The logic for LZO also exists in OOZIE's params.py
   io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
   lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
-  lzo_packages = get_lzo_packages(stack_version_unformatted)
-
-  exclude_packages = []
-  if not lzo_enabled:
-    exclude_packages += lzo_packages

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1295362/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
index 76b430b..bb6349b 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
@@ -40,7 +40,6 @@ from resource_management.libraries.functions.get_not_managed_resources import ge
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions.format_jvm_option import format_jvm_option
-from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
 from resource_management.libraries.functions.hdfs_utils import is_https_enabled_in_hdfs
 from resource_management.libraries.functions import is_empty
 from resource_management.libraries.functions.get_architecture import get_architecture
@@ -389,7 +388,6 @@ HdfsResource = functools.partial(
 # The logic for LZO also exists in OOZIE's params.py
 io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
 lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
-lzo_packages = get_lzo_packages(stack_version_unformatted)
   
 name_node_params = default("/commandParams/namenode", None)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1295362/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
index e054209..07c7616 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
@@ -25,6 +25,7 @@ from resource_management.core.resources import Package
 from resource_management.core.source import Template
 from resource_management.core.resources.service import ServiceConfig
 from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
 import os
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from ambari_commons import OSConst
@@ -138,10 +139,11 @@ def hdfs(name=None):
        content=Template("slaves.j2")
   )
   
-  if params.lzo_enabled and len(params.lzo_packages) > 0:
-      Package(params.lzo_packages,
-              retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
-              retry_count=params.agent_stack_retry_count)
+  if params.lzo_enabled:
+    lzo_packages = get_lzo_packages(params.stack_version_unformatted)
+    Package(lzo_packages,
+            retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
+            retry_count=params.agent_stack_retry_count)
       
 def install_snappy():
   import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1295362/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py
index fe488c3..235f231 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py
@@ -23,7 +23,6 @@ if OSCheck.is_windows_family():
   exclude_packages = []
 else:
   from resource_management.libraries.functions.default import default
-  from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
   from resource_management.libraries.script.script import Script
 
   _config = Script.get_config()
@@ -32,8 +31,3 @@ else:
   # The logic for LZO also exists in OOZIE's params.py
   io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
   lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
-  lzo_packages = get_lzo_packages(stack_version_unformatted)
-
-  exclude_packages = []
-  if not lzo_enabled:
-    exclude_packages += lzo_packages

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1295362/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
index de735f4..2fa6208 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
@@ -40,7 +40,6 @@ from resource_management.libraries.functions.get_not_managed_resources import ge
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions.format_jvm_option import format_jvm_option
-from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
 from resource_management.libraries.functions.hdfs_utils import is_https_enabled_in_hdfs
 from resource_management.libraries.functions import is_empty
 from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs, generate_ranger_service_config
@@ -378,7 +377,6 @@ HdfsResource = functools.partial(
 # The logic for LZO also exists in OOZIE's params.py
 io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
 lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
-lzo_packages = get_lzo_packages(stack_version_unformatted)
   
 name_node_params = default("/commandParams/namenode", None)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1295362/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
index 64f9d54..f215a1e 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
@@ -37,6 +37,7 @@ from resource_management.libraries.functions.copy_tarball import get_current_ver
 from resource_management.libraries.resources.xml_config import XmlConfig
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions.security_commons import update_credential_provider_path
+from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
 from resource_management.core.resources.packaging import Package
 from resource_management.core.shell import as_user, as_sudo, call, checked_call
 from resource_management.core.exceptions import Fail
@@ -305,8 +306,9 @@ def oozie_server_specific(upgrade_type):
     Execute(format('{sudo} chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension-*.jar'),
       not_if  = no_op_test)
 
-  if params.lzo_enabled and len(params.all_lzo_packages) > 0:
-    Package(params.all_lzo_packages,
+  if params.lzo_enabled:
+    all_lzo_packages = get_lzo_packages(params.stack_version_unformatted)
+    Package(all_lzo_packages,
             retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
             retry_count=params.agent_stack_retry_count)
     Execute(format('{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'),

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1295362/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
index b66e157..a0f0672 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
@@ -30,7 +30,6 @@ from resource_management.libraries.functions import get_port_from_url
 from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
 from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
 from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
 from resource_management.libraries.functions.expect import expect
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions.get_architecture import get_architecture
@@ -388,5 +387,3 @@ is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled'
 # The logic for LZO also exists in HDFS' params.py
 io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
 lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
-
-all_lzo_packages = get_lzo_packages(stack_version_unformatted)

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1295362/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py
index d916d3b..0771e93 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py
@@ -275,8 +275,9 @@ def oozie_server_specific():
     Execute(format('{sudo} chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension-*.jar'),
       not_if  = no_op_test)
 
-  if params.lzo_enabled and len(params.all_lzo_packages) > 0:
-    Package(params.all_lzo_packages,
+  if params.lzo_enabled:
+    all_lzo_packages = get_lzo_packages(params.stack_version_unformatted)
+    Package(all_lzo_packages,
             retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
             retry_count=params.agent_stack_retry_count)
     Execute(format('{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'),

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1295362/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py
index d30a465..70b89b7 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py
@@ -28,7 +28,6 @@ from resource_management.libraries.functions import get_port_from_url
 from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
 from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
 from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
 from resource_management.libraries.functions.expect import expect
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions.get_architecture import get_architecture
@@ -370,5 +369,3 @@ is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled'
 # The logic for LZO also exists in HDFS' params.py
 io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
 lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
-
-all_lzo_packages = get_lzo_packages(stack_version_unformatted)

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1295362/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
index 2224d31..e32393d 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
@@ -95,8 +95,10 @@ class TestHBaseMaster(RMFTestCase):
                          try_install=True,
                          os_type=('Redhat', '6.4', 'Final'),
                          checked_call_mocks = [(0, "OK.", "")],
+                         available_packages_in_repos = ['hbase_2_3_0_1_1234'],
                          )
 
+
       # only assert that the correct package is trying to be installed
       self.assertResourceCalled('Package', 'hbase_2_3_0_1_1234',
                                 retry_count=5,

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1295362/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
index bff8642..ae33a2a 100644
--- a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
+++ b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
@@ -80,7 +80,8 @@ class RMFTestCase(TestCase):
                     mocks_dict={},
                     try_install=False,
                     command_args=[],
-                    log_out_files=False):
+                    log_out_files=False,
+                    available_packages_in_repos = []):
 
     norm_path = os.path.normpath(path)
 
@@ -125,6 +126,7 @@ class RMFTestCase(TestCase):
         Script.instance = None
         script_class_inst = RMFTestCase._get_attr(script_module, classname)()
         script_class_inst.log_out_files = log_out_files
+        script_class_inst.available_packages_in_repos = available_packages_in_repos
         method = RMFTestCase._get_attr(script_class_inst, command)
     except IOError, err:
       raise RuntimeError("Cannot load class %s from %s: %s" % (classname, norm_path, err.message))


[46/50] [abbrv] ambari git commit: AMBARI-22172. Change log level for ClusterNotFoundException (magyari_sandor)

Posted by ja...@apache.org.
AMBARI-22172. Change log level for ClusterNotFoundException (magyari_sandor)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/24c35893
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/24c35893
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/24c35893

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: 24c35893b0be52146bc821b6d44eece8fd49c50a
Parents: 2d23e12
Author: Sandor Magyari <sm...@hortonworks.com>
Authored: Tue Oct 10 13:19:39 2017 +0200
Committer: Sandor Magyari <sm...@hortonworks.com>
Committed: Tue Oct 10 17:10:40 2017 +0200

----------------------------------------------------------------------
 .../ambari/server/controller/AmbariManagementControllerImpl.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/24c35893/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 8c4888c..b0eb8ac 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -1126,7 +1126,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     try {
       cluster = clusters.getCluster(request.getClusterName());
     } catch (ClusterNotFoundException e) {
-      LOG.error("Cluster not found ", e);
+      LOG.info(e.getMessage());
       throw new ParentObjectNotFoundException("Parent Cluster resource doesn't exist", e);
     }
 


[02/50] [abbrv] ambari git commit: AMBARI-22158. Ambari schema upgrade fails when upgrading ambari from 2.5.1.0 to 2.6.0.0 and using oracle as database (dlysnichenko)

Posted by ja...@apache.org.
AMBARI-22158. Ambari schema upgrade fails when upgrading ambari from 2.5.1.0 to 2.6.0.0 and using oracle as database (dlysnichenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f44c8669
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f44c8669
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f44c8669

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: f44c86690a64523aed04bfd76e5c38e194e731ba
Parents: b358ee2
Author: Lisnichenko Dmitro <dl...@hortonworks.com>
Authored: Fri Oct 6 17:02:13 2017 +0300
Committer: Lisnichenko Dmitro <dl...@hortonworks.com>
Committed: Fri Oct 6 17:03:10 2017 +0300

----------------------------------------------------------------------
 .../org/apache/ambari/server/upgrade/UpgradeCatalog260.java    | 6 ++++++
 1 file changed, 6 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f44c8669/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java
index c31469e..866a501 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java
@@ -144,11 +144,14 @@ public class UpgradeCatalog260 extends AbstractUpgradeCatalog {
   public static final String CURRENT = "CURRENT";
   public static final String SELECTED = "1";
   public static final String VIEWURL_TABLE = "viewurl";
+  public static final String VIEWINSTANCE_TABLE = "viewinstance";
   public static final String PK_VIEWURL = "PK_viewurl";
   public static final String URL_ID_COLUMN = "url_id";
   public static final String STALE_POSTGRESS_VIEWURL_PKEY = "viewurl_pkey";
   public static final String USERS_TABLE = "users";
   public static final String STALE_POSTGRESS_USERS_LDAP_USER_KEY = "users_ldap_user_key";
+  public static final String SHORT_URL_COLUMN = "short_url";
+  public static final String FK_INSTANCE_URL_ID = "FK_instance_url_id";
 
 
   /**
@@ -205,8 +208,11 @@ public class UpgradeCatalog260 extends AbstractUpgradeCatalog {
    * Adds the {@value #PK_VIEWURL} constraint.
    */
   private void addViewUrlPKConstraint() throws SQLException {
+    dbAccessor.dropFKConstraint(VIEWINSTANCE_TABLE, FK_INSTANCE_URL_ID);
     dbAccessor.dropPKConstraint(VIEWURL_TABLE, STALE_POSTGRESS_VIEWURL_PKEY);
     dbAccessor.addPKConstraint(VIEWURL_TABLE, PK_VIEWURL, URL_ID_COLUMN);
+    dbAccessor.addFKConstraint(VIEWINSTANCE_TABLE, FK_INSTANCE_URL_ID,
+        SHORT_URL_COLUMN, VIEWURL_TABLE, URL_ID_COLUMN, false);
   }
 
   /**


[07/50] [abbrv] ambari git commit: AMBARI-21601 [addendum]. Pre-configure services when Kerberos is enabled to reduce number of core service restarts when services are added (Eugene Chekanskiy via rlevas)

Posted by ja...@apache.org.
AMBARI-21601 [addendum]. Pre-configure services when Kerberos is enabled to reduce number of core service restarts when services are added (Eugene Chekanskiy via rlevas)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/01b79aae
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/01b79aae
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/01b79aae

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: 01b79aae5553dcd89464d8f6558bdba031417a3f
Parents: 8b83a0a
Author: Robert Levas <rl...@hortonworks.com>
Authored: Fri Oct 6 12:26:16 2017 -0400
Committer: Robert Levas <rl...@hortonworks.com>
Committed: Fri Oct 6 12:26:16 2017 -0400

----------------------------------------------------------------------
 .../resources/stacks/HDP/2.6/kerberos_preconfigure.json     | 9 +++++++++
 1 file changed, 9 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/01b79aae/ambari-server/src/main/resources/stacks/HDP/2.6/kerberos_preconfigure.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/kerberos_preconfigure.json b/ambari-server/src/main/resources/stacks/HDP/2.6/kerberos_preconfigure.json
index 9c29393..8460958 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/kerberos_preconfigure.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/kerberos_preconfigure.json
@@ -16,6 +16,15 @@
             "local_username": "beacon"
           }
         }
+      ],
+      "configurations": [
+          {
+            "core-site": {
+              "hadoop.proxyuser.beacon.groups": "*",
+              "hadoop.proxyuser.beacon.hosts": "*",
+              "hadoop.proxyuser.beacon.users": "*"
+            }
+        }
       ]
     }
   ]


[11/50] [abbrv] ambari git commit: AMBARI-21776. Move druid version to druid 0.10.1 and drop TP flag. (Slim Bouguerra via Swapan Shridhar).

Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/druid.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/druid.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/druid.py
deleted file mode 100644
index ec98c3c..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/druid.py
+++ /dev/null
@@ -1,307 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import json
-import os
-from resource_management import Fail
-from resource_management.libraries.resources.properties_file import PropertiesFile
-from resource_management.core.resources.system import Directory, Execute, File
-from resource_management.core.source import DownloadSource
-from resource_management.core.source import InlineTemplate
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions.show_logs import show_logs
-from resource_management.core.logger import Logger
-
-
-def druid(upgrade_type=None, nodeType=None):
-  import params
-  ensure_base_directories()
-
-  # Environment Variables
-  File(format("{params.druid_conf_dir}/druid-env.sh"),
-       owner=params.druid_user,
-       content=InlineTemplate(params.druid_env_sh_template),
-       mode = 0700
-       )
-
-  # common config
-  druid_common_config = mutable_config_dict(params.config['configurations']['druid-common'])
-  # User cannot override below configs
-  druid_common_config['druid.host'] = params.hostname
-  druid_common_config['druid.extensions.directory'] = params.druid_extensions_dir
-  druid_common_config['druid.extensions.hadoopDependenciesDir'] = params.druid_hadoop_dependencies_dir
-  druid_common_config['druid.selectors.indexing.serviceName'] = params.config['configurations']['druid-overlord'][
-    'druid.service']
-  druid_common_config['druid.selectors.coordinator.serviceName'] = \
-    params.config['configurations']['druid-coordinator']['druid.service']
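-  # druid.extensions.loadList and druid.security.extensions.loadList are
-  # stored as Python-list-literal strings; eval turns each into a real list
-  # so the two can be merged and re-serialized as the JSON Druid expects.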
-  druid_common_config['druid.extensions.loadList'] = json.dumps(eval(params.druid_extensions_load_list) +
-                                                     eval(params.druid_security_extensions_load_list))
-
-  # delete the user and password entries when using derby, otherwise derby will fail.
-  if 'derby' == druid_common_config['druid.metadata.storage.type']:
-    del druid_common_config['druid.metadata.storage.connector.user']
-    del druid_common_config['druid.metadata.storage.connector.password']
-
-  druid_env_config = mutable_config_dict(params.config['configurations']['druid-env'])
-
-  PropertiesFile("common.runtime.properties",
-                 dir=params.druid_common_conf_dir,
-                 properties=druid_common_config,
-                 owner=params.druid_user,
-                 group=params.user_group,
-                 mode = 0600
-                 )
-  Logger.info("Created common.runtime.properties")
-
-  File(format("{params.druid_common_conf_dir}/druid-log4j.xml"),
-       mode=0644,
-       owner=params.druid_user,
-       group=params.user_group,
-       content=InlineTemplate(params.log4j_props)
-       )
-  Logger.info("Created log4j file")
-
-  File("/etc/logrotate.d/druid",
-       mode=0644,
-       owner='root',
-       group='root',
-       content=InlineTemplate(params.logrotate_props)
-       )
-
-  Logger.info("Created log rotate file")
-
-  # node specific configs
-  for node_type in ['coordinator', 'overlord', 'historical', 'broker', 'middleManager', 'router']:
-    node_config_dir = format('{params.druid_conf_dir}/{node_type}')
-    node_type_lowercase = node_type.lower()
-
-    # Write runtime.properties file
-    node_config = mutable_config_dict(params.config['configurations'][format('druid-{node_type_lowercase}')])
-    PropertiesFile("runtime.properties",
-                   dir=node_config_dir,
-                   properties=node_config,
-                   owner=params.druid_user,
-                   group=params.user_group,
-                   mode = 0600
-                   )
-    Logger.info(format("Created druid-{node_type_lowercase} runtime.properties"))
-
-    # Write jvm configs
-    File(format('{node_config_dir}/jvm.config'),
-         owner=params.druid_user,
-         group=params.user_group,
-         content=InlineTemplate(
-           "-server \n-Xms{{node_heap_memory}}m \n-Xmx{{node_heap_memory}}m \n-XX:MaxDirectMemorySize={{node_direct_memory}}m \n-Dlog4j.configurationFile={{log4j_config_file}} \n-Dlog4j.debug \n{{node_jvm_opts}}",
-           node_heap_memory=druid_env_config[format('druid.{node_type_lowercase}.jvm.heap.memory')],
-           log4j_config_file=format("{params.druid_common_conf_dir}/druid-log4j.xml"),
-           node_direct_memory=druid_env_config[
-             format('druid.{node_type_lowercase}.jvm.direct.memory')],
-           node_jvm_opts=druid_env_config[format('druid.{node_type_lowercase}.jvm.opts')])
-         )
-    Logger.info(format("Created druid-{node_type_lowercase} jvm.config"))
-    # Copy the hadoop LZO jars when LZO is enabled and the node type is
-    # hadoop-related, i.e. overlords and middle managers
-    if node_type_lowercase in ['middlemanager', 'overlord'] and params.lzo_enabled and len(
-            params.lzo_packages) > 0:
-        try:
-            Logger.info(
-                format(
-                    "Copying hadoop lzo jars from {hadoop_lib_home} to {druid_hadoop_dependencies_dir}/hadoop-client/*/"))
-            Execute(
-                format('{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {druid_hadoop_dependencies_dir}/hadoop-client/*/'))
-        except Fail as ex:
-            Logger.info(format("No Hadoop LZO found at {hadoop_lib_home}/hadoop-lzo*.jar"))
-
-  # All druid nodes have dependency on hdfs_client
-  ensure_hadoop_directories()
-  download_database_connector_if_needed()
-  # Pull all required dependencies
-  pulldeps()
-
-
-def mutable_config_dict(config):
-  rv = {}
-  for key, value in config.iteritems():
-    rv[key] = value
-  return rv
-
-
-def ensure_hadoop_directories():
-  import params
-  if 'hdfs-site' not in params.config['configurations']:
-    # HDFS Not Installed nothing to do.
-    Logger.info("Skipping HDFS directory creation as HDFS not installed")
-    return
-
-  druid_common_config = params.config['configurations']['druid-common']
-  # middlemanager config supplies the HadoopIndexTask working path used below
-  druid_middlemanager_config = params.config['configurations']['druid-middlemanager']
-
-  # If HDFS is used as deep storage, create the HDFS directory for storing segments
-  deep_storage = druid_common_config["druid.storage.type"]
-  storage_dir = druid_common_config["druid.storage.storageDirectory"]
-
-  if deep_storage == 'hdfs':
-    # create the home dir for druid
-    params.HdfsResource(format("/user/{params.druid_user}"),
-                        type="directory",
-                        action="create_on_execute",
-                        owner=params.druid_user,
-                        group='hadoop',
-                        recursive_chown=True,
-                        recursive_chmod=True
-                        )
-
-    # create the segment storage dir, users like hive from group hadoop need to write to this directory
-    create_hadoop_directory(storage_dir, mode=0775)
-
-  # Create HadoopIndexTask hadoopWorkingPath
-  hadoop_working_path = druid_middlemanager_config['druid.indexer.task.hadoopWorkingPath']
-  if hadoop_working_path is not None:
-    if hadoop_working_path.startswith(params.hdfs_tmp_dir):
-        params.HdfsResource(params.hdfs_tmp_dir,
-                            type="directory",
-                            action="create_on_execute",
-                            owner=params.hdfs_user,
-                            mode=0777,
-                            )
-    create_hadoop_directory(hadoop_working_path, mode=0775)
-
-  # If HDFS is used for storing logs, create Index Task log directory
-  indexer_logs_type = druid_common_config['druid.indexer.logs.type']
-  indexer_logs_directory = druid_common_config['druid.indexer.logs.directory']
-  if indexer_logs_type == 'hdfs' and indexer_logs_directory is not None:
-    create_hadoop_directory(indexer_logs_directory)
-
-
-def create_hadoop_directory(hadoop_dir, mode=0755):
-  import params
-  params.HdfsResource(hadoop_dir,
-                      type="directory",
-                      action="create_on_execute",
-                      owner=params.druid_user,
-                      group='hadoop',
-                      mode=mode
-                      )
-  Logger.info(format("Created Hadoop Directory [{hadoop_dir}], with mode [{mode}]"))
-
-
-def ensure_base_directories():
-  import params
-  Directory(
-    [params.druid_log_dir, params.druid_pid_dir],
-    mode=0755,
-    owner=params.druid_user,
-    group=params.user_group,
-    create_parents=True,
-    recursive_ownership=True,
-  )
-
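-  # cd_access='a' has resource_management add the execute bit for all users
-  # on each directory along the path, so the druid user can traverse into
-  # these directories even when a parent is restrictive.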
-  Directory(
-    [params.druid_conf_dir, params.druid_common_conf_dir, params.druid_coordinator_conf_dir,
-     params.druid_broker_conf_dir, params.druid_middlemanager_conf_dir, params.druid_historical_conf_dir,
-     params.druid_overlord_conf_dir, params.druid_router_conf_dir, params.druid_segment_infoDir,
-     params.druid_tasks_dir],
-    mode=0700,
-    cd_access='a',
-    owner=params.druid_user,
-    group=params.user_group,
-    create_parents=True,
-    recursive_ownership=True,
-  )
-
-  segment_cache_locations = json.loads(params.druid_segment_cache_locations)
-  for segment_cache_location in segment_cache_locations:
-    Directory(
-      segment_cache_location["path"],
-      mode=0700,
-      owner=params.druid_user,
-      group=params.user_group,
-      create_parents=True,
-      recursive_ownership=True,
-      cd_access='a'
-    )
-
-
-
-def get_daemon_cmd(params=None, node_type=None, command=None):
-  return format('source {params.druid_conf_dir}/druid-env.sh ; {params.druid_home}/bin/node.sh {node_type} {command}')
-
-
-def getPid(params=None, nodeType=None):
-  return format('{params.druid_pid_dir}/{nodeType}.pid')
-
-
-def pulldeps():
-  import params
-  extensions_list = eval(params.druid_extensions)
-  extensions_string = '{0}'.format("-c ".join(extensions_list))
-  repository_list = eval(params.druid_repo_list)
-  repository_string = '{0}'.format("-r ".join(repository_list))
-  if len(extensions_list) > 0:
-    try:
-      # Make sure druid user has permissions to write dependencies
-      Directory(
-        [params.druid_extensions_dir, params.druid_hadoop_dependencies_dir],
-        mode=0755,
-        cd_access='a',
-        owner=params.druid_user,
-        group=params.user_group,
-        create_parents=True,
-        recursive_ownership=True,
-      )
-      pull_deps_command = format(
-        "source {params.druid_conf_dir}/druid-env.sh ; java -classpath '{params.druid_home}/lib/*' -Ddruid.extensions.loadList=[] "
-        "-Ddruid.extensions.directory={params.druid_extensions_dir} -Ddruid.extensions.hadoopDependenciesDir={params.druid_hadoop_dependencies_dir} "
-        "io.druid.cli.Main tools pull-deps -c {extensions_string} --no-default-hadoop")
-
-      if len(repository_list) > 0:
-        pull_deps_command = format("{pull_deps_command} -r {repository_string}")
-
-      Execute(pull_deps_command,
-              user=params.druid_user
-              )
-      Logger.info(format("Pull Dependencies Complete"))
-    except:
-      show_logs(params.druid_log_dir, params.druid_user)
-      raise
-
-
-def download_database_connector_if_needed():
-  """
-  Downloads the database connector to use when connecting to the metadata storage
-  """
-  import params
-  if params.metadata_storage_type != 'mysql' or not params.jdbc_driver_jar:
-    return
-
-  File(params.check_db_connection_jar,
-       content = DownloadSource(format("{jdk_location}{check_db_connection_jar_name}"))
-       )
-
-  target_jar_with_directory = params.connector_download_dir + os.path.sep + params.jdbc_driver_jar
-
-  if not os.path.exists(target_jar_with_directory):
-    File(params.downloaded_custom_connector,
-         content=DownloadSource(params.connector_curl_source))
-
-    Execute(('cp', '--remove-destination', params.downloaded_custom_connector, target_jar_with_directory),
-            path=["/bin", "/usr/bin/"],
-            sudo=True)
-
-    File(target_jar_with_directory, owner=params.druid_user,
-         group=params.user_group)
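
The heart of the script deleted above is the per-role rendering loop: each
Druid role gets a runtime.properties built from its druid-<role> config
section and a jvm.config derived from the druid-env heap/direct-memory
settings. A compressed, standalone sketch of that pattern (the sample config
values and conf_dir below are illustrative, not taken from a real cluster):

  # Sketch: render per-role Druid configs the way druid.py did.
  configurations = {
      'druid-env': {
          'druid.broker.jvm.heap.memory': '2048',
          'druid.broker.jvm.direct.memory': '1024',
          'druid.broker.jvm.opts': '-Duser.timezone=UTC',
      },
      'druid-broker': {'druid.port': '8082'},
  }

  JVM_TEMPLATE = ('-server\n-Xms{heap}m\n-Xmx{heap}m\n'
                  '-XX:MaxDirectMemorySize={direct}m\n{opts}')

  def render_role(role, conf_dir='/etc/druid/conf'):
      env = configurations['druid-env']
      runtime = dict(configurations['druid-' + role.lower()])
      jvm = JVM_TEMPLATE.format(
          heap=env['druid.%s.jvm.heap.memory' % role.lower()],
          direct=env['druid.%s.jvm.direct.memory' % role.lower()],
          opts=env['druid.%s.jvm.opts' % role.lower()])
      # druid.py wrote these out via PropertiesFile/File; the sketch returns
      # them so the rendering logic can be exercised in isolation.
      return {'%s/%s/runtime.properties' % (conf_dir, role): runtime,
              '%s/%s/jvm.config' % (conf_dir, role): jvm}

  print(render_role('broker'))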

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/druid_node.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/druid_node.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/druid_node.py
deleted file mode 100644
index 8053dcb..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/druid_node.py
+++ /dev/null
@@ -1,114 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-from resource_management.core import sudo
-from resource_management import Script
-from resource_management.core.logger import Logger
-from resource_management.core.resources.system import Execute
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.check_process_status import check_process_status
-from resource_management.libraries.functions.show_logs import show_logs
-from druid import druid, get_daemon_cmd, getPid
-
-
-class DruidBase(Script):
-  def __init__(self, nodeType=None):
-    self.nodeType = nodeType
-
-  def install(self, env):
-    self.install_packages(env)
-
-  def configure(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    druid(upgrade_type=upgrade_type, nodeType=self.nodeType)
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    node_type_lower = self.nodeType.lower()
-    Logger.info(format("Executing druid-{node_type_lower} Upgrade pre-restart"))
-    import params
-
-    env.set_params(params)
-
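-    # select_packages repoints the <stack-root>/current/druid-* symlinks at
-    # the target stack version so the restarted daemon uses the new binaries.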
-    if params.stack_version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.stack_version):
-      stack_select.select_packages(params.stack_version)
-
-  def start(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    self.configure(env, upgrade_type=upgrade_type)
-    daemon_cmd = get_daemon_cmd(params, self.nodeType, "start")
-    # Verify Database connection on Druid start
-    if params.metadata_storage_type == 'mysql':
-      if not params.jdbc_driver_jar or not os.path.isfile(params.connector_download_dir + os.path.sep + params.jdbc_driver_jar):
-        path_to_jdbc =  params.connector_download_dir + os.path.sep + "*"
-        error_message = format("Error! The MySQL JDBC driver cannot be found, so the DB connection check may fail. "
-                               "Please run 'ambari-server setup --jdbc-db=mysql --jdbc-driver={path_to_jdbc}' on the Ambari server host.")
-        Logger.error(error_message)
-      else:
-        path_to_jdbc = params.connector_download_dir + os.path.sep + params.jdbc_driver_jar
-      db_connection_check_command = format("{params.java8_home}/bin/java -cp {params.check_db_connection_jar}:{path_to_jdbc} org.apache.ambari.server.DBConnectionVerification '{params.metadata_storage_url}' {params.metadata_storage_user} {params.metadata_storage_password!p} com.mysql.jdbc.Driver")
-    else:
-      db_connection_check_command = None
-
-    if db_connection_check_command:
-      sudo.chmod(params.check_db_connection_jar, 0755)
-      Execute( db_connection_check_command,
-               tries=5,
-               try_sleep=10,
-               user=params.druid_user
-               )
-
-    try:
-      Execute(daemon_cmd,
-              user=params.druid_user
-              )
-    except:
-      show_logs(params.druid_log_dir, params.druid_user)
-      raise
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-
-    daemon_cmd = get_daemon_cmd(params, self.nodeType, "stop")
-    try:
-      Execute(daemon_cmd,
-              user=params.druid_user
-              )
-    except:
-      show_logs(params.druid_log_dir, params.druid_user)
-      raise
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = getPid(status_params, self.nodeType)
-    check_process_status(pid_file)
-
-  def get_log_folder(self):
-    import params
-    return params.druid_log_dir
-
-  def get_user(self):
-    import params
-    return params.druid_user

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/historical.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/historical.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/historical.py
deleted file mode 100644
index 22390a6..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/historical.py
+++ /dev/null
@@ -1,28 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from druid_node import DruidBase
-
-
-class DruidHistorical(DruidBase):
-  def __init__(self):
-    DruidBase.__init__(self, nodeType="historical")
-
-
-if __name__ == "__main__":
-  DruidHistorical().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/middlemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/middlemanager.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/middlemanager.py
deleted file mode 100644
index 20df89c..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/middlemanager.py
+++ /dev/null
@@ -1,28 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from druid_node import DruidBase
-
-
-class DruidMiddleManager(DruidBase):
-  def __init__(self):
-    DruidBase.__init__(self, nodeType="middleManager")
-
-
-if __name__ == "__main__":
-  DruidMiddleManager().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/overlord.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/overlord.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/overlord.py
deleted file mode 100644
index e4d7fcc..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/overlord.py
+++ /dev/null
@@ -1,28 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from druid_node import DruidBase
-
-
-class DruidOverlord(DruidBase):
-  def __init__(self):
-    DruidBase.__init__(self, nodeType="overlord")
-
-
-if __name__ == "__main__":
-  DruidOverlord().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/params.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/params.py
deleted file mode 100644
index fd1cde6..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/params.py
+++ /dev/null
@@ -1,200 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from ambari_commons import OSCheck
-from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.resources.hdfs_resource import HdfsResource
-from resource_management.libraries.functions import get_kinit_path
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
-from resource_management.libraries.functions.default import default
-from ambari_commons.constants import AMBARI_SUDO_BINARY
-
-import status_params
-
-# a map of the Ambari role to the component name
-# for use with <stack-root>/current/<component>
-SERVER_ROLE_DIRECTORY_MAP = {
-  'DRUID_BROKER': 'druid-broker',
-  'DRUID_COORDINATOR': 'druid-coordinator',
-  'DRUID_HISTORICAL': 'druid-historical',
-  'DRUID_MIDDLEMANAGER': 'druid-middlemanager',
-  'DRUID_OVERLORD': 'druid-overlord',
-  'DRUID_ROUTER': 'druid-router'
-}
-
-# server configurations
-config = Script.get_config()
-stack_root = Script.get_stack_root()
-tmp_dir = Script.get_tmp_dir()
-
-stack_name = default("/hostLevelParams/stack_name", None)
-
-# stack version
-stack_version = default("/commandParams/version", None)
-
-# un-formatted stack version
-stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-
-# default role to coordinator needed for service checks
-component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "DRUID_COORDINATOR")
-
-hostname = config['hostname']
-sudo = AMBARI_SUDO_BINARY
-
-# default druid parameters
-druid_home = format("{stack_root}/current/{component_directory}")
-druid_conf_dir = format("{stack_root}/current/{component_directory}/conf")
-
-druid_common_conf_dir = druid_conf_dir + "/_common"
-druid_coordinator_conf_dir = druid_conf_dir + "/coordinator"
-druid_overlord_conf_dir = druid_conf_dir + "/overlord"
-druid_broker_conf_dir = druid_conf_dir + "/broker"
-druid_historical_conf_dir = druid_conf_dir + "/historical"
-druid_middlemanager_conf_dir = druid_conf_dir + "/middleManager"
-druid_router_conf_dir = druid_conf_dir + "/router"
-druid_extensions_dir = druid_home + "/extensions"
-druid_hadoop_dependencies_dir = druid_home + "/hadoop-dependencies"
-druid_segment_infoDir = config['configurations']['druid-historical']['druid.segmentCache.infoDir']
-druid_segment_cache_locations = config['configurations']['druid-historical']['druid.segmentCache.locations']
-druid_tasks_dir = config['configurations']['druid-middlemanager']['druid.indexer.task.baseTaskDir']
-druid_user = config['configurations']['druid-env']['druid_user']
-druid_log_dir = config['configurations']['druid-env']['druid_log_dir']
-druid_classpath = config['configurations']['druid-env']['druid_classpath']
-druid_extensions = config['configurations']['druid-common']['druid.extensions.pullList']
-druid_repo_list = config['configurations']['druid-common']['druid.extensions.repositoryList']
-druid_extensions_load_list = config['configurations']['druid-common']['druid.extensions.loadList']
-druid_security_extensions_load_list = config['configurations']['druid-common']['druid.security.extensions.loadList']
-
-
-# status params
-druid_pid_dir = status_params.druid_pid_dir
-user_group = config['configurations']['cluster-env']['user_group']
-java8_home = config['hostLevelParams']['java_home']
-druid_env_sh_template = config['configurations']['druid-env']['content']
-
-# log4j params
-log4j_props = config['configurations']['druid-log4j']['content']
-druid_log_level = config['configurations']['druid-log4j']['druid_log_level']
-metamx_log_level = config['configurations']['druid-log4j']['metamx_log_level']
-root_log_level = config['configurations']['druid-log4j']['root_log_level']
-
-druid_log_maxbackupindex = default('/configurations/druid-logrotate/druid_log_maxbackupindex', 7)
-druid_log_maxfilesize = default('/configurations/druid-logrotate/druid_log_maxfilesize', 256)
-logrotate_props = config['configurations']['druid-logrotate']['content']
-
-# Metadata storage
-metadata_storage_user = config['configurations']['druid-common']['druid.metadata.storage.connector.user']
-metadata_storage_password = config['configurations']['druid-common']['druid.metadata.storage.connector.password']
-metadata_storage_db_name = config['configurations']['druid-common']['database_name']
-metadata_storage_type = config['configurations']['druid-common']['druid.metadata.storage.type']
-metadata_storage_url = config['configurations']['druid-common']['druid.metadata.storage.connector.connectURI']
-jdk_location = config['hostLevelParams']['jdk_location']
-if 'mysql' == metadata_storage_type:
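-  # custom_mysql_jdbc_name is only populated once an admin has registered a
-  # driver via 'ambari-server setup --jdbc-db=mysql --jdbc-driver=<jar>'.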
-  jdbc_driver_jar = default("/hostLevelParams/custom_mysql_jdbc_name", None)
-  connector_curl_source = format("{jdk_location}/{jdbc_driver_jar}")
-  connector_download_dir=format("{druid_extensions_dir}/mysql-metadata-storage")
-  downloaded_custom_connector = format("{tmp_dir}/{jdbc_driver_jar}")
-
-check_db_connection_jar_name = "DBConnectionVerification.jar"
-check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
-
-# HDFS
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
-hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', 'missing_principal').replace("_HOST",
-                                                                                                             hostname)
-hdfs_site = config['configurations']['hdfs-site']
-default_fs = config['configurations']['core-site']['fs.defaultFS']
-dfs_type = default("/commandParams/dfs_type", "")
-hdfs_tmp_dir = config['configurations']['hadoop-env']['hdfs_tmp_dir']
-
-# Kerberos
-druid_principal_name = default('/configurations/druid-common/druid.hadoop.security.kerberos.principal',
-                               'missing_principal')
-druid_user_keytab = default('/configurations/druid-common/druid.hadoop.security.kerberos.keytab', 'missing_keytab')
-
-import functools
-
-# create partial functions with common arguments for every HdfsResource call
-# to create hdfs directory we need to call params.HdfsResource in code
-HdfsResource = functools.partial(
-  HdfsResource,
-  user=hdfs_user,
-  hdfs_resource_ignore_file="/var/lib/ambari-agent/data/.hdfs_resource_ignore",
-  security_enabled=security_enabled,
-  keytab=hdfs_user_keytab,
-  kinit_path_local=kinit_path_local,
-  hadoop_bin_dir=hadoop_bin_dir,
-  hadoop_conf_dir=hadoop_conf_dir,
-  principal_name=hdfs_principal_name,
-  hdfs_site=hdfs_site,
-  default_fs=default_fs,
-  immutable_paths=get_not_managed_resources(),
-  dfs_type=dfs_type
-)
-
-# Ambari Metrics
-metric_emitter_type = "noop"
-metric_collector_host = ""
-metric_collector_port = ""
-metric_collector_protocol = ""
-metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
-metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
-metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
-
-ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
-has_metric_collector = not len(ams_collector_hosts) == 0
-
-if has_metric_collector:
-    metric_emitter_type = "ambari-metrics"
-    if 'cluster-env' in config['configurations'] and \
-                    'metrics_collector_vip_host' in config['configurations']['cluster-env']:
-        metric_collector_host = config['configurations']['cluster-env']['metrics_collector_vip_host']
-    else:
-        metric_collector_host = ams_collector_hosts[0]
-    if 'cluster-env' in config['configurations'] and \
-                    'metrics_collector_vip_port' in config['configurations']['cluster-env']:
-        metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
-    else:
-        metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "localhost:6188")
-        if metric_collector_web_address.find(':') != -1:
-            metric_collector_port = metric_collector_web_address.split(':')[1]
-        else:
-            metric_collector_port = '6188'
-    if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
-        metric_collector_protocol = 'https'
-    else:
-        metric_collector_protocol = 'http'
-    pass
-
-# LZO detection and current Hadoop client library locations
-io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
-lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
-lzo_packages = get_lzo_packages(stack_version_unformatted)
-hadoop_lib_home = stack_root + '/' + stack_version + '/hadoop/lib'

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/router.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/router.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/router.py
deleted file mode 100644
index 1731a2a..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/router.py
+++ /dev/null
@@ -1,28 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from druid_node import DruidBase
-
-
-class DruidRouter(DruidBase):
-  def __init__(self):
-    DruidBase.__init__(self, nodeType="router")
-
-
-if __name__ == "__main__":
-  DruidRouter().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/service_check.py
deleted file mode 100644
index 139b727..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/service_check.py
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions.format import format
-from resource_management.core.resources.system import Execute
-
-
-class ServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    self.checkComponent(params, "druid_coordinator", "druid-coordinator")
-    self.checkComponent(params, "druid_overlord", "druid-overlord")
-
-  def checkComponent(self, params, component_name, config_name):
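-    # Curl each host running the component at http://<host>:<port>/status and
-    # pass only when the HTTP response code is 200.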
-    component_port = params.config['configurations'][format('{config_name}')]['druid.port']
-    for component_host in params.config['clusterHostInfo'][format('{component_name}_hosts')]:
-      Execute(format(
-        "curl -s -o /dev/null -w'%{{http_code}}' --negotiate -u: -k {component_host}:{component_port}/status | grep 200"),
-        tries=10,
-        try_sleep=3,
-        logoutput=True)
-
-
-if __name__ == "__main__":
-  ServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/status_params.py
deleted file mode 100644
index ee1d61c..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/status_params.py
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from resource_management.libraries.script.script import Script
-
-config = Script.get_config()
-
-druid_pid_dir = config['configurations']['druid-env']['druid_pid_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/quicklinks/quicklinks.json b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/quicklinks/quicklinks.json
deleted file mode 100644
index c68b9b9..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/quicklinks/quicklinks.json
+++ /dev/null
@@ -1,37 +0,0 @@
-{
-  "name": "default",
-  "description": "default quick links configuration",
-  "configuration": {
-    "protocol": {
-      "type": "HTTP_ONLY"
-    },
-    "links": [
-      {
-        "name": "coordinator_console",
-        "label": "Druid Coordinator Console",
-        "component_name": "DRUID_COORDINATOR",
-        "requires_user_name": "false",
-        "url": "%@://%@:%@",
-        "port": {
-          "http_property": "druid.port",
-          "http_default_port": "8081",
-          "regex": "^(\\d+)$",
-          "site": "druid-coordinator"
-        }
-      },
-      {
-        "name": "overlord_console",
-        "label": "Druid Overlord Console",
-        "component_name": "DRUID_OVERLORD",
-        "requires_user_name": "false",
-        "url": "%@://%@:%@",
-        "port": {
-          "http_property": "druid.port",
-          "http_default_port": "8090",
-          "regex": "^(\\d+)$",
-          "site": "druid-overlord"
-        }
-      }
-    ]
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/role_command_order.json b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/role_command_order.json
deleted file mode 100644
index 4d697fe..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/role_command_order.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
-  "general_deps" : {
-    "_comment" : "dependencies for Druid",
-    "DRUID_HISTORICAL-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
-    "DRUID_OVERLORD-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
-    "DRUID_MIDDLEMANAGER-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
-    "DRUID_BROKER-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
-    "DRUID_ROUTER-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
-    "DRUID_COORDINATOR-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
-    "DRUID_OVERLORD-RESTART" : ["DRUID_HISTORICAL-RESTART"],
-    "DRUID_MIDDLEMANAGER-RESTART" : ["DRUID_OVERLORD-RESTART"],
-    "DRUID_BROKER-RESTART" : ["DRUID_MIDDLEMANAGER-RESTART"],
-    "DRUID_ROUTER-RESTART" : ["DRUID_BROKER-RESTART"],
-    "DRUID_COORDINATOR-RESTART" : ["DRUID_ROUTER-RESTART"],
-    "DRUID_SERVICE_CHECK-SERVICE_CHECK" : ["DRUID_HISTORICAL-START", "DRUID_COORDINATOR-START", "DRUID_OVERLORD-START", "DRUID_MIDDLEMANAGER-START", "DRUID_BROKER-START", "DRUID_ROUTER-START"]
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/themes/theme.json b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/themes/theme.json
deleted file mode 100644
index 7033e19..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/themes/theme.json
+++ /dev/null
@@ -1,120 +0,0 @@
-{
-  "name": "default",
-  "description": "Default theme for Druid service",
-  "configuration": {
-    "layouts": [
-      {
-        "name": "default",
-        "tabs": [
-          {
-            "name": "metadata_storage",
-            "display-name": "META DATA STORAGE CONFIG",
-            "layout": {
-              "tab-columns": "",
-              "tab-rows": "1",
-              "sections": [
-                {
-                  "name": "section-metadata-storage",
-                  "display-name": "",
-                  "row-index": "0",
-                  "column-index": "0",
-                  "row-span": "2",
-                  "column-span": "1",
-                  "section-columns": "1",
-                  "section-rows": "1",
-                  "subsections": [
-                    {
-                      "name": "subsection-metadata-storage-row1-col1",
-                      "display-name": "DRUID META DATA STORAGE",
-                      "row-index": "0",
-                      "column-index": "0",
-                      "row-span": "1",
-                      "column-span": "1"
-                    }
-                  ]
-                }
-              ]
-            }
-          }
-        ]
-      }
-    ],
-    "placement": {
-      "configuration-layout": "default",
-      "configs": [
-        {
-          "config": "druid-common/database_name",
-          "subsection-name": "subsection-metadata-storage-row1-col1"
-        },
-        {
-          "config": "druid-common/druid.metadata.storage.type",
-          "subsection-name": "subsection-metadata-storage-row1-col1"
-        },
-        {
-          "config": "druid-common/druid.metadata.storage.connector.user",
-          "subsection-name": "subsection-metadata-storage-row1-col1"
-        },
-        {
-          "config": "druid-common/druid.metadata.storage.connector.password",
-          "subsection-name": "subsection-metadata-storage-row1-col1"
-        },
-        {
-          "config": "druid-common/metastore_hostname",
-          "subsection-name": "subsection-metadata-storage-row1-col1"
-        },
-        {
-          "config": "druid-common/druid.metadata.storage.connector.port",
-          "subsection-name": "subsection-metadata-storage-row1-col1"
-        },
-        {
-          "config": "druid-common/druid.metadata.storage.connector.connectURI",
-          "subsection-name": "subsection-metadata-storage-row1-col1"
-        }
-      ]
-    },
-    "widgets": [
-      {
-        "config": "druid-common/database_name",
-        "widget": {
-          "type": "text-field"
-        }
-      },
-      {
-        "config": "druid-common/druid.metadata.storage.type",
-        "widget": {
-          "type": "combo"
-        }
-      },
-      {
-        "config": "druid-common/druid.metadata.storage.connector.user",
-        "widget": {
-          "type": "text-field"
-        }
-      },
-      {
-        "config": "druid-common/druid.metadata.storage.connector.password",
-        "widget": {
-          "type": "password"
-        }
-      },
-      {
-        "config": "druid-common/metastore_hostname",
-        "widget": {
-          "type": "text-field"
-        }
-      },
-      {
-        "config": "druid-common/druid.metadata.storage.connector.port",
-        "widget": {
-          "type": "text-field"
-        }
-      },
-      {
-        "config": "druid-common/druid.metadata.storage.connector.connectURI",
-        "widget": {
-          "type": "text-field"
-        }
-      }
-    ]
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/metainfo.xml
index f1f099e..07dd6d6 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/metainfo.xml
@@ -20,9 +20,8 @@
   <services>
     <service>
       <name>DRUID</name>
-      <version>0.9.2</version>
-      <extends>common-services/DRUID/0.9.2</extends>
-      <selection>TECH_PREVIEW</selection>
+      <version>0.10.1</version>
+      <extends>common-services/DRUID/0.10.1</extends>
     </service>
   </services>
 </metainfo>
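
After this change the stack-level definition is a thin pointer at the new
common-services version. The resulting file, reconstructed from the context
above (license header elided), is roughly:

  <metainfo>
    <services>
      <service>
        <name>DRUID</name>
        <version>0.10.1</version>
        <extends>common-services/DRUID/0.10.1</extends>
      </service>
    </services>
  </metainfo>

The <extends> element lets the HDP 2.6 stack inherit the full service
definition from common-services while pinning the version, and removing
<selection>TECH_PREVIEW</selection> is what drops the TP flag named in the
commit message.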

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/test/python/stacks/2.6/DRUID/test_druid.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/DRUID/test_druid.py b/ambari-server/src/test/python/stacks/2.6/DRUID/test_druid.py
index d8b9fe2..80e9d54 100644
--- a/ambari-server/src/test/python/stacks/2.6/DRUID/test_druid.py
+++ b/ambari-server/src/test/python/stacks/2.6/DRUID/test_druid.py
@@ -31,7 +31,7 @@ from resource_management.core.logger import Logger
 @patch("resource_management.libraries.Script.get_tmp_dir", new=MagicMock(return_value=('/var/lib/ambari-agent/tmp')))
 @patch.object(functions, "get_stack_version", new=MagicMock(return_value="2.0.0.0-1234"))
 class TestDruid(RMFTestCase):
-  COMMON_SERVICES_PACKAGE_DIR = "DRUID/0.9.2/package"
+  COMMON_SERVICES_PACKAGE_DIR = "DRUID/0.10.1/package"
   STACK_VERSION = "2.6"
   DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
 


[28/50] [abbrv] ambari git commit: Revert "AMBARI-22160. hadooplzo package installation failed on devdeploys (aonishuk)"

Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/f1c4626b/ambari-server/bin/src/main/resources/Ambari-DDL-AzureDB-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/bin/src/main/resources/Ambari-DDL-AzureDB-CREATE.sql b/ambari-server/bin/src/main/resources/Ambari-DDL-AzureDB-CREATE.sql
deleted file mode 100644
index b54132c..0000000
--- a/ambari-server/bin/src/main/resources/Ambari-DDL-AzureDB-CREATE.sql
+++ /dev/null
@@ -1,2147 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-/*
-Schema population script for $(AMBARIDBNAME)
-
-Use this script in sqlcmd mode, setting the environment variables like this:
-set AMBARIDBNAME=ambari
-
-sqlcmd -S localhost\SQLEXPRESS -i C:\app\ambari-server-1.3.0-SNAPSHOT\resources\Ambari-DDL-AzureDB-CREATE.sql
-*/
-
-
-------create the database------
-
-------create tables and grant privileges to db user---------
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('stack') AND type = 'U')
-BEGIN
-CREATE TABLE stack(
-  stack_id BIGINT NOT NULL,
-  stack_name VARCHAR(255) NOT NULL,
-  stack_version VARCHAR(255) NOT NULL,
-  CONSTRAINT PK_stack PRIMARY KEY CLUSTERED (stack_id),
-  CONSTRAINT UQ_stack UNIQUE (stack_name, stack_version))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('extension') AND type = 'U')
-BEGIN
-CREATE TABLE extension(
-  extension_id BIGINT NOT NULL,
-  extension_name VARCHAR(255) NOT NULL,
-  extension_version VARCHAR(255) NOT NULL,
-  CONSTRAINT PK_extension PRIMARY KEY CLUSTERED (extension_id),
-  CONSTRAINT UQ_extension UNIQUE (extension_name, extension_version))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('extensionlink') AND type = 'U')
-BEGIN
-CREATE TABLE extensionlink(
-  link_id BIGINT NOT NULL,
-  stack_id BIGINT NOT NULL,
-  extension_id BIGINT NOT NULL,
-  CONSTRAINT PK_extensionlink PRIMARY KEY CLUSTERED (link_id),
-  CONSTRAINT FK_extensionlink_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id),
-  CONSTRAINT FK_extensionlink_extension_id FOREIGN KEY (extension_id) REFERENCES extension(extension_id),
-  CONSTRAINT UQ_extension_link UNIQUE (stack_id, extension_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('adminresourcetype') AND type = 'U')
-BEGIN
-CREATE TABLE adminresourcetype (
-  resource_type_id INTEGER NOT NULL,
-  resource_type_name VARCHAR(255) NOT NULL,
-  CONSTRAINT PK_adminresourcetype PRIMARY KEY CLUSTERED (resource_type_id)
-  )
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('adminresource') AND type = 'U')
-BEGIN
-CREATE TABLE adminresource (
-  resource_id BIGINT NOT NULL,
-  resource_type_id INTEGER NOT NULL,
-  CONSTRAINT PK_adminresource PRIMARY KEY CLUSTERED (resource_id),
-  CONSTRAINT FK_resource_resource_type_id FOREIGN KEY (resource_type_id) REFERENCES adminresourcetype(resource_type_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('clusters') AND type = 'U')
-BEGIN
-CREATE TABLE clusters (
-  cluster_id BIGINT NOT NULL,
-  resource_id BIGINT NOT NULL,
-  upgrade_id BIGINT,
-  cluster_info VARCHAR(255) NOT NULL,
-  cluster_name VARCHAR(100) NOT NULL UNIQUE,
-  provisioning_state VARCHAR(255) NOT NULL DEFAULT 'INIT',
-  security_type VARCHAR(32) NOT NULL DEFAULT 'NONE',
-  desired_cluster_state VARCHAR(255) NOT NULL,
-  desired_stack_id BIGINT NOT NULL,
-  CONSTRAINT PK_clusters PRIMARY KEY CLUSTERED (cluster_id),
-  CONSTRAINT FK_clusters_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
-  CONSTRAINT FK_clusters_resource_id FOREIGN KEY (resource_id) REFERENCES adminresource(resource_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('clusterconfig') AND type = 'U')
-BEGIN
-CREATE TABLE clusterconfig (
-  config_id BIGINT NOT NULL,
-  version_tag VARCHAR(255) NOT NULL,
-  version BIGINT NOT NULL,
-  type_name VARCHAR(255) NOT NULL,
-  cluster_id BIGINT NOT NULL,
-  stack_id BIGINT NOT NULL,
-  selected SMALLINT NOT NULL DEFAULT 0,
-  config_data VARCHAR(MAX) NOT NULL,
-  config_attributes VARCHAR(MAX),
-  create_timestamp BIGINT NOT NULL,
-  unmapped SMALLINT NOT NULL DEFAULT 0,
-  selected_timestamp BIGINT NOT NULL DEFAULT 0,
-  CONSTRAINT PK_clusterconfig PRIMARY KEY CLUSTERED (config_id),
-  CONSTRAINT FK_clusterconfig_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id),
-  CONSTRAINT FK_clusterconfig_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id),
-  CONSTRAINT UQ_config_type_tag UNIQUE (cluster_id, type_name, version_tag),
-  CONSTRAINT UQ_config_type_version UNIQUE (cluster_id, type_name, version))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('serviceconfig') AND type = 'U')
-BEGIN
-CREATE TABLE serviceconfig (
-  service_config_id BIGINT NOT NULL,
-  cluster_id BIGINT NOT NULL,
-  service_name VARCHAR(255) NOT NULL,
-  version BIGINT NOT NULL,
-  create_timestamp BIGINT NOT NULL,
-  stack_id BIGINT NOT NULL,
-  user_name VARCHAR(255) NOT NULL DEFAULT '_db',
-  group_id BIGINT,
-  note VARCHAR(MAX),
-  CONSTRAINT PK_serviceconfig PRIMARY KEY CLUSTERED (service_config_id),
-  CONSTRAINT FK_serviceconfig_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id),
-  CONSTRAINT UQ_scv_service_version UNIQUE (cluster_id, service_name, version))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('hosts') AND type = 'U')
-BEGIN
-CREATE TABLE hosts (
-  host_id BIGINT NOT NULL,
-  host_name VARCHAR(255) NOT NULL,
-  cpu_count INTEGER NOT NULL,
-  ph_cpu_count INTEGER,
-  cpu_info VARCHAR(255) NOT NULL,
-  discovery_status VARCHAR(2000) NOT NULL,
-  host_attributes VARCHAR(MAX) NOT NULL,
-  ipv4 VARCHAR(255),
-  ipv6 VARCHAR(255),
-  public_host_name VARCHAR(255),
-  last_registration_time BIGINT NOT NULL,
-  os_arch VARCHAR(255) NOT NULL,
-  os_info VARCHAR(1000) NOT NULL,
-  os_type VARCHAR(255) NOT NULL,
-  rack_info VARCHAR(255) NOT NULL,
-  total_mem BIGINT NOT NULL,
-  CONSTRAINT PK_hosts PRIMARY KEY CLUSTERED (host_id),
-  CONSTRAINT UQ_hosts_host_name UNIQUE (host_name))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('serviceconfighosts') AND type = 'U')
-BEGIN
-CREATE TABLE serviceconfighosts (
-  service_config_id BIGINT NOT NULL,
-  host_id BIGINT NOT NULL,
-  CONSTRAINT PK_serviceconfighosts PRIMARY KEY CLUSTERED (service_config_id, host_id),
-  CONSTRAINT FK_scvhosts_host_id FOREIGN KEY (host_id) REFERENCES hosts(host_id),
-  CONSTRAINT FK_scvhosts_scv FOREIGN KEY (service_config_id) REFERENCES serviceconfig(service_config_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('serviceconfigmapping') AND type = 'U')
-BEGIN
-CREATE TABLE serviceconfigmapping (
-  service_config_id BIGINT NOT NULL,
-  config_id BIGINT NOT NULL,
-  CONSTRAINT PK_serviceconfigmapping PRIMARY KEY CLUSTERED (service_config_id, config_id),
-  CONSTRAINT FK_scvm_config FOREIGN KEY (config_id) REFERENCES clusterconfig(config_id),
-  CONSTRAINT FK_scvm_scv FOREIGN KEY (service_config_id) REFERENCES serviceconfig(service_config_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('clusterservices') AND type = 'U')
-BEGIN
-CREATE TABLE clusterservices (
-  service_name VARCHAR(255) NOT NULL,
-  cluster_id BIGINT NOT NULL,
-  service_enabled INT NOT NULL,
-  CONSTRAINT PK_clusterservices PRIMARY KEY CLUSTERED (service_name, cluster_id),
-  CONSTRAINT FK_clusterservices_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('clusterstate') AND type = 'U')
-BEGIN
-CREATE TABLE clusterstate (
-  cluster_id BIGINT NOT NULL,
-  current_cluster_state VARCHAR(255) NOT NULL,
-  current_stack_id BIGINT NOT NULL,
-  CONSTRAINT PK_clusterstate PRIMARY KEY CLUSTERED (cluster_id),
-  CONSTRAINT FK_clusterstate_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id),
-  CONSTRAINT FK_cs_current_stack_id FOREIGN KEY (current_stack_id) REFERENCES stack(stack_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('repo_version') AND type = 'U')
-BEGIN
-CREATE TABLE repo_version (
-  repo_version_id BIGINT NOT NULL,
-  stack_id BIGINT NOT NULL,
-  version VARCHAR(255) NOT NULL,
-  display_name VARCHAR(128) NOT NULL,
-  repositories VARCHAR(MAX) NOT NULL,
-  repo_type VARCHAR(255) DEFAULT 'STANDARD' NOT NULL,
-  hidden SMALLINT NOT NULL DEFAULT 0,
-  resolved BIT NOT NULL DEFAULT 0,
-  version_url VARCHAR(1024),
-  version_xml VARCHAR(MAX),
-  version_xsd VARCHAR(512),
-  parent_id BIGINT,
-  CONSTRAINT PK_repo_version PRIMARY KEY CLUSTERED (repo_version_id),
-  CONSTRAINT FK_repoversion_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id),
-  CONSTRAINT UQ_repo_version_display_name UNIQUE (display_name),
-  CONSTRAINT UQ_repo_version_stack_id UNIQUE (stack_id, version))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('servicecomponentdesiredstate') AND type = 'U')
-BEGIN
-CREATE TABLE servicecomponentdesiredstate (
-  id BIGINT NOT NULL,
-  component_name VARCHAR(255) NOT NULL,
-  cluster_id BIGINT NOT NULL,
-  desired_repo_version_id BIGINT NOT NULL,
-  desired_state VARCHAR(255) NOT NULL,
-  service_name VARCHAR(255) NOT NULL,
-  recovery_enabled SMALLINT NOT NULL DEFAULT 0,
-  repo_state VARCHAR(255) NOT NULL DEFAULT 'NOT_REQUIRED',
-  CONSTRAINT pk_sc_desiredstate PRIMARY KEY (id),
-  CONSTRAINT UQ_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id),
-  CONSTRAINT FK_scds_desired_repo_id FOREIGN KEY (desired_repo_version_id) REFERENCES repo_version (repo_version_id),
-  CONSTRAINT srvccmponentdesiredstatesrvcnm FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('hostcomponentdesiredstate') AND type = 'U')
-BEGIN
-CREATE TABLE hostcomponentdesiredstate (
-  id BIGINT NOT NULL,
-  cluster_id BIGINT NOT NULL,
-  component_name VARCHAR(255) NOT NULL,
-  desired_state VARCHAR(255) NOT NULL,
-  host_id BIGINT NOT NULL,
-  service_name VARCHAR(255) NOT NULL,
-  admin_state VARCHAR(32),
-  maintenance_state VARCHAR(32) NOT NULL,
-  security_state VARCHAR(32) NOT NULL DEFAULT 'UNSECURED',
-  restart_required BIT NOT NULL DEFAULT 0,
-  CONSTRAINT PK_hostcomponentdesiredstate PRIMARY KEY CLUSTERED (id),
-  CONSTRAINT UQ_hcdesiredstate_name UNIQUE (component_name, service_name, host_id, cluster_id),
-  CONSTRAINT FK_hcdesiredstate_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
-  CONSTRAINT hstcmpnntdesiredstatecmpnntnme FOREIGN KEY (component_name, service_name, cluster_id) REFERENCES servicecomponentdesiredstate (component_name, service_name, cluster_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('hostcomponentstate') AND type = 'U')
-BEGIN
-CREATE TABLE hostcomponentstate (
-  id BIGINT NOT NULL,
-  cluster_id BIGINT NOT NULL,
-  component_name VARCHAR(255) NOT NULL,
-  version VARCHAR(32) NOT NULL DEFAULT 'UNKNOWN',
-  current_state VARCHAR(255) NOT NULL,
-  host_id BIGINT NOT NULL,
-  service_name VARCHAR(255) NOT NULL,
-  upgrade_state VARCHAR(32) NOT NULL DEFAULT 'NONE',
-  security_state VARCHAR(32) NOT NULL DEFAULT 'UNSECURED',
-  CONSTRAINT PK_hostcomponentstate PRIMARY KEY CLUSTERED (id),
-  CONSTRAINT FK_hostcomponentstate_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
-  CONSTRAINT hstcomponentstatecomponentname FOREIGN KEY (component_name, service_name, cluster_id) REFERENCES servicecomponentdesiredstate (component_name, service_name, cluster_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_host_component_state')
-BEGIN
-CREATE NONCLUSTERED INDEX idx_host_component_state on hostcomponentstate(host_id, component_name, service_name, cluster_id)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('hoststate') AND type = 'U')
-BEGIN
-CREATE TABLE hoststate (
-  agent_version VARCHAR(255) NOT NULL,
-  available_mem BIGINT NOT NULL,
-  current_state VARCHAR(255) NOT NULL,
-  health_status VARCHAR(255),
-  host_id BIGINT NOT NULL,
-  time_in_state BIGINT NOT NULL,
-  maintenance_state VARCHAR(512),
-  CONSTRAINT PK_hoststate PRIMARY KEY CLUSTERED (host_id),
-  CONSTRAINT FK_hoststate_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('servicedesiredstate') AND type = 'U')
-BEGIN
-CREATE TABLE servicedesiredstate (
-  cluster_id BIGINT NOT NULL,
-  desired_host_role_mapping INTEGER NOT NULL,
-  desired_repo_version_id BIGINT NOT NULL,
-  desired_state VARCHAR(255) NOT NULL,
-  service_name VARCHAR(255) NOT NULL,
-  maintenance_state VARCHAR(32) NOT NULL,
-  security_state VARCHAR(32) NOT NULL DEFAULT 'UNSECURED',
-  credential_store_enabled SMALLINT NOT NULL DEFAULT 0,
-  CONSTRAINT PK_servicedesiredstate PRIMARY KEY CLUSTERED (cluster_id,service_name),
-  CONSTRAINT FK_repo_version_id FOREIGN KEY (desired_repo_version_id) REFERENCES repo_version (repo_version_id),
-  CONSTRAINT servicedesiredstateservicename FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('adminprincipaltype') AND type = 'U')
-BEGIN
-CREATE TABLE adminprincipaltype (
-  principal_type_id INTEGER NOT NULL,
-  principal_type_name VARCHAR(255) NOT NULL,
-  CONSTRAINT PK_adminprincipaltype PRIMARY KEY CLUSTERED (principal_type_id)
-  )
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('adminprincipal') AND type = 'U')
-BEGIN
-CREATE TABLE adminprincipal (
-  principal_id BIGINT NOT NULL,
-  principal_type_id INTEGER NOT NULL,
-  CONSTRAINT PK_adminprincipal PRIMARY KEY CLUSTERED (principal_id),
-  CONSTRAINT FK_principal_principal_type_id FOREIGN KEY (principal_type_id) REFERENCES adminprincipaltype(principal_type_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('users') AND type = 'U')
-BEGIN
-CREATE TABLE users (
-  user_id INTEGER,
-  principal_id BIGINT NOT NULL,
-  ldap_user INTEGER NOT NULL DEFAULT 0,
-  user_name VARCHAR(255) NOT NULL,
-  user_type VARCHAR(255) NOT NULL DEFAULT 'LOCAL',
-  create_time DATETIME DEFAULT GETDATE(),
-  user_password VARCHAR(255),
-  active INTEGER NOT NULL DEFAULT 1,
-  active_widget_layouts VARCHAR(1024) DEFAULT NULL,
-  CONSTRAINT PK_users PRIMARY KEY CLUSTERED (user_id),
-  CONSTRAINT FK_users_principal_id FOREIGN KEY (principal_id) REFERENCES adminprincipal(principal_id),
-  CONSTRAINT UNQ_users_0 UNIQUE (user_name, user_type))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('groups') AND type = 'U')
-BEGIN
-CREATE TABLE groups (
-  group_id INTEGER,
-  principal_id BIGINT NOT NULL,
-  group_name VARCHAR(255) NOT NULL,
-  ldap_group INTEGER NOT NULL DEFAULT 0,
-  group_type VARCHAR(255) NOT NULL DEFAULT 'LOCAL',
-  CONSTRAINT PK_groups PRIMARY KEY CLUSTERED (group_id),
-  CONSTRAINT FK_groups_principal_id FOREIGN KEY (principal_id) REFERENCES adminprincipal(principal_id),
-  CONSTRAINT UNQ_groups_0 UNIQUE (group_name, ldap_group))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('members') AND type = 'U')
-BEGIN
-CREATE TABLE members (
-  member_id INTEGER,
-  group_id INTEGER NOT NULL,
-  user_id INTEGER NOT NULL,
-  CONSTRAINT PK_members PRIMARY KEY CLUSTERED (member_id),
-  CONSTRAINT FK_members_group_id FOREIGN KEY (group_id) REFERENCES groups (group_id),
-  CONSTRAINT FK_members_user_id FOREIGN KEY (user_id) REFERENCES users (user_id),
-  CONSTRAINT UNQ_members_0 UNIQUE (group_id, user_id))
-END
-
-
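members is the join table between users and groups, and UNQ_members_0 keeps each user in a group at most once. A sketch of listing a group's members (the group name here is hypothetical):

SELECT u.user_name
FROM members m
JOIN users u ON u.user_id = m.user_id
JOIN groups g ON g.group_id = m.group_id
WHERE g.group_name = 'operators';  -- hypothetical group name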
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('requestschedule') AND type = 'U')
-BEGIN
-CREATE TABLE requestschedule (
-  schedule_id BIGINT,
-  cluster_id BIGINT NOT NULL,
-  description VARCHAR(255),
-  STATUS VARCHAR(255),
-  batch_separation_seconds SMALLINT,
-  batch_toleration_limit SMALLINT,
-  authenticated_user_id INTEGER,
-  create_user VARCHAR(255),
-  create_timestamp BIGINT,
-  update_user VARCHAR(255),
-  update_timestamp BIGINT,
-  minutes VARCHAR(10),
-  hours VARCHAR(10),
-  days_of_month VARCHAR(10),
-  month VARCHAR(10),
-  day_of_week VARCHAR(10),
-  yearToSchedule VARCHAR(10),
-  startTime VARCHAR(50),
-  endTime VARCHAR(50),
-  last_execution_status VARCHAR(255),
-  CONSTRAINT PK_requestschedule PRIMARY KEY CLUSTERED (schedule_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('request') AND type = 'U')
-BEGIN
-CREATE TABLE request (
-  request_id BIGINT NOT NULL,
-  cluster_id BIGINT,
-  command_name VARCHAR(255),
-  create_time BIGINT NOT NULL,
-  end_time BIGINT NOT NULL,
-  exclusive_execution BIT NOT NULL DEFAULT 0,
-  inputs VARBINARY(MAX),
-  request_context VARCHAR(255),
-  request_type VARCHAR(255),
-  request_schedule_id BIGINT,
-  start_time BIGINT NOT NULL,
-  status VARCHAR(255),
-  cluster_host_info VARBINARY(MAX) NOT NULL,
-  CONSTRAINT PK_request PRIMARY KEY CLUSTERED (request_id),
-  CONSTRAINT FK_request_schedule_id FOREIGN KEY (request_schedule_id) REFERENCES requestschedule (schedule_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('stage') AND type = 'U')
-BEGIN
-CREATE TABLE stage (
-  stage_id BIGINT NOT NULL,
-  request_id BIGINT NOT NULL,
-  cluster_id BIGINT NOT NULL,
-  skippable SMALLINT DEFAULT 0 NOT NULL,
-  supports_auto_skip_failure SMALLINT DEFAULT 0 NOT NULL,
-  log_info VARCHAR(255) NOT NULL,
-  request_context VARCHAR(255),
-  command_params VARBINARY(MAX),
-  host_params VARBINARY(MAX),
-  command_execution_type VARCHAR(32) NOT NULL DEFAULT 'STAGE',
-  CONSTRAINT PK_stage PRIMARY KEY CLUSTERED (stage_id, request_id),
-  CONSTRAINT FK_stage_request_id FOREIGN KEY (request_id) REFERENCES request (request_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('host_role_command') AND type = 'U')
-BEGIN
-CREATE TABLE host_role_command (
-  task_id BIGINT NOT NULL,
-  attempt_count SMALLINT NOT NULL,
-  retry_allowed SMALLINT DEFAULT 0 NOT NULL,
-  event VARCHAR(MAX) NOT NULL,
-  exitcode INTEGER NOT NULL,
-  host_id BIGINT,
-  last_attempt_time BIGINT NOT NULL,
-  request_id BIGINT NOT NULL,
-  role VARCHAR(255),
-  stage_id BIGINT NOT NULL,
-  start_time BIGINT NOT NULL,
-  original_start_time BIGINT NOT NULL,
-  end_time BIGINT,
-  status VARCHAR(255),
-  auto_skip_on_failure SMALLINT DEFAULT 0 NOT NULL,
-  std_error VARBINARY(max),
-  std_out VARBINARY(max),
-  output_log VARCHAR(255) NULL,
-  error_log VARCHAR(255) NULL,
-  structured_out VARBINARY(max),
-  role_command VARCHAR(255),
-  command_detail VARCHAR(255),
-  custom_command_name VARCHAR(255),
-  is_background SMALLINT DEFAULT 0 NOT NULL,
-  CONSTRAINT PK_host_role_command PRIMARY KEY CLUSTERED (task_id),
-  CONSTRAINT FK_host_role_command_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
-  CONSTRAINT FK_host_role_command_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES stage (stage_id, request_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('execution_command') AND type = 'U')
-BEGIN
-CREATE TABLE execution_command (
-  command VARBINARY(MAX),
-  task_id BIGINT NOT NULL,
-  CONSTRAINT PK_execution_command PRIMARY KEY CLUSTERED (task_id),
-  CONSTRAINT FK_execution_command_task_id FOREIGN KEY (task_id) REFERENCES host_role_command (task_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('role_success_criteria') AND type = 'U')
-BEGIN
-CREATE TABLE role_success_criteria (
-  ROLE VARCHAR(255) NOT NULL,
-  request_id BIGINT NOT NULL,
-  stage_id BIGINT NOT NULL,
-  success_factor FLOAT NOT NULL,
-  CONSTRAINT PK_role_success_criteria PRIMARY KEY CLUSTERED (ROLE, request_id, stage_id),
-  CONSTRAINT role_success_criteria_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES stage (stage_id, request_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('requestresourcefilter') AND type = 'U')
-BEGIN
-CREATE TABLE requestresourcefilter (
-  filter_id BIGINT NOT NULL,
-  request_id BIGINT NOT NULL,
-  service_name VARCHAR(255),
-  component_name VARCHAR(255),
-  hosts VARBINARY(MAX),
-  CONSTRAINT PK_requestresourcefilter PRIMARY KEY CLUSTERED (filter_id),
-  CONSTRAINT FK_reqresfilter_req_id FOREIGN KEY (request_id) REFERENCES request (request_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('requestoperationlevel') AND type = 'U')
-BEGIN
-CREATE TABLE requestoperationlevel (
-  operation_level_id BIGINT NOT NULL,
-  request_id BIGINT NOT NULL,
-  level_name VARCHAR(255),
-  cluster_name VARCHAR(255),
-  service_name VARCHAR(255),
-  host_component_name VARCHAR(255),
-  host_id BIGINT NULL,      -- unlike most host_id columns, this one allows NULLs because the request can be at the service level
-  CONSTRAINT PK_requestoperationlevel PRIMARY KEY CLUSTERED (operation_level_id),
-  CONSTRAINT FK_req_op_level_req_id FOREIGN KEY (request_id) REFERENCES request (request_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('ClusterHostMapping') AND type = 'U')
-BEGIN
-CREATE TABLE ClusterHostMapping (
-  cluster_id BIGINT NOT NULL,
-  host_id BIGINT NOT NULL,
-  CONSTRAINT PK_ClusterHostMapping PRIMARY KEY CLUSTERED (cluster_id, host_id),
-  CONSTRAINT FK_clhostmapping_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id),
-  CONSTRAINT FK_clusterhostmapping_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('key_value_store') AND type = 'U')
-BEGIN
-CREATE TABLE key_value_store (
-  [key] VARCHAR(255),
-  [value] VARCHAR(MAX),
-  CONSTRAINT PK_key_value_store PRIMARY KEY CLUSTERED ([key])
-  )
-END
-
-
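The bracketed identifiers are deliberate: KEY is a reserved word in T-SQL (and VALUE is on the ODBC reserved list), so any statement touching key_value_store has to keep the brackets. A sketch with a hypothetical key:

SELECT [value]
FROM key_value_store
WHERE [key] = 'cluster-state';  -- hypothetical key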
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('hostconfigmapping') AND type = 'U')
-BEGIN
-CREATE TABLE hostconfigmapping (
-  cluster_id BIGINT NOT NULL,
-  host_id BIGINT NOT NULL,
-  type_name VARCHAR(255) NOT NULL,
-  version_tag VARCHAR(255) NOT NULL,
-  service_name VARCHAR(255),
-  create_timestamp BIGINT NOT NULL,
-  selected INTEGER NOT NULL DEFAULT 0,
-  user_name VARCHAR(255) NOT NULL DEFAULT '_db',
-  CONSTRAINT PK_hostconfigmapping PRIMARY KEY CLUSTERED (cluster_id, host_id, type_name, create_timestamp),
-  CONSTRAINT FK_hostconfmapping_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id),
-  CONSTRAINT FK_hostconfmapping_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('metainfo') AND type = 'U')
-BEGIN
-CREATE TABLE metainfo (
-  [metainfo_key] VARCHAR(255),
-  [metainfo_value] VARCHAR(255),
-  CONSTRAINT PK_metainfo PRIMARY KEY CLUSTERED ([metainfo_key])
-  )
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('ambari_sequences') AND type = 'U')
-BEGIN
-CREATE TABLE ambari_sequences (
-  sequence_name VARCHAR(255),
-  [sequence_value] BIGINT NOT NULL,
-  CONSTRAINT PK_ambari_sequences PRIMARY KEY (sequence_name))
-END
-
-
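ambari_sequences emulates sequences with a plain table rather than native SEQUENCE objects, which keeps one mechanism portable across the databases Ambari supports. The server allocates these ids in its persistence layer rather than in SQL, but the underlying pattern is an atomic read-and-increment; a minimal T-SQL sketch, using one of the sequence names seeded later in this script:

DECLARE @next BIGINT;
UPDATE ambari_sequences
SET @next = [sequence_value] = [sequence_value] + 1
WHERE sequence_name = 'host_id_seq';
SELECT @next AS next_id;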
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('configgroup') AND type = 'U')
-BEGIN
-CREATE TABLE configgroup (
-  group_id BIGINT,
-  cluster_id BIGINT NOT NULL,
-  group_name VARCHAR(255) NOT NULL,
-  tag VARCHAR(1024) NOT NULL,
-  description VARCHAR(1024),
-  create_timestamp BIGINT NOT NULL,
-  service_name VARCHAR(255),
-  CONSTRAINT PK_configgroup PRIMARY KEY CLUSTERED (group_id),
-  CONSTRAINT FK_configgroup_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('confgroupclusterconfigmapping') AND type = 'U')
-BEGIN
-CREATE TABLE confgroupclusterconfigmapping (
-  config_group_id BIGINT NOT NULL,
-  cluster_id BIGINT NOT NULL,
-  config_type VARCHAR(255) NOT NULL,
-  version_tag VARCHAR(255) NOT NULL,
-  user_name VARCHAR(255) DEFAULT '_db',
-  create_timestamp BIGINT NOT NULL,
-  CONSTRAINT PK_confgroupclustercfgmapping PRIMARY KEY CLUSTERED (config_group_id, cluster_id, config_type),
-  CONSTRAINT FK_cgccm_gid FOREIGN KEY (config_group_id) REFERENCES configgroup (group_id),
-  CONSTRAINT FK_confg FOREIGN KEY (cluster_id, config_type, version_tag) REFERENCES clusterconfig (cluster_id, type_name, version_tag))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('configgrouphostmapping') AND type = 'U')
-BEGIN
-CREATE TABLE configgrouphostmapping (
-  config_group_id BIGINT NOT NULL,
-  host_id BIGINT NOT NULL,
-  CONSTRAINT PK_configgrouphostmapping PRIMARY KEY CLUSTERED (config_group_id, host_id),
-  CONSTRAINT FK_cghm_cgid FOREIGN KEY (config_group_id) REFERENCES configgroup (group_id),
-  CONSTRAINT FK_cghm_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('requestschedulebatchrequest') AND type = 'U')
-BEGIN
-CREATE TABLE requestschedulebatchrequest (
-  schedule_id BIGINT,
-  batch_id BIGINT,
-  request_id BIGINT,
-  request_type VARCHAR(255),
-  request_uri VARCHAR(1024),
-  request_body VARBINARY(MAX),
-  request_status VARCHAR(255),
-  return_code SMALLINT,
-  return_message TEXT,
-  CONSTRAINT PK_requestschedulebatchrequest PRIMARY KEY CLUSTERED (schedule_id, batch_id),
-  CONSTRAINT FK_rsbatchrequest_schedule_id FOREIGN KEY (schedule_id) REFERENCES requestschedule (schedule_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('blueprint') AND type = 'U')
-BEGIN
-CREATE TABLE blueprint (
-  blueprint_name VARCHAR(255) NOT NULL,
-  stack_id BIGINT NOT NULL,
-  security_type VARCHAR(32) NOT NULL DEFAULT 'NONE',
-  security_descriptor_reference VARCHAR(255),
-  CONSTRAINT PK_blueprint PRIMARY KEY CLUSTERED (blueprint_name),
-  CONSTRAINT FK_blueprint_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('hostgroup') AND type = 'U')
-BEGIN
-CREATE TABLE hostgroup (
-  blueprint_name VARCHAR(255) NOT NULL,
-  NAME VARCHAR(255) NOT NULL,
-  cardinality VARCHAR(255) NOT NULL,
-  CONSTRAINT PK_hostgroup PRIMARY KEY CLUSTERED (blueprint_name, NAME),
-  CONSTRAINT FK_hg_blueprint_name FOREIGN KEY (blueprint_name) REFERENCES blueprint(blueprint_name))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('hostgroup_component') AND type = 'U')
-BEGIN
-CREATE TABLE hostgroup_component (
-  blueprint_name VARCHAR(255) NOT NULL,
-  hostgroup_name VARCHAR(255) NOT NULL,
-  NAME VARCHAR(255) NOT NULL,
-  provision_action VARCHAR(255),
-  CONSTRAINT PK_hostgroup_component PRIMARY KEY CLUSTERED (blueprint_name, hostgroup_name, NAME),
-  CONSTRAINT FK_hgc_blueprint_name FOREIGN KEY (blueprint_name, hostgroup_name) REFERENCES hostgroup (blueprint_name, name))
-END
-
-
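blueprint, hostgroup, and hostgroup_component form a strict hierarchy enforced by the composite foreign keys above. Hypothetical rows illustrating the chain, assuming a stack row with stack_id = 1 already exists:

INSERT INTO blueprint (blueprint_name, stack_id) VALUES ('hdp-small', 1);
INSERT INTO hostgroup (blueprint_name, NAME, cardinality) VALUES ('hdp-small', 'masters', '1');
INSERT INTO hostgroup_component (blueprint_name, hostgroup_name, NAME) VALUES ('hdp-small', 'masters', 'NAMENODE');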
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('blueprint_configuration') AND type = 'U')
-BEGIN
-CREATE TABLE blueprint_configuration (
-  blueprint_name VARCHAR(255) NOT NULL,
-  type_name VARCHAR(255) NOT NULL,
-  config_data VARCHAR(MAX) NOT NULL,
-  config_attributes VARCHAR(MAX),
-  CONSTRAINT PK_blueprint_configuration PRIMARY KEY CLUSTERED (blueprint_name, type_name),
-  CONSTRAINT FK_cfg_blueprint_name FOREIGN KEY (blueprint_name) REFERENCES blueprint(blueprint_name))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('blueprint_setting') AND type = 'U')
-BEGIN
-CREATE TABLE blueprint_setting (
-  id BIGINT NOT NULL,
-  blueprint_name VARCHAR(255) NOT NULL,
-  setting_name VARCHAR(255) NOT NULL,
-  setting_data TEXT NOT NULL,
-  CONSTRAINT PK_blueprint_setting PRIMARY KEY (id),
-  CONSTRAINT UQ_blueprint_setting_name UNIQUE(blueprint_name,setting_name),
-  CONSTRAINT FK_blueprint_setting_name FOREIGN KEY (blueprint_name) REFERENCES blueprint(blueprint_name)
-  )
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('hostgroup_configuration') AND type = 'U')
-BEGIN
-CREATE TABLE hostgroup_configuration (
-  blueprint_name VARCHAR(255) NOT NULL,
-  hostgroup_name VARCHAR(255) NOT NULL,
-  type_name VARCHAR(255) NOT NULL,
-  config_data VARCHAR(MAX) NOT NULL,
-  config_attributes VARCHAR(MAX),
-  CONSTRAINT PK_hostgroup_configuration PRIMARY KEY CLUSTERED (blueprint_name, hostgroup_name, type_name),
-  CONSTRAINT FK_hg_cfg_bp_hg_name FOREIGN KEY (blueprint_name, hostgroup_name) REFERENCES hostgroup (blueprint_name, name))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('viewmain') AND type = 'U')
-BEGIN
-CREATE TABLE viewmain (
-  view_name VARCHAR(255) NOT NULL,
-  label VARCHAR(255),
-  description VARCHAR(2048),
-  version VARCHAR(255),
-  build VARCHAR(128),
-  resource_type_id INTEGER NOT NULL,
-  icon VARCHAR(255),
-  icon64 VARCHAR(255),
-  archive VARCHAR(255),
-  mask VARCHAR(255),
-  system_view BIT NOT NULL DEFAULT 0,
-  CONSTRAINT PK_viewmain PRIMARY KEY CLUSTERED (view_name),
-  CONSTRAINT FK_view_resource_type_id FOREIGN KEY (resource_type_id) REFERENCES adminresourcetype(resource_type_id))
-END
-
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('viewurl') AND type = 'U')
-BEGIN
-CREATE TABLE viewurl (
-  url_id BIGINT,
-  url_name VARCHAR(255) NOT NULL,
-  url_suffix VARCHAR(255) NOT NULL,
-  CONSTRAINT PK_viewurl PRIMARY KEY CLUSTERED (url_id)
-)
-END
-
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('viewinstance') AND type = 'U')
-BEGIN
-CREATE TABLE viewinstance (
-  view_instance_id BIGINT,
-  resource_id BIGINT NOT NULL,
-  view_name VARCHAR(255) NOT NULL,
-  NAME VARCHAR(255) NOT NULL,
-  label VARCHAR(255),
-  description VARCHAR(2048),
-  visible CHAR(1),
-  icon VARCHAR(255),
-  icon64 VARCHAR(255),
-  xml_driven CHAR(1),
-  alter_names BIT NOT NULL DEFAULT 1,
-  cluster_handle BIGINT,
-  cluster_type VARCHAR(100) NOT NULL DEFAULT 'LOCAL_AMBARI',
-  short_url BIGINT,
-  CONSTRAINT PK_viewinstance PRIMARY KEY CLUSTERED (view_instance_id),
-  CONSTRAINT FK_instance_url_id FOREIGN KEY (short_url) REFERENCES viewurl(url_id),
-  CONSTRAINT FK_viewinst_view_name FOREIGN KEY (view_name) REFERENCES viewmain(view_name),
-  CONSTRAINT FK_viewinstance_resource_id FOREIGN KEY (resource_id) REFERENCES adminresource(resource_id),
-  CONSTRAINT UQ_viewinstance_name UNIQUE (view_name, name),
-  CONSTRAINT UQ_viewinstance_name_id UNIQUE (view_instance_id, view_name, name))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('viewinstancedata') AND type = 'U')
-BEGIN
-CREATE TABLE viewinstancedata (
-  view_instance_id BIGINT,
-  view_name VARCHAR(255) NOT NULL,
-  view_instance_name VARCHAR(255) NOT NULL,
-  NAME VARCHAR(255) NOT NULL,
-  user_name VARCHAR(255) NOT NULL,
-  value VARCHAR(2000) NOT NULL,
-  CONSTRAINT PK_viewinstancedata PRIMARY KEY CLUSTERED (view_instance_id, NAME, user_name),
-  CONSTRAINT FK_viewinstdata_view_name FOREIGN KEY (view_instance_id, view_name, view_instance_name) REFERENCES viewinstance(view_instance_id, view_name, name))
-END
-
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('viewinstanceproperty') AND type = 'U')
-BEGIN
-CREATE TABLE viewinstanceproperty (
-  view_name VARCHAR(255) NOT NULL,
-  view_instance_name VARCHAR(255) NOT NULL,
-  NAME VARCHAR(255) NOT NULL,
-  value VARCHAR(2000),
-  CONSTRAINT PK_viewinstanceproperty PRIMARY KEY CLUSTERED (view_name, view_instance_name, NAME),
-  CONSTRAINT FK_viewinstprop_view_name FOREIGN KEY (view_name, view_instance_name) REFERENCES viewinstance(view_name, name))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('viewparameter') AND type = 'U')
-BEGIN
-CREATE TABLE viewparameter (
-  view_name VARCHAR(255) NOT NULL,
-  NAME VARCHAR(255) NOT NULL,
-  description VARCHAR(2048),
-  label VARCHAR(255),
-  placeholder VARCHAR(255),
-  default_value VARCHAR(2000),
-  cluster_config VARCHAR(255),
-  required CHAR(1),
-  masked CHAR(1),
-  CONSTRAINT PK_viewparameter PRIMARY KEY CLUSTERED (view_name, NAME),
-  CONSTRAINT FK_viewparam_view_name FOREIGN KEY (view_name) REFERENCES viewmain(view_name))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('viewresource') AND type = 'U')
-BEGIN
-CREATE TABLE viewresource (
-  view_name VARCHAR(255) NOT NULL,
-  NAME VARCHAR(255) NOT NULL,
-  plural_name VARCHAR(255),
-  id_property VARCHAR(255),
-  subResource_names VARCHAR(255),
-  provider VARCHAR(255),
-  service VARCHAR(255),
-  resource VARCHAR(255),
-  CONSTRAINT PK_viewresource PRIMARY KEY CLUSTERED (view_name, NAME),
-  CONSTRAINT FK_viewres_view_name FOREIGN KEY (view_name) REFERENCES viewmain(view_name))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('viewentity') AND type = 'U')
-BEGIN
-CREATE TABLE viewentity (
-  id BIGINT NOT NULL,
-  view_name VARCHAR(255) NOT NULL,
-  view_instance_name VARCHAR(255) NOT NULL,
-  class_name VARCHAR(255) NOT NULL,
-  id_property VARCHAR(255),
-  CONSTRAINT PK_viewentity PRIMARY KEY CLUSTERED (id),
-  CONSTRAINT FK_viewentity_view_name FOREIGN KEY (view_name, view_instance_name) REFERENCES viewinstance(view_name, name))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('adminpermission') AND type = 'U')
-BEGIN
-CREATE TABLE adminpermission (
-  permission_id BIGINT NOT NULL,
-  permission_name VARCHAR(255) NOT NULL,
-  resource_type_id INTEGER NOT NULL,
-  permission_label VARCHAR(255),
-  principal_id BIGINT NOT NULL,
-  sort_order SMALLINT NOT NULL DEFAULT 1,
-  CONSTRAINT PK_adminpermission PRIMARY KEY CLUSTERED (permission_id),
-  CONSTRAINT FK_permission_resource_type_id FOREIGN KEY (resource_type_id) REFERENCES adminresourcetype(resource_type_id),
-  CONSTRAINT FK_permission_principal_id FOREIGN KEY (principal_id) REFERENCES adminprincipal(principal_id),
-  CONSTRAINT UQ_perm_name_resource_type_id UNIQUE (permission_name, resource_type_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('roleauthorization') AND type = 'U')
-BEGIN
-CREATE TABLE roleauthorization (
-  authorization_id VARCHAR(100) NOT NULL,
-  authorization_name VARCHAR(255) NOT NULL,
-  CONSTRAINT PK_roleauthorization PRIMARY KEY (authorization_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('permission_roleauthorization') AND type = 'U')
-BEGIN
-CREATE TABLE permission_roleauthorization (
-  permission_id BIGINT NOT NULL,
-  authorization_id VARCHAR(100) NOT NULL,
-  CONSTRAINT PK_permsn_roleauthorization PRIMARY KEY (permission_id, authorization_id),
-  CONSTRAINT FK_permission_roleauth_aid FOREIGN KEY (authorization_id) REFERENCES roleauthorization(authorization_id),
-  CONSTRAINT FK_permission_roleauth_pid FOREIGN KEY (permission_id) REFERENCES adminpermission(permission_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('adminprivilege') AND type = 'U')
-BEGIN
-CREATE TABLE adminprivilege (
-  privilege_id BIGINT,
-  permission_id BIGINT NOT NULL,
-  resource_id BIGINT NOT NULL,
-  principal_id BIGINT NOT NULL,
-  CONSTRAINT PK_adminprivilege PRIMARY KEY CLUSTERED (privilege_id),
-  CONSTRAINT FK_privilege_permission_id FOREIGN KEY (permission_id) REFERENCES adminpermission(permission_id),
-  CONSTRAINT FK_privilege_principal_id FOREIGN KEY (principal_id) REFERENCES adminprincipal(principal_id),
-  CONSTRAINT FK_privilege_resource_id FOREIGN KEY (resource_id) REFERENCES adminresource(resource_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('host_version') AND type = 'U')
-BEGIN
-CREATE TABLE host_version (
-  id BIGINT NOT NULL,
-  repo_version_id BIGINT NOT NULL,
-  host_id BIGINT NOT NULL,
-  STATE VARCHAR(32) NOT NULL,
-  CONSTRAINT PK_host_version PRIMARY KEY CLUSTERED (id),
-  CONSTRAINT FK_host_version_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
-  CONSTRAINT FK_host_version_repovers_id FOREIGN KEY (repo_version_id) REFERENCES repo_version (repo_version_id),
-  CONSTRAINT UQ_host_repo UNIQUE(host_id, repo_version_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('artifact') AND type = 'U')
-BEGIN
-CREATE TABLE artifact (
-  artifact_name VARCHAR(255) NOT NULL,
-  artifact_data TEXT NOT NULL,
-  foreign_keys VARCHAR(255) NOT NULL,
-  CONSTRAINT PK_artifact PRIMARY KEY CLUSTERED (artifact_name, foreign_keys)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('widget') AND type = 'U')
-BEGIN
-CREATE TABLE widget (
-  id BIGINT NOT NULL,
-  widget_name VARCHAR(255) NOT NULL,
-  widget_type VARCHAR(255) NOT NULL,
-  metrics TEXT,
-  time_created BIGINT NOT NULL,
-  author VARCHAR(255),
-  description VARCHAR(2048),
-  default_section_name VARCHAR(255),
-  scope VARCHAR(255),
-  widget_values VARCHAR(4000),
-  properties VARCHAR(4000),
-  cluster_id BIGINT NOT NULL,
-  CONSTRAINT PK_widget PRIMARY KEY CLUSTERED (id)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('widget_layout') AND type = 'U')
-BEGIN
-CREATE TABLE widget_layout (
-  id BIGINT NOT NULL,
-  layout_name VARCHAR(255) NOT NULL,
-  section_name VARCHAR(255) NOT NULL,
-  scope VARCHAR(255) NOT NULL,
-  user_name VARCHAR(255) NOT NULL,
-  display_name VARCHAR(255),
-  cluster_id BIGINT NOT NULL,
-  CONSTRAINT PK_widget_layout PRIMARY KEY CLUSTERED (id)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('widget_layout_user_widget') AND type = 'U')
-BEGIN
-CREATE TABLE widget_layout_user_widget (
-  widget_layout_id BIGINT NOT NULL,
-  widget_id BIGINT NOT NULL,
-  widget_order smallint,
-  CONSTRAINT PK_widget_layout_user_widget PRIMARY KEY CLUSTERED (widget_layout_id, widget_id),
-  CONSTRAINT FK_widget_id FOREIGN KEY (widget_id) REFERENCES widget(id),
-  CONSTRAINT FK_widget_layout_id FOREIGN KEY (widget_layout_id) REFERENCES widget_layout(id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('topology_request') AND type = 'U')
-BEGIN
-CREATE TABLE topology_request (
-  id BIGINT NOT NULL,
-  action VARCHAR(255) NOT NULL,
-  cluster_id BIGINT NOT NULL,
-  bp_name VARCHAR(100) NOT NULL,
-  cluster_properties TEXT,
-  cluster_attributes TEXT,
-  description VARCHAR(1024),
-  provision_action VARCHAR(255),
-  CONSTRAINT PK_topology_request PRIMARY KEY CLUSTERED (id),
-  CONSTRAINT FK_topology_request_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('topology_hostgroup') AND type = 'U')
-BEGIN
-CREATE TABLE topology_hostgroup (
-  id BIGINT NOT NULL,
-  name VARCHAR(255) NOT NULL,
-  group_properties TEXT,
-  group_attributes TEXT,
-  request_id BIGINT NOT NULL,
-  CONSTRAINT PK_topology_hostgroup PRIMARY KEY CLUSTERED (id),
-  CONSTRAINT FK_hostgroup_req_id FOREIGN KEY (request_id) REFERENCES topology_request(id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('topology_host_info') AND type = 'U')
-BEGIN
-CREATE TABLE topology_host_info (
-  id BIGINT NOT NULL,
-  group_id BIGINT NOT NULL,
-  fqdn VARCHAR(255),
-  host_id BIGINT,
-  host_count INTEGER,
-  predicate VARCHAR(2048),
-  rack_info VARCHAR(255),
-  CONSTRAINT PK_topology_host_info PRIMARY KEY CLUSTERED (id),
-  CONSTRAINT FK_hostinfo_group_id FOREIGN KEY (group_id) REFERENCES topology_hostgroup(id),
-  CONSTRAINT FK_hostinfo_host_id FOREIGN KEY (host_id) REFERENCES hosts(host_id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('topology_logical_request') AND type = 'U')
-BEGIN
-CREATE TABLE topology_logical_request (
-  id BIGINT NOT NULL,
-  request_id BIGINT NOT NULL,
-  description VARCHAR(1024),
-  CONSTRAINT PK_topology_logical_request PRIMARY KEY CLUSTERED (id),
-  CONSTRAINT FK_logicalreq_req_id FOREIGN KEY (request_id) REFERENCES topology_request(id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('topology_host_request') AND type = 'U')
-BEGIN
-CREATE TABLE topology_host_request (
-  id BIGINT NOT NULL,
-  logical_request_id BIGINT NOT NULL,
-  group_id BIGINT NOT NULL,
-  stage_id BIGINT NOT NULL,
-  host_name VARCHAR(255),
-  CONSTRAINT PK_topology_host_request PRIMARY KEY CLUSTERED (id),
-  CONSTRAINT FK_hostreq_group_id FOREIGN KEY (group_id) REFERENCES topology_hostgroup(id),
-  CONSTRAINT FK_hostreq_logicalreq_id FOREIGN KEY (logical_request_id) REFERENCES topology_logical_request(id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('topology_host_task') AND type = 'U')
-BEGIN
-CREATE TABLE topology_host_task (
-  id BIGINT NOT NULL,
-  host_request_id BIGINT NOT NULL,
-  type VARCHAR(255) NOT NULL,
-  CONSTRAINT PK_topology_host_task PRIMARY KEY CLUSTERED (id),
-  CONSTRAINT FK_hosttask_req_id FOREIGN KEY (host_request_id) REFERENCES topology_host_request (id))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('topology_logical_task') AND type = 'U')
-BEGIN
-CREATE TABLE topology_logical_task (
-  id BIGINT NOT NULL,
-  host_task_id BIGINT NOT NULL,
-  physical_task_id BIGINT,
-  component VARCHAR(255) NOT NULL,
-  CONSTRAINT PK_topology_logical_task PRIMARY KEY CLUSTERED (id),
-  CONSTRAINT FK_ltask_hosttask_id FOREIGN KEY (host_task_id) REFERENCES topology_host_task (id),
-  CONSTRAINT FK_ltask_hrc_id FOREIGN KEY (physical_task_id) REFERENCES host_role_command (task_id))
-END
-
-
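Read together, the topology tables model one provisioning flow: a topology_request owns topology_hostgroup rows, each hostgroup is satisfied by topology_host_request rows (tied to a topology_logical_request), each host request fans out into topology_host_task rows, and each host task maps to topology_logical_task rows whose physical_task_id finally points at a concrete host_role_command. Every link in that chain is one of the foreign keys above.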
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('setting') AND type = 'U')
-BEGIN
-CREATE TABLE setting (
-  id BIGINT NOT NULL,
-  name VARCHAR(255) NOT NULL UNIQUE,
-  setting_type VARCHAR(255) NOT NULL,
-  content TEXT NOT NULL,
-  updated_by VARCHAR(255) NOT NULL DEFAULT '_db',
-  update_timestamp BIGINT NOT NULL,
-  CONSTRAINT PK_setting PRIMARY KEY (id)
-)
-END
-
-
--- Remote Cluster table
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('remoteambaricluster') AND type = 'U')
-BEGIN
-CREATE TABLE remoteambaricluster(
-  cluster_id BIGINT NOT NULL,
-  name VARCHAR(255) NOT NULL,
-  username VARCHAR(255) NOT NULL,
-  url VARCHAR(255) NOT NULL,
-  password VARCHAR(255) NOT NULL,
-  CONSTRAINT PK_remote_ambari_cluster PRIMARY KEY (cluster_id),
-  CONSTRAINT UQ_remote_ambari_cluster UNIQUE (name))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('remoteambariclusterservice') AND type = 'U')
-BEGIN
-CREATE TABLE remoteambariclusterservice(
-  id BIGINT NOT NULL,
-  cluster_id BIGINT NOT NULL,
-  service_name VARCHAR(255) NOT NULL,
-  CONSTRAINT PK_remote_ambari_service PRIMARY KEY (id),
-  CONSTRAINT FK_remote_ambari_cluster_id FOREIGN KEY (cluster_id) REFERENCES remoteambaricluster(cluster_id)
-)
-END
-
-
--- Remote Cluster table ends
-
--- upgrade tables
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('upgrade') AND type = 'U')
-BEGIN
-CREATE TABLE upgrade (
-  upgrade_id BIGINT NOT NULL,
-  cluster_id BIGINT NOT NULL,
-  request_id BIGINT NOT NULL,
-  direction VARCHAR(255) DEFAULT 'UPGRADE' NOT NULL,
-  orchestration VARCHAR(255) DEFAULT 'STANDARD' NOT NULL,
-  upgrade_package VARCHAR(255) NOT NULL,
-  upgrade_type VARCHAR(32) NOT NULL,
-  repo_version_id BIGINT NOT NULL,
-  skip_failures BIT NOT NULL DEFAULT 0,
-  skip_sc_failures BIT NOT NULL DEFAULT 0,
-  downgrade_allowed BIT NOT NULL DEFAULT 1,
-  revert_allowed BIT NOT NULL DEFAULT 0,
-  suspended BIT DEFAULT 0 NOT NULL,
-  CONSTRAINT PK_upgrade PRIMARY KEY CLUSTERED (upgrade_id),
-  FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id),
-  FOREIGN KEY (request_id) REFERENCES request(request_id),
-  FOREIGN KEY (repo_version_id) REFERENCES repo_version(repo_version_id)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('upgrade_group') AND type = 'U')
-BEGIN
-CREATE TABLE upgrade_group (
-  upgrade_group_id BIGINT NOT NULL,
-  upgrade_id BIGINT NOT NULL,
-  group_name VARCHAR(255) DEFAULT '' NOT NULL,
-  group_title VARCHAR(1024) DEFAULT '' NOT NULL,
-  CONSTRAINT PK_upgrade_group PRIMARY KEY CLUSTERED (upgrade_group_id),
-  FOREIGN KEY (upgrade_id) REFERENCES upgrade(upgrade_id)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('upgrade_item') AND type = 'U')
-BEGIN
-CREATE TABLE upgrade_item (
-  upgrade_item_id BIGINT NOT NULL,
-  upgrade_group_id BIGINT NOT NULL,
-  stage_id BIGINT NOT NULL,
-  state VARCHAR(255) DEFAULT 'NONE' NOT NULL,
-  hosts TEXT,
-  tasks TEXT,
-  item_text TEXT,
-  CONSTRAINT PK_upgrade_item PRIMARY KEY CLUSTERED (upgrade_item_id),
-  FOREIGN KEY (upgrade_group_id) REFERENCES upgrade_group(upgrade_group_id)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('upgrade_history') AND type = 'U')
-BEGIN
-CREATE TABLE upgrade_history(
-  id BIGINT NOT NULL,
-  upgrade_id BIGINT NOT NULL,
-  service_name VARCHAR(255) NOT NULL,
-  component_name VARCHAR(255) NOT NULL,
-  from_repo_version_id BIGINT NOT NULL,
-  target_repo_version_id BIGINT NOT NULL,
-  CONSTRAINT PK_upgrade_hist PRIMARY KEY (id),
-  CONSTRAINT FK_upgrade_hist_upgrade_id FOREIGN KEY (upgrade_id) REFERENCES upgrade (upgrade_id),
-  CONSTRAINT FK_upgrade_hist_from_repo FOREIGN KEY (from_repo_version_id) REFERENCES repo_version (repo_version_id),
-  CONSTRAINT FK_upgrade_hist_target_repo FOREIGN KEY (target_repo_version_id) REFERENCES repo_version (repo_version_id),
-  CONSTRAINT UQ_upgrade_hist UNIQUE (upgrade_id, component_name, service_name)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('servicecomponent_version') AND type = 'U')
-BEGIN
-CREATE TABLE servicecomponent_version(
-  id BIGINT NOT NULL,
-  component_id BIGINT NOT NULL,
-  repo_version_id BIGINT NOT NULL,
-  state VARCHAR(32) NOT NULL,
-  user_name VARCHAR(255) NOT NULL,
-  CONSTRAINT PK_sc_version PRIMARY KEY (id),
-  CONSTRAINT FK_scv_component_id FOREIGN KEY (component_id) REFERENCES servicecomponentdesiredstate (id),
-  CONSTRAINT FK_scv_repo_version_id FOREIGN KEY (repo_version_id) REFERENCES repo_version (repo_version_id)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('ambari_operation_history') AND type = 'U')
-BEGIN
-CREATE TABLE ambari_operation_history(
-  id BIGINT NOT NULL,
-  from_version VARCHAR(255) NOT NULL,
-  to_version VARCHAR(255) NOT NULL,
-  start_time BIGINT NOT NULL,
-  end_time BIGINT,
-  operation_type VARCHAR(255) NOT NULL,
-  comments TEXT,
-  CONSTRAINT PK_ambari_operation_history PRIMARY KEY (id)
-)
-END
-
-
-
--- tasks indices --
-IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_stage_request_id')
-BEGIN
-CREATE INDEX idx_stage_request_id ON stage (request_id)
-END
-
-IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_hrc_request_id')
-BEGIN
-CREATE INDEX idx_hrc_request_id ON host_role_command (request_id)
-END
-
-IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_hrc_status_role')
-BEGIN
-CREATE INDEX idx_hrc_status_role ON host_role_command (status, role)
-END
-
-IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_rsc_request_id')
-BEGIN
-CREATE INDEX idx_rsc_request_id ON role_success_criteria (request_id)
-END
-
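These indexes presumably back the common lookups on the request/stage/task hierarchy: request-scoped scans of stages, commands, and success criteria, plus status polling by (status, role). A typical query the first two serve, with a hypothetical request id:

SELECT task_id, status
FROM host_role_command
WHERE request_id = 42;  -- hypothetical request id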
-
-
-- altering tables by adding constraints ----------
-
-- altering tables by creating foreign keys ----------
-- Note: Oracle limits FK names to 32 characters, so the same (short) FK name must be used across all DB types.
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('FK_clusters_upgrade_id') AND type = 'F')
-BEGIN
-ALTER TABLE clusters ADD CONSTRAINT FK_clusters_upgrade_id FOREIGN KEY (upgrade_id) REFERENCES upgrade (upgrade_id)
-END
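This foreign key has to be deferred because the reference is circular: upgrade already points at clusters through its cluster_id, and clusters.upgrade_id points back at upgrade, so FK_clusters_upgrade_id can only be added once both tables exist.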
-
-
--- Kerberos
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('kerberos_principal') AND type = 'U')
-BEGIN
-CREATE TABLE kerberos_principal (
-  principal_name VARCHAR(255) NOT NULL,
-  is_service SMALLINT NOT NULL DEFAULT 1,
-  cached_keytab_path VARCHAR(255),
-  CONSTRAINT PK_kerberos_principal PRIMARY KEY CLUSTERED (principal_name)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('kerberos_principal_host') AND type = 'U')
-BEGIN
-CREATE TABLE kerberos_principal_host (
-  principal_name VARCHAR(255) NOT NULL,
-  host_id BIGINT NOT NULL,
-  CONSTRAINT PK_kerberos_principal_host PRIMARY KEY CLUSTERED (principal_name, host_id),
-  CONSTRAINT FK_krb_pr_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
-  CONSTRAINT FK_krb_pr_host_principalname FOREIGN KEY (principal_name) REFERENCES kerberos_principal (principal_name))
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('kerberos_descriptor') AND type = 'U')
-BEGIN
-CREATE TABLE kerberos_descriptor
-(
-   kerberos_descriptor_name   VARCHAR(255) NOT NULL,
-   kerberos_descriptor        VARCHAR(MAX) NOT NULL,
-   CONSTRAINT PK_kerberos_descriptor PRIMARY KEY (kerberos_descriptor_name)
-)
-END
-
-
--- Kerberos (end)
-
--- Alerting Framework
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_definition') AND type = 'U')
-BEGIN
-CREATE TABLE alert_definition (
-  definition_id BIGINT NOT NULL,
-  cluster_id BIGINT NOT NULL,
-  definition_name VARCHAR(255) NOT NULL,
-  service_name VARCHAR(255) NOT NULL,
-  component_name VARCHAR(255),
-  scope VARCHAR(255) DEFAULT 'ANY' NOT NULL,
-  label VARCHAR(255),
-  help_url VARCHAR(512),
-  description TEXT,
-  enabled SMALLINT DEFAULT 1 NOT NULL,
-  schedule_interval INTEGER NOT NULL,
-  source_type VARCHAR(255) NOT NULL,
-  alert_source TEXT NOT NULL,
-  hash VARCHAR(64) NOT NULL,
-  ignore_host SMALLINT DEFAULT 0 NOT NULL,
-  repeat_tolerance INTEGER DEFAULT 1 NOT NULL,
-  repeat_tolerance_enabled SMALLINT DEFAULT 0 NOT NULL,
-  CONSTRAINT PK_alert_definition PRIMARY KEY CLUSTERED (definition_id),
-  FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id),
-  CONSTRAINT uni_alert_def_name UNIQUE(cluster_id,definition_name)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_history') AND type = 'U')
-BEGIN
-CREATE TABLE alert_history (
-  alert_id BIGINT NOT NULL,
-  cluster_id BIGINT NOT NULL,
-  alert_definition_id BIGINT NOT NULL,
-  service_name VARCHAR(255) NOT NULL,
-  component_name VARCHAR(255),
-  host_name VARCHAR(255),
-  alert_instance VARCHAR(255),
-  alert_timestamp BIGINT NOT NULL,
-  alert_label VARCHAR(1024),
-  alert_state VARCHAR(255) NOT NULL,
-  alert_text TEXT,
-  CONSTRAINT PK_alert_history PRIMARY KEY CLUSTERED (alert_id),
-  FOREIGN KEY (alert_definition_id) REFERENCES alert_definition(definition_id),
-  FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_current') AND type = 'U')
-BEGIN
-CREATE TABLE alert_current (
-  alert_id BIGINT NOT NULL,
-  definition_id BIGINT NOT NULL,
-  history_id BIGINT NOT NULL UNIQUE,
-  maintenance_state VARCHAR(255) NOT NULL,
-  original_timestamp BIGINT NOT NULL,
-  latest_timestamp BIGINT NOT NULL,
-  latest_text TEXT,
-  occurrences BIGINT NOT NULL DEFAULT 1,
-  firmness VARCHAR(255) NOT NULL DEFAULT 'HARD',
-  CONSTRAINT PK_alert_current PRIMARY KEY CLUSTERED (alert_id),
-  FOREIGN KEY (definition_id) REFERENCES alert_definition(definition_id),
-  FOREIGN KEY (history_id) REFERENCES alert_history(alert_id)
-)
-END
-
-
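alert_history is the append-only record of alert state transitions, while alert_current tracks the latest state of each active alert, pointing at its newest history row through the UNIQUE history_id and carrying occurrences and firmness for repeat-tolerance handling. A sketch of reading current alerts with their latest text:

SELECT d.definition_name, c.firmness, c.occurrences, h.alert_state, h.alert_text
FROM alert_current c
JOIN alert_definition d ON d.definition_id = c.definition_id
JOIN alert_history h ON h.alert_id = c.history_id;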
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_group') AND type = 'U')
-BEGIN
-CREATE TABLE alert_group (
-  group_id BIGINT NOT NULL,
-  cluster_id BIGINT NOT NULL,
-  group_name VARCHAR(255) NOT NULL,
-  is_default SMALLINT NOT NULL DEFAULT 0,
-  service_name VARCHAR(255),
-  CONSTRAINT PK_alert_group PRIMARY KEY CLUSTERED (group_id),
-  CONSTRAINT uni_alert_group_name UNIQUE(cluster_id,group_name)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_target') AND type = 'U')
-BEGIN
-CREATE TABLE alert_target (
-  target_id BIGINT NOT NULL,
-  target_name VARCHAR(255) NOT NULL UNIQUE,
-  notification_type VARCHAR(64) NOT NULL,
-  properties TEXT,
-  description VARCHAR(1024),
-  is_global SMALLINT NOT NULL DEFAULT 0,
-  is_enabled SMALLINT NOT NULL DEFAULT 1,
-  CONSTRAINT PK_alert_target PRIMARY KEY CLUSTERED (target_id)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_target_states') AND type = 'U')
-BEGIN
-CREATE TABLE alert_target_states (
-  target_id BIGINT NOT NULL,
-  alert_state VARCHAR(255) NOT NULL,
-  FOREIGN KEY (target_id) REFERENCES alert_target(target_id)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_group_target') AND type = 'U')
-BEGIN
-CREATE TABLE alert_group_target (
-  group_id BIGINT NOT NULL,
-  target_id BIGINT NOT NULL,
-  CONSTRAINT PK_alert_group_target PRIMARY KEY CLUSTERED (group_id, target_id),
-  FOREIGN KEY (group_id) REFERENCES alert_group(group_id),
-  FOREIGN KEY (target_id) REFERENCES alert_target(target_id)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_grouping') AND type = 'U')
-BEGIN
-CREATE TABLE alert_grouping (
-  definition_id BIGINT NOT NULL,
-  group_id BIGINT NOT NULL,
-  CONSTRAINT PK_alert_grouping PRIMARY KEY CLUSTERED (group_id, definition_id),
-  FOREIGN KEY (definition_id) REFERENCES alert_definition(definition_id),
-  FOREIGN KEY (group_id) REFERENCES alert_group(group_id)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_notice') AND type = 'U')
-BEGIN
-CREATE TABLE alert_notice (
-  notification_id BIGINT NOT NULL,
-  target_id BIGINT NOT NULL,
-  history_id BIGINT NOT NULL,
-  notify_state VARCHAR(255) NOT NULL,
-  uuid VARCHAR(64) NOT NULL UNIQUE,
-  CONSTRAINT PK_alert_notice PRIMARY KEY CLUSTERED (notification_id),
-  FOREIGN KEY (target_id) REFERENCES alert_target(target_id),
-  FOREIGN KEY (history_id) REFERENCES alert_history(alert_id)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_alert_history_def_id')
-BEGIN
-CREATE INDEX idx_alert_history_def_id on alert_history(alert_definition_id)
-END
-
-IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_alert_history_service')
-BEGIN
-CREATE INDEX idx_alert_history_service on alert_history(service_name)
-END
-
-IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_alert_history_host')
-BEGIN
-CREATE INDEX idx_alert_history_host on alert_history(host_name)
-END
-
-IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_alert_history_time')
-BEGIN
-CREATE INDEX idx_alert_history_time on alert_history(alert_timestamp)
-END
-
-IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_alert_history_state')
-BEGIN
-CREATE INDEX idx_alert_history_state on alert_history(alert_state)
-END
-
-IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_alert_group_name')
-BEGIN
-CREATE INDEX idx_alert_group_name on alert_group(group_name)
-END
-
-IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_alert_notice_state')
-BEGIN
-CREATE INDEX idx_alert_notice_state on alert_notice(notify_state)
-END
-
-
----------inserting seed data-----------
-BEGIN TRANSACTION
-  DELETE metainfo;
-  DELETE adminprivilege;
-  DELETE permission_roleauthorization;
-  DELETE roleauthorization;
-  DELETE adminpermission;
-  DELETE users;
-  DELETE adminprincipal;
-  DELETE adminprincipaltype;
-  DELETE adminresource;
-  DELETE adminresourcetype;
-  DELETE ambari_sequences;
-  INSERT INTO ambari_sequences (sequence_name, [sequence_value])
-  VALUES
-    ('cluster_id_seq', 1),
-    ('host_id_seq', 0),
-    ('user_id_seq', 2),
-    ('group_id_seq', 1),
-    ('member_id_seq', 1),
-    ('host_role_command_id_seq', 1),
-    ('configgroup_id_seq', 1),
-    ('requestschedule_id_seq', 1),
-    ('resourcefilter_id_seq', 1),
-    ('viewentity_id_seq', 0),
-    ('operation_level_id_seq', 1),
-    ('view_instance_id_seq', 1),
-    ('resource_type_id_seq', 4),
-    ('resource_id_seq', 2),
-    ('principal_type_id_seq', 8),
-    ('principal_id_seq', 13),
-    ('permission_id_seq', 7),
-    ('privilege_id_seq', 1),
-    ('alert_definition_id_seq', 0),
-    ('alert_group_id_seq', 0),
-    ('alert_target_id_seq', 0),
-    ('alert_history_id_seq', 0),
-    ('alert_notice_id_seq', 0),
-    ('alert_current_id_seq', 0),
-    ('config_id_seq', 11),
-    ('repo_version_id_seq', 0),
-    ('host_version_id_seq', 0),
-    ('service_config_id_seq', 1),
-    ('upgrade_id_seq', 0),
-    ('upgrade_group_id_seq', 0),
-    ('widget_id_seq', 0),
-    ('widget_layout_id_seq', 0),
-    ('upgrade_item_id_seq', 0),
-    ('stack_id_seq', 0),
-    ('extension_id_seq', 0),
-    ('link_id_seq', 0),
-    ('topology_host_info_id_seq', 0),
-    ('topology_host_request_id_seq', 0),
-    ('topology_host_task_id_seq', 0),
-    ('topology_logical_request_id_seq', 0),
-    ('topology_logical_task_id_seq', 0),
-    ('topology_request_id_seq', 0),
-    ('topology_host_group_id_seq', 0),
-    ('setting_id_seq', 0),
-    ('hostcomponentstate_id_seq', 0),
-    ('servicecomponentdesiredstate_id_seq', 0),
-    ('upgrade_history_id_seq', 0),
-    ('blueprint_setting_id_seq', 0),
-    ('ambari_operation_history_id_seq', 0),
-    ('remote_cluster_id_seq', 0),
-    ('remote_cluster_service_id_seq', 0),
-    ('servicecomponent_version_id_seq', 0),
-    ('hostcomponentdesiredstate_id_seq', 0)
-
-  insert into adminresourcetype (resource_type_id, resource_type_name)
-  values
-    (1, 'AMBARI'),
-    (2, 'CLUSTER'),
-    (3, 'VIEW')
-
-  insert into adminresource (resource_id, resource_type_id)
-    select 1, 1
-
-  insert into adminprincipaltype (principal_type_id, principal_type_name)
-  values
-    (1, 'USER'),
-    (2, 'GROUP'),
-    (8, 'ROLE')
-
-  insert into adminprincipal (principal_id, principal_type_id)
-  values
-    (1, 1),
-    (7, 8),
-    (8, 8),
-    (9, 8),
-    (10, 8),
-    (11, 8),
-    (12, 8),
-    (13, 8)
-
-  insert into users(user_id, principal_id, user_name, user_password)
-    select 1, 1, 'admin','538916f8943ec225d97a9a86a2c6ec0818c1cd400e09e03b660fdaaec4af29ddbb6f2b1033b81b00'
-
-  insert into adminpermission(permission_id, permission_name, resource_type_id, permission_label, principal_id, sort_order)
-  values
-    (1, 'AMBARI.ADMINISTRATOR', 1, 'Ambari Administrator', 7, 1),
-    (2, 'CLUSTER.USER', 2, 'Cluster User', 8, 6),
-    (3, 'CLUSTER.ADMINISTRATOR', 2, 'Cluster Administrator', 9, 2),
-    (4, 'VIEW.USER', 3, 'View User', 10, 7),
-    (5, 'CLUSTER.OPERATOR', 2, 'Cluster Operator', 11, 3),
-    (6, 'SERVICE.ADMINISTRATOR', 2, 'Service Administrator', 12, 4),
-    (7, 'SERVICE.OPERATOR', 2, 'Service Operator', 13, 5)
-
-  INSERT INTO roleauthorization(authorization_id, authorization_name)
-    SELECT 'VIEW.USE', 'Use View' UNION ALL
-    SELECT 'SERVICE.VIEW_METRICS', 'View metrics' UNION ALL
-    SELECT 'SERVICE.VIEW_STATUS_INFO', 'View status information' UNION ALL
-    SELECT 'SERVICE.VIEW_CONFIGS', 'View configurations' UNION ALL
-    SELECT 'SERVICE.COMPARE_CONFIGS', 'Compare configurations' UNION ALL
-    SELECT 'SERVICE.VIEW_ALERTS', 'View service-level alerts' UNION ALL
-    SELECT 'SERVICE.START_STOP', 'Start/Stop/Restart Service' UNION ALL
-    SELECT 'SERVICE.DECOMMISSION_RECOMMISSION', 'Decommission/recommission' UNION ALL
-    SELECT 'SERVICE.RUN_SERVICE_CHECK', 'Run service checks' UNION ALL
-    SELECT 'SERVICE.TOGGLE_MAINTENANCE', 'Turn on/off maintenance mode' UNION ALL
-    SELECT 'SERVICE.RUN_CUSTOM_COMMAND', 'Perform service-specific tasks' UNION ALL
-    SELECT 'SERVICE.MODIFY_CONFIGS', 'Modify configurations' UNION ALL
-    SELECT 'SERVICE.MANAGE_ALERTS', 'Manage service-level alerts' UNION ALL
-    SELECT 'SERVICE.MANAGE_CONFIG_GROUPS', 'Manage configuration groups' UNION ALL
-    SELECT 'SERVICE.MOVE', 'Move service to another host' UNION ALL
-    SELECT 'SERVICE.ENABLE_HA', 'Enable HA' UNION ALL
-    SELECT 'SERVICE.TOGGLE_ALERTS', 'Enable/disable service-level alerts' UNION ALL
-    SELECT 'SERVICE.ADD_DELETE_SERVICES', 'Add/delete services' UNION ALL
-    SELECT 'SERVICE.VIEW_OPERATIONAL_LOGS', 'View service operational logs' UNION ALL
-    SELECT 'SERVICE.SET_SERVICE_USERS_GROUPS', 'Set service users and groups' UNION ALL
-    SELECT 'SERVICE.MANAGE_AUTO_START', 'Manage service auto-start' UNION ALL
-    SELECT 'HOST.VIEW_METRICS', 'View metrics' UNION ALL
-    SELECT 'HOST.VIEW_STATUS_INFO', 'View status information' UNION ALL
-    SELECT 'HOST.VIEW_CONFIGS', 'View configuration' UNION ALL
-    SELECT 'HOST.TOGGLE_MAINTENANCE', 'Turn on/off maintenance mode' UNION ALL
-    SELECT 'HOST.ADD_DELETE_COMPONENTS', 'Install components' UNION ALL
-    SELECT 'HOST.ADD_DELETE_HOSTS', 'Add/Delete hosts' UNION ALL
-    SELECT 'CLUSTER.VIEW_METRICS', 'View metrics' UNION ALL
-    SELECT 'CLUSTER.VIEW_STATUS_INFO', 'View status information' UNION ALL
-    SELECT 'CLUSTER.VIEW_CONFIGS', 'View configuration' UNION ALL
-    SELECT 'CLUSTER.VIEW_STACK_DETAILS', 'View stack version details' UNION ALL
-    SELECT 'CLUSTER.VIEW_ALERTS', 'View cluster-level alerts' UNION ALL
-    SELECT 'CLUSTER.MANAGE_CREDENTIALS', 'Manage external credentials' UNION ALL
-    SELECT 'CLUSTER.MODIFY_CONFIGS', 'Modify cluster configurations' UNION ALL
-    SELECT 'CLUSTER.MANAGE_ALERTS', 'Manage cluster-level alerts' UNION ALL
-    SELECT 'CLUSTER.MANAGE_USER_PERSISTED_DATA', 'Manage cluster-level user persisted data' UNION ALL
-    SELECT 'CLUSTER.TOGGLE_ALERTS', 'Enable/disable cluster-level alerts' UNION ALL
-    SELECT 'CLUSTER.MANAGE_CONFIG_GROUPS', 'Manage cluster config groups' UNION ALL
-    SELECT 'CLUSTER.TOGGLE_KERBEROS', 'Enable/disable Kerberos' UNION ALL
-    SELECT 'CLUSTER.UPGRADE_DOWNGRADE_STACK', 'Upgrade/downgrade stack' UNION ALL
-    SELECT 'CLUSTER.RUN_CUSTOM_COMMAND', 'Perform custom cluster-level actions' UNION ALL
-    SELECT 'CLUSTER.MANAGE_AUTO_START', 'Manage service auto-start configuration' UNION ALL
-    SELECT 'CLUSTER.MANAGE_ALERT_NOTIFICATIONS', 'Manage alert notifications configuration' UNION ALL
-    SELECT 'AMBARI.ADD_DELETE_CLUSTERS', 'Create new clusters' UNION ALL
-    SELECT 'AMBARI.RENAME_CLUSTER', 'Rename clusters' UNION ALL
-    SELECT 'AMBARI.MANAGE_SETTINGS', 'Manage settings' UNION ALL
-    SELECT 'AMBARI.MANAGE_USERS', 'Manage users' UNION ALL
-    SELECT 'AMBARI.MANAGE_GROUPS', 'Manage groups' UNION ALL
-    SELECT 'AMBARI.MANAGE_VIEWS', 'Manage Ambari Views' UNION ALL
-    SELECT 'AMBARI.ASSIGN_ROLES', 'Assign roles' UNION ALL
-    SELECT 'AMBARI.MANAGE_STACK_VERSIONS', 'Manage stack versions' UNION ALL
-    SELECT 'AMBARI.EDIT_STACK_REPOS', 'Edit stack repository URLs' UNION ALL
-    SELECT 'AMBARI.RUN_CUSTOM_COMMAND', 'Perform custom administrative actions'
-
-  -- Set authorizations for View User role
-  INSERT INTO permission_roleauthorization(permission_id, authorization_id)
-    SELECT permission_id, 'VIEW.USE' FROM adminpermission WHERE permission_name='VIEW.USER'
-
-  -- Set authorizations for Cluster User role
-  INSERT INTO permission_roleauthorization(permission_id, authorization_id)
-    SELECT permission_id, 'SERVICE.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
-    SELECT permission_id, 'SERVICE.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
-    SELECT permission_id, 'SERVICE.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
-    SELECT permission_id, 'SERVICE.COMPARE_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
-    SELECT permission_id, 'SERVICE.VIEW_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
-    SELECT permission_id, 'HOST.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
-    SELECT permission_id, 'HOST.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
-    SELECT permission_id, 'HOST.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
-    SELECT permission_id, 'CLUSTER.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
-    SELECT permission_id, 'CLUSTER.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
-    SELECT permission_id, 'CLUSTER.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
-    SELECT permission_id, 'CLUSTER.VIEW_STACK_DETAILS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
-    SELECT permission_id, 'CLUSTER.VIEW_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
-    SELECT permission_id, 'CLUSTER.MANAGE_USER_PERSISTED_DATA' FROM adminpermission WHERE permission_name='CLUSTER.USER'
-
-  -- Set authorizations for Service Operator role
-  INSERT INTO permission_roleauthorization(permission_id, authorization_id)
-    SELECT permission_id, 'SERVICE.VIEW_METRICS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.COMPARE_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.VIEW_ALERTS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.START_STOP' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.DECOMMISSION_RECOMMISSION' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.RUN_SERVICE_CHECK' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.TOGGLE_MAINTENANCE' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.RUN_CUSTOM_COMMAND' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
-    SELECT permission_id, 'HOST.VIEW_METRICS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
-    SELECT permission_id, 'HOST.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
-    SELECT permission_id, 'HOST.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.VIEW_METRICS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.VIEW_STACK_DETAILS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.VIEW_ALERTS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.MANAGE_USER_PERSISTED_DATA' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR'
-
-  -- Set authorizations for Service Administrator role
-  INSERT INTO permission_roleauthorization(permission_id, authorization_id)
-    SELECT permission_id, 'SERVICE.VIEW_METRICS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.COMPARE_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.VIEW_ALERTS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.START_STOP' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.DECOMMISSION_RECOMMISSION' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.RUN_SERVICE_CHECK' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.TOGGLE_MAINTENANCE' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.RUN_CUSTOM_COMMAND' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.MODIFY_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.MANAGE_CONFIG_GROUPS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.VIEW_OPERATIONAL_LOGS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.MANAGE_AUTO_START' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'HOST.VIEW_METRICS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'HOST.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'HOST.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.VIEW_METRICS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.VIEW_STACK_DETAILS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.MANAGE_CONFIG_GROUPS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.VIEW_ALERTS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.MANAGE_USER_PERSISTED_DATA' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR'
-
-  -- Set authorizations for Cluster Operator role
-  INSERT INTO permission_roleauthorization(permission_id, authorization_id)
-    SELECT permission_id, 'SERVICE.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.COMPARE_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.VIEW_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.START_STOP' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.DECOMMISSION_RECOMMISSION' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.RUN_SERVICE_CHECK' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.TOGGLE_MAINTENANCE' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.RUN_CUSTOM_COMMAND' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.MODIFY_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.MANAGE_CONFIG_GROUPS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.MOVE' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.ENABLE_HA' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.VIEW_OPERATIONAL_LOGS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.MANAGE_AUTO_START' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
-    SELECT permission_id, 'HOST.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
-    SELECT permission_id, 'HOST.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
-    SELECT permission_id, 'HOST.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
-    SELECT permission_id, 'HOST.TOGGLE_MAINTENANCE' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
-    SELECT permission_id, 'HOST.ADD_DELETE_COMPONENTS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
-    SELECT permission_id, 'HOST.ADD_DELETE_HOSTS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.VIEW_STACK_DETAILS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.MANAGE_CONFIG_GROUPS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.VIEW_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.MANAGE_CREDENTIALS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.MANAGE_AUTO_START' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.MANAGE_USER_PERSISTED_DATA' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR'
-
-  -- Set authorizations for Cluster Administrator role
-  INSERT INTO permission_roleauthorization(permission_id, authorization_id)
-    SELECT permission_id, 'SERVICE.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.COMPARE_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.VIEW_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.START_STOP' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.DECOMMISSION_RECOMMISSION' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.RUN_SERVICE_CHECK' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.TOGGLE_MAINTENANCE' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.RUN_CUSTOM_COMMAND' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.MODIFY_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.MANAGE_CONFIG_GROUPS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.MANAGE_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.MOVE' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.ENABLE_HA' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.TOGGLE_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.ADD_DELETE_SERVICES' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.VIEW_OPERATIONAL_LOGS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.SET_SERVICE_USERS_GROUPS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.MANAGE_AUTO_START' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'HOST.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'HOST.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'HOST.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'HOST.TOGGLE_MAINTENANCE' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'HOST.ADD_DELETE_COMPONENTS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'HOST.ADD_DELETE_HOSTS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.VIEW_STACK_DETAILS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.VIEW_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.MANAGE_CREDENTIALS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.MODIFY_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.MANAGE_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.TOGGLE_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.TOGGLE_KERBEROS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.MANAGE_CONFIG_GROUPS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.UPGRADE_DOWNGRADE_STACK' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.MANAGE_USER_PERSISTED_DATA' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.MANAGE_AUTO_START' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.MANAGE_ALERT_NOTIFICATIONS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.RUN_CUSTOM_COMMAND' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR'
-
-  -- Set authorizations for Administrator role
-  INSERT INTO permission_roleauthorization(permission_id, authorization_id)
-    SELECT permission_id, 'VIEW.USE' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.VIEW_METRICS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.COMPARE_CONFIGS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.VIEW_ALERTS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.START_STOP' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.DECOMMISSION_RECOMMISSION' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.RUN_SERVICE_CHECK' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.TOGGLE_MAINTENANCE' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.RUN_CUSTOM_COMMAND' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.MODIFY_CONFIGS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.MANAGE_CONFIG_GROUPS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.MANAGE_ALERTS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.MOVE' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.ENABLE_HA' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.TOGGLE_ALERTS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.ADD_DELETE_SERVICES' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.VIEW_OPERATIONAL_LOGS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.SET_SERVICE_USERS_GROUPS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'SERVICE.MANAGE_AUTO_START' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'HOST.VIEW_METRICS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'HOST.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'HOST.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'HOST.TOGGLE_MAINTENANCE' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'HOST.ADD_DELETE_COMPONENTS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'HOST.ADD_DELETE_HOSTS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.VIEW_METRICS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.VIEW_STACK_DETAILS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.VIEW_ALERTS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.MANAGE_CREDENTIALS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.MODIFY_CONFIGS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.MANAGE_ALERTS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.MANAGE_CONFIG_GROUPS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.TOGGLE_ALERTS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.TOGGLE_KERBEROS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.UPGRADE_DOWNGRADE_STACK' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.MANAGE_USER_PERSISTED_DATA' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.MANAGE_AUTO_START' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.MANAGE_ALERT_NOTIFICATIONS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'CLUSTER.RUN_CUSTOM_COMMAND' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'AMBARI.ADD_DELETE_CLUSTERS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'AMBARI.RENAME_CLUSTER' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'AMBARI.MANAGE_SETTINGS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'AMBARI.MANAGE_USERS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'AMBARI.MANAGE_GROUPS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'AMBARI.MANAGE_VIEWS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'AMBARI.ASSIGN_ROLES' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'AMBARI.MANAGE_STACK_VERSIONS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'AMBARI.EDIT_STACK_REPOS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
-    SELECT permission_id, 'AMBARI.RUN_CUSTOM_COMMAND' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR'
-
-  insert into adminprivilege (privilege_id, permission_id, resource_id, principal_id)
-    select 1, 1, 1, 1
-
-  insert into metainfo(metainfo_key, metainfo_value)
-    select 'version','${ambariSchemaVersion}'
-COMMIT TRANSACTION
-
--- Quartz tables
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('qrtz_job_details') AND type = 'U')
-BEGIN
-CREATE TABLE qrtz_job_details
-  (
-    SCHED_NAME VARCHAR(120) NOT NULL,
-    JOB_NAME  VARCHAR(200) NOT NULL,
-    JOB_GROUP VARCHAR(200) NOT NULL,
-    DESCRIPTION VARCHAR(250) NULL,
-    JOB_CLASS_NAME   VARCHAR(250) NOT NULL,
-    IS_DURABLE BIT NOT NULL,
-    IS_NONCONCURRENT BIT NOT NULL,
-    IS_UPDATE_DATA BIT NOT NULL,
-    REQUESTS_RECOVERY BIT NOT NULL,
-    JOB_DATA VARBINARY(MAX) NULL,
-    PRIMARY KEY CLUSTERED (SCHED_NAME,JOB_NAME,JOB_GROUP)
-)
-END
-
-
-IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('qrtz_triggers') AND type = 'U')
-BEGIN
-CREATE TABLE qrtz_triggers
-  (
-    SCHED_NAME VARCHAR(120) NOT NULL,
-    TRIGGER_NAME VARCHAR(200) NOT NULL,
-    TRIGGER_GROUP VARCHAR(200) NOT NULL,
-    JOB_NAME  VARCHAR(200) NOT NULL,
-    JOB_GROUP VARCHAR(200) NOT NULL,
-    DESCRIPTION VARCHAR(

<TRUNCATED>

[43/50] [abbrv] ambari git commit: AMBARI-22178. Install and upgrade options are shown for current and already upgraded stack versions (ncole)

Posted by ja...@apache.org.
AMBARI-22178. Install and upgrade options are shown for current and already upgraded stack versions (ncole)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8908d3e0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8908d3e0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8908d3e0

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: 8908d3e05a546cd3dea4bf84d54f087d64ce8b88
Parents: b129536
Author: Nate Cole <nc...@hortonworks.com>
Authored: Tue Oct 10 09:44:20 2017 -0400
Committer: Nate Cole <nc...@hortonworks.com>
Committed: Tue Oct 10 10:18:06 2017 -0400

----------------------------------------------------------------------
 .../upgrade/HostVersionOutOfSyncListener.java         | 11 +++++++++++
 .../upgrade/HostVersionOutOfSyncListenerTest.java     | 14 +++++++-------
 2 files changed, 18 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/8908d3e0/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListener.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListener.java b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListener.java
index 0be036e..4ce855d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListener.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListener.java
@@ -48,6 +48,7 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.ComponentInfo;
 import org.apache.ambari.server.state.RepositoryVersionState;
+import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.StackId;
 import org.slf4j.Logger;
@@ -115,6 +116,10 @@ public class HostVersionOutOfSyncListener {
       List<HostVersionEntity> hostVersionEntities =
           hostVersionDAO.get().findByClusterAndHost(cluster.getClusterName(), event.getHostName());
 
+      Service service = cluster.getService(event.getServiceName());
+      ServiceComponent serviceComponent = service.getServiceComponent(event.getComponentName());
+      RepositoryVersionEntity componentRepo = serviceComponent.getDesiredRepositoryVersion();
+
       for (HostVersionEntity hostVersionEntity : hostVersionEntities) {
         StackEntity hostStackEntity = hostVersionEntity.getRepositoryVersion().getStack();
         StackId hostStackId = new StackId(hostStackEntity);
@@ -136,6 +141,12 @@ public class HostVersionOutOfSyncListener {
           continue;
         }
 
+        // !!! we shouldn't be changing other versions to OUT_OF_SYNC if the event
+        // component repository doesn't match
+        if (!hostVersionEntity.getRepositoryVersion().equals(componentRepo)) {
+          continue;
+        }
+
         switch (hostVersionEntity.getState()) {
           case INSTALLED:
           case NOT_REQUIRED:

http://git-wip-us.apache.org/repos/asf/ambari/blob/8908d3e0/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListenerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListenerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListenerTest.java
index 076190a..24d4f55 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListenerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListenerTest.java
@@ -19,6 +19,7 @@
 package org.apache.ambari.server.events.listeners.upgrade;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.sql.SQLException;
 import java.util.ArrayList;
@@ -335,12 +336,12 @@ public class HostVersionOutOfSyncListenerTest {
 
     for (HostVersionEntity hostVersionEntity : hostVersions) {
       RepositoryVersionEntity repoVersion = hostVersionEntity.getRepositoryVersion();
-      if (repoVersion.getVersion().equals(INSTALLED_VERSION) || repoVersion.getVersion().equals(INSTALLED_VERSION_2)) {
-        if (changedHosts.contains(hostVersionEntity.getHostName())) {
-          assertEquals(hostVersionEntity.getState(), RepositoryVersionState.OUT_OF_SYNC);
-        } else {
-          assertEquals(hostVersionEntity.getState(), RepositoryVersionState.INSTALLED);
-        }
+
+      if (repoVersion.getVersion().equals(INSTALLED_VERSION_2)) {
+        assertEquals(RepositoryVersionState.INSTALLED, hostVersionEntity.getState());
+      } else if (repoVersion.getVersion().equals(INSTALLED_VERSION)) {
+        assertTrue(changedHosts.contains(hostVersionEntity.getHostName()));
+        assertEquals(RepositoryVersionState.OUT_OF_SYNC, hostVersionEntity.getState());
       }
     }
   }
@@ -598,6 +599,5 @@ public class HostVersionOutOfSyncListenerTest {
         }
       }
     }
-
   }
 }
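
[Editor's note] The core of AMBARI-22178 is the new guard in the listener loop: a host version is flipped to OUT_OF_SYNC only when its repository matches the event component's desired repository, so current and already-upgraded versions stop offering install/upgrade options. A minimal standalone sketch of that guard pattern follows; the class and field names are illustrative stand-ins, not Ambari's actual entities.

import java.util.Arrays;
import java.util.List;

// Stand-ins for Ambari's entities; only the guard logic mirrors the patch above.
class RepoVersion {
    final String version;
    RepoVersion(String version) { this.version = version; }
    @Override public boolean equals(Object o) {
        return o instanceof RepoVersion && ((RepoVersion) o).version.equals(version);
    }
    @Override public int hashCode() { return version.hashCode(); }
}

class HostVersion {
    final RepoVersion repo;
    String state;
    HostVersion(RepoVersion repo, String state) { this.repo = repo; this.state = state; }
}

public class OutOfSyncGuardSketch {
    // Flip only host versions that belong to the component's desired repository.
    static void markOutOfSync(List<HostVersion> hostVersions, RepoVersion componentRepo) {
        for (HostVersion hv : hostVersions) {
            if (!hv.repo.equals(componentRepo)) {
                continue; // the guard added by the patch: leave other repositories untouched
            }
            if (hv.state.equals("INSTALLED") || hv.state.equals("NOT_REQUIRED")) {
                hv.state = "OUT_OF_SYNC";
            }
        }
    }

    public static void main(String[] args) {
        RepoVersion current = new RepoVersion("2.6.0.0");
        RepoVersion target = new RepoVersion("2.6.3.0");
        List<HostVersion> versions = Arrays.asList(
                new HostVersion(current, "INSTALLED"),
                new HostVersion(target, "INSTALLED"));
        markOutOfSync(versions, target);
        for (HostVersion hv : versions) {
            System.out.println(hv.repo.version + " -> " + hv.state); // only 2.6.3.0 flips
        }
    }
}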


[33/50] [abbrv] ambari git commit: Revert "AMBARI-21205 Make ToggleKerberos and AddDeleteService experimental features (Duc Le via rzang)"

Posted by ja...@apache.org.
Revert "AMBARI-21205 Make ToggleKerberos and AddDeleteService experimental features (Duc Le via rzang)"

This reverts commit 57bb1365e414c1f110d2d142fa198fb8e043af95.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7e0fe291
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7e0fe291
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7e0fe291

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: 7e0fe2913619f4b70097e3ebcd7fb89e84eee62d
Parents: 5af1e53
Author: Jaimin Jetly <ja...@hortonworks.com>
Authored: Mon Oct 9 11:26:04 2017 -0700
Committer: Jaimin Jetly <ja...@hortonworks.com>
Committed: Mon Oct 9 11:26:04 2017 -0700

----------------------------------------------------------------------
 ambari-web/app/config.js                        |  6 ++--
 ambari-web/app/routes/add_service_routes.js     |  2 +-
 ambari-web/app/routes/main.js                   |  2 +-
 .../app/templates/main/admin/kerberos.hbs       | 34 +++++++++-----------
 .../main/service/all_services_actions.hbs       |  6 ++--
 ambari-web/app/views/main/admin.js              | 14 ++++----
 .../main/admin/stack_upgrade/services_view.js   |  2 +-
 ambari-web/app/views/main/menu.js               | 16 ++++-----
 ambari-web/app/views/main/service/item.js       |  2 +-
 .../admin/stack_upgrade/services_view_test.js   |  1 -
 10 files changed, 36 insertions(+), 49 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/7e0fe291/ambari-web/app/config.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/config.js b/ambari-web/app/config.js
index 0963f70..ba1b75d 100644
--- a/ambari-web/app/config.js
+++ b/ambari-web/app/config.js
@@ -86,11 +86,9 @@ App.supports = {
   addingNewRepository: false,
   kerberosStackAdvisor: true,
   logCountVizualization: false,
-  createAlerts: false,
-  enabledWizardForHostOrderedUpgrade: true,
   manageJournalNode: true,
-  enableToggleKerberos: true,
-  enableAddDeleteServices: true
+  createAlerts: false,
+  enabledWizardForHostOrderedUpgrade: true
 };
 
 if (App.enableExperimental) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e0fe291/ambari-web/app/routes/add_service_routes.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/routes/add_service_routes.js b/ambari-web/app/routes/add_service_routes.js
index 75b3586..1615f0d 100644
--- a/ambari-web/app/routes/add_service_routes.js
+++ b/ambari-web/app/routes/add_service_routes.js
@@ -24,7 +24,7 @@ module.exports = App.WizardRoute.extend({
   route: '/service/add',
 
   enter: function (router) {
-    if (App.isAuthorized('SERVICE.ADD_DELETE_SERVICES') && App.supports.enableAddDeleteServices) {
+    if (App.isAuthorized('SERVICE.ADD_DELETE_SERVICES')) {
       // `getSecurityStatus` call is required to retrieve information related to kerberos type: Manual or automated kerberos
       router.get('mainController').isLoading.call(router.get('clusterController'),'isClusterNameLoaded').done(function () {
         App.router.get('mainAdminKerberosController').getSecurityStatus().always(function () {

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e0fe291/ambari-web/app/routes/main.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/routes/main.js b/ambari-web/app/routes/main.js
index 7ed18de..30cc8aa 100644
--- a/ambari-web/app/routes/main.js
+++ b/ambari-web/app/routes/main.js
@@ -460,7 +460,7 @@ module.exports = Em.Route.extend(App.RouterRedirections, {
 
       route: '/kerberos',
       enter: function (router, transition) {
-        if (router.get('loggedIn') && (!App.isAuthorized('CLUSTER.TOGGLE_KERBEROS') || !App.supports.enableToggleKerberos)) {
+        if (router.get('loggedIn') && !App.isAuthorized('CLUSTER.TOGGLE_KERBEROS')) {
           router.transitionTo('main.dashboard.index');
         }
       },

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e0fe291/ambari-web/app/templates/main/admin/kerberos.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/admin/kerberos.hbs b/ambari-web/app/templates/main/admin/kerberos.hbs
index 2b41122..e7bb618 100644
--- a/ambari-web/app/templates/main/admin/kerberos.hbs
+++ b/ambari-web/app/templates/main/admin/kerberos.hbs
@@ -20,22 +20,20 @@
     <div>
       <p class="text-success">{{t admin.security.enabled}}
         {{#isAuthorized "CLUSTER.TOGGLE_KERBEROS"}}
-            {{#if App.supports.enableToggleKerberos}}
-              <button class="btn btn-padding btn-warning admin-disable-security-btn" {{bindAttr disabled="isKerberosButtonsDisabled"}} {{action notifySecurityOffPopup target="controller"}}>{{t admin.kerberos.button.disable}} </button>
-              {{#unless isManualKerberos}}
-                <button class="btn btn-success" id="regenerate-keytabs" {{bindAttr disabled="isKerberosButtonsDisabled"}} {{action regenerateKeytabs target="controller"}}>
-                  <i class="glyphicon glyphicon-repeat"></i> {{t admin.kerberos.button.regenerateKeytabs}}</button>
-                {{#if App.isCredentialStorePersistent}}
-                  <button class="btn btn-primary" {{action showManageKDCCredentialsPopup target="controller"}}>{{t admin.kerberos.credentials.store.menu.label}}</button>
-                {{/if}}
-              {{/unless}}
-              <br/>
-              {{#unless isEditMode}}
-                <a href="#" {{action makeConfigsEditable target="controller"}} class="pull-right">
-                  {{t common.edit}}
-                </a>
-              {{/unless}}
+          <button class="btn btn-padding btn-warning admin-disable-security-btn" {{bindAttr disabled="isKerberosButtonsDisabled"}} {{action notifySecurityOffPopup target="controller"}}>{{t admin.kerberos.button.disable}} </button>
+          {{#unless isManualKerberos}}
+            <button class="btn btn-success" id="regenerate-keytabs" {{bindAttr disabled="isKerberosButtonsDisabled"}} {{action regenerateKeytabs target="controller"}}>
+              <i class="glyphicon glyphicon-repeat"></i> {{t admin.kerberos.button.regenerateKeytabs}}</button>
+            {{#if App.isCredentialStorePersistent}}
+              <button class="btn btn-primary" {{action showManageKDCCredentialsPopup target="controller"}}>{{t admin.kerberos.credentials.store.menu.label}}</button>
             {{/if}}
+          {{/unless}}
+          <br/>
+          {{#unless isEditMode}}
+            <a href="#" {{action makeConfigsEditable target="controller"}} class="pull-right">
+              {{t common.edit}}
+            </a>
+          {{/unless}}
         {{/isAuthorized}}
       </p>
     </div>
@@ -53,10 +51,8 @@
     <div>
       <p class="muted background-text">{{t admin.security.disabled}}
       {{#isAuthorized "CLUSTER.TOGGLE_KERBEROS"}}
-        {{#if App.supports.enableToggleKerberos}}
-          <a class="btn btn-padding btn-success admin-enable-security-btn" {{action checkAndStartKerberosWizard target="controller"}}>{{t admin.kerberos.button.enable}} </a>
-          <br/>
-        {{/if}}
+        <a class="btn btn-padding btn-success admin-enable-security-btn" {{action checkAndStartKerberosWizard target="controller"}}>{{t admin.kerberos.button.enable}} </a>
+        <br/>
       {{/isAuthorized}}
       </p>
     </div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e0fe291/ambari-web/app/templates/main/service/all_services_actions.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/all_services_actions.hbs b/ambari-web/app/templates/main/service/all_services_actions.hbs
index a9e122b..3e87cb2 100644
--- a/ambari-web/app/templates/main/service/all_services_actions.hbs
+++ b/ambari-web/app/templates/main/service/all_services_actions.hbs
@@ -22,14 +22,12 @@
   </div>
   <ul class="dropdown-menu">
     {{#isAuthorized "SERVICE.ADD_DELETE_SERVICES"}}
-      {{#if App.supports.enableAddDeleteServices}}
-        <li {{bindAttr class="view.serviceController.isAllServicesInstalled:disabled"}}>
+      <li {{bindAttr class="view.serviceController.isAllServicesInstalled:disabled"}}>
         <a href="#"
           {{bindAttr class="view.serviceController.isAllServicesInstalled:disabled"}}
           {{action gotoAddService target="view.serviceController"}}>
           <i class="glyphicon glyphicon-plus"></i> {{t services.service.add}}</a>
-        </li>
-      {{/if}}
+      </li>
     {{/isAuthorized}}
     {{#isAuthorized "SERVICE.START_STOP"}}
       <li class="divider"></li>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e0fe291/ambari-web/app/views/main/admin.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/admin.js b/ambari-web/app/views/main/admin.js
index 05d0f56..509f380 100644
--- a/ambari-web/app/views/main/admin.js
+++ b/ambari-web/app/views/main/admin.js
@@ -39,14 +39,12 @@ App.MainAdminView = Em.View.extend({
       });
     }
     if (!App.get('isHadoopWindowsStack') && App.isAuthorized('CLUSTER.TOGGLE_KERBEROS') || (App.get('upgradeInProgress') || App.get('upgradeHolding')) ) {
-      if (App.supports.enableToggleKerberos) {
-        items.push({
-          name: 'kerberos',
-          url: 'adminKerberos.index',
-          label: Em.I18n.t('common.kerberos'),
-          disabled: App.get('upgradeInProgress') || App.get('upgradeHolding')
-        });
-      }
+      items.push({
+        name: 'kerberos',
+        url: 'adminKerberos.index',
+        label: Em.I18n.t('common.kerberos'),
+        disabled: App.get('upgradeInProgress') || App.get('upgradeHolding')
+      });
     }
     if ((App.isAuthorized('SERVICE.START_STOP, CLUSTER.MODIFY_CONFIGS') && App.isAuthorized('SERVICE.MANAGE_AUTO_START, CLUSTER.MANAGE_AUTO_START')) || (App.get('upgradeInProgress') || App.get('upgradeHolding'))) {
       if (App.supports.serviceAutoStart) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e0fe291/ambari-web/app/views/main/admin/stack_upgrade/services_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/admin/stack_upgrade/services_view.js b/ambari-web/app/views/main/admin/stack_upgrade/services_view.js
index 25efffe..f566814 100644
--- a/ambari-web/app/views/main/admin/stack_upgrade/services_view.js
+++ b/ambari-web/app/views/main/admin/stack_upgrade/services_view.js
@@ -56,7 +56,7 @@ App.MainAdminStackServicesView = Em.View.extend({
    * @param event
    */
   goToAddService: function (event) {
-    if (!App.isAuthorized('SERVICE.ADD_DELETE_SERVICES') || !App.supports.enableAddDeleteServices) {
+    if (!App.isAuthorized('SERVICE.ADD_DELETE_SERVICES')) {
       return;
     } else if (event.context == "KERBEROS") {
       App.router.get('mainAdminKerberosController').checkAndStartKerberosWizard();

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e0fe291/ambari-web/app/views/main/menu.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/menu.js b/ambari-web/app/views/main/menu.js
index 32c4f6f..4bb53ae 100644
--- a/ambari-web/app/views/main/menu.js
+++ b/ambari-web/app/views/main/menu.js
@@ -118,15 +118,13 @@ App.MainSideMenuView = Em.CollectionView.extend({
           });
         }
         if (!App.get('isHadoopWindowsStack') && App.isAuthorized('CLUSTER.TOGGLE_KERBEROS') || upg) {
-          if (App.supports.enableToggleKerberos) {
-            categories.push({
-              name: 'kerberos',
-              url: 'kerberos/',
-              label: Em.I18n.t('common.kerberos'),
-              disabled: App.get('upgradeInProgress') || App.get('upgradeHolding'),
-              href: router.urlFor('main.admin.adminKerberos')
-            });
-          }
+          categories.push({
+            name: 'kerberos',
+            url: 'kerberos/',
+            label: Em.I18n.t('common.kerberos'),
+            disabled: App.get('upgradeInProgress') || App.get('upgradeHolding'),
+            href: router.urlFor('main.admin.adminKerberos')
+          });
         }
         if ((App.isAuthorized('SERVICE.START_STOP, CLUSTER.MODIFY_CONFIGS') && App.isAuthorized('SERVICE.MANAGE_AUTO_START, CLUSTER.MANAGE_AUTO_START')) || upg) {
           if (App.supports.serviceAutoStart) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e0fe291/ambari-web/app/views/main/service/item.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/service/item.js b/ambari-web/app/views/main/service/item.js
index 45c783b..37e0904 100644
--- a/ambari-web/app/views/main/service/item.js
+++ b/ambari-web/app/views/main/service/item.js
@@ -289,7 +289,7 @@ App.MainServiceItemView = Em.View.extend({
       options.push(actionMap.DOWNLOAD_CLIENT_CONFIGS);
     }
 
-    if (App.isAuthorized("SERVICE.ADD_DELETE_SERVICES") && App.supports.enableAddDeleteServices) {
+    if (App.isAuthorized("SERVICE.ADD_DELETE_SERVICES")) {
       options.push(actionMap.DELETE_SERVICE);
     }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e0fe291/ambari-web/test/views/main/admin/stack_upgrade/services_view_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/admin/stack_upgrade/services_view_test.js b/ambari-web/test/views/main/admin/stack_upgrade/services_view_test.js
index da75cf2..70d182c 100644
--- a/ambari-web/test/views/main/admin/stack_upgrade/services_view_test.js
+++ b/ambari-web/test/views/main/admin/stack_upgrade/services_view_test.js
@@ -34,7 +34,6 @@ describe('App.MainAdminStackServicesView', function () {
       sinon.stub(App.router, 'get').returns(mock);
       sinon.spy(mock, 'checkAndStartKerberosWizard');
       isAccessibleMock = sinon.stub(App, 'isAuthorized');
-      App.set('supports.enableAddDeleteServices', true);
     });
     afterEach(function() {
       App.get('router').transitionTo.restore();
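
[Editor's note] The revert above removes the App.supports.enableToggleKerberos and App.supports.enableAddDeleteServices feature-flag checks, leaving role authorization as the only guard on these UI actions. For context, a compact sketch of the double-guard pattern being reverted; the names are made up for the example (the real checks live in ambari-web JavaScript), and it is rendered in Java only to match the rest of this digest's examples.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

// Illustrative sketch: before the revert, an action was shown only when the user
// held the authorization AND the experimental feature flag was enabled.
public class FeatureGateSketch {
    static final Set<String> GRANTED =
            new HashSet<>(Arrays.asList("SERVICE.ADD_DELETE_SERVICES"));
    static boolean enableAddDeleteServices = true; // analogue of App.supports.*

    static boolean canShowAddService() {
        boolean authorized = GRANTED.contains("SERVICE.ADD_DELETE_SERVICES");
        return authorized && enableAddDeleteServices; // before the revert
        // after the revert the flag is gone: return authorized;
    }

    public static void main(String[] args) {
        System.out.println("Show 'Add Service': " + canShowAddService());
    }
}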


[48/50] [abbrv] ambari git commit: AMBARI-22137 - Different stack versions should be able to link to different extension versions

Posted by ja...@apache.org.
AMBARI-22137 - Different stack versions should be able to link to different extension versions


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/03273bdc
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/03273bdc
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/03273bdc

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: 03273bdce970a1282d89056aa48dc2ffe3b7b712
Parents: a3a8afc
Author: Tim Thorpe <tt...@apache.org>
Authored: Tue Oct 10 13:54:01 2017 -0700
Committer: Tim Thorpe <tt...@apache.org>
Committed: Tue Oct 10 13:54:01 2017 -0700

----------------------------------------------------------------------
 .../controller/AmbariManagementHelper.java      |   2 +-
 .../ambari/server/stack/ExtensionHelper.java    |  57 ++-
 .../apache/ambari/server/stack/StackModule.java |   4 +
 .../server/stack/StackManagerExtensionTest.java |  31 +-
 .../resources/extensions/EXT/0.2/metainfo.xml   |   2 +-
 .../resources/extensions/EXT/0.3/metainfo.xml   |   2 +-
 .../stacks_with_extensions/HDP/0.4/metainfo.xml |  22 ++
 .../HDP/0.4/repos/repoinfo.xml                  |  63 +++
 .../HDP/0.4/services/HBASE/metainfo.xml         |  26 ++
 .../0.4/services/HDFS/configuration/global.xml  | 145 +++++++
 .../services/HDFS/configuration/hadoop-env.xml  | 223 +++++++++++
 .../services/HDFS/configuration/hbase-site.xml  | 137 +++++++
 .../services/HDFS/configuration/hdfs-log4j.xml  | 199 ++++++++++
 .../services/HDFS/configuration/hdfs-site.xml   | 396 +++++++++++++++++++
 .../HDP/0.4/services/HDFS/metainfo.xml          |  30 ++
 .../0.4/services/HDFS/package/dummy-script.py   |  20 +
 .../HDP/0.4/services/HIVE/metainfo.xml          |  26 ++
 .../HDP/0.4/services/MAPREDUCE/metainfo.xml     |  23 ++
 .../HDP/0.4/services/ZOOKEEPER/metainfo.xml     |  26 ++
 19 files changed, 1425 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java
index 2dd6f12..0c8edfe 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java
@@ -70,7 +70,7 @@ public class AmbariManagementHelper {
    */
   public void createExtensionLink(StackManager stackManager, StackInfo stackInfo, ExtensionInfo extensionInfo) throws AmbariException {
     validateCreateExtensionLinkRequest(stackInfo, extensionInfo);
-    ExtensionHelper.validateCreateLink(stackInfo, extensionInfo);
+    ExtensionHelper.validateCreateLink(stackManager, stackInfo, extensionInfo);
     ExtensionLinkEntity linkEntity = createExtensionLinkEntity(stackInfo, extensionInfo);
     stackManager.linkStackToExtension(stackInfo, extensionInfo);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionHelper.java
index 86e532a..91dc870 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionHelper.java
@@ -27,6 +27,8 @@ import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.StackInfo;
 import org.apache.ambari.server.state.stack.ExtensionMetainfoXml;
 import org.apache.ambari.server.utils.VersionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * An extension version is like a stack version but it contains custom services.  Linking an extension
@@ -35,6 +37,8 @@ import org.apache.ambari.server.utils.VersionUtils;
  */
 public class ExtensionHelper {
 
+  private final static Logger LOG = LoggerFactory.getLogger(ExtensionHelper.class);
+
   public static void validateDeleteLink(Clusters clusters, StackInfo stack, ExtensionInfo extension) throws AmbariException {
     validateNotRequiredExtension(stack, extension);
     validateServicesNotInstalled(clusters, stack, extension);
@@ -62,9 +66,9 @@ public class ExtensionHelper {
     }
   }
 
-  public static void validateCreateLink(StackInfo stack, ExtensionInfo extension) throws AmbariException {
+  public static void validateCreateLink(StackManager stackManager, StackInfo stack, ExtensionInfo extension) throws AmbariException {
     validateSupportedStackVersion(stack, extension);
-    validateServiceDuplication(stack, extension);
+    validateServiceDuplication(stackManager, stack, extension);
     validateRequiredExtensions(stack, extension);
   }
 
@@ -88,15 +92,24 @@ public class ExtensionHelper {
     throw new AmbariException(message);
   }
 
-  private static void validateServiceDuplication(StackInfo stack, ExtensionInfo extension) throws AmbariException {
+  private static void validateServiceDuplication(StackManager stackManager, StackInfo stack, ExtensionInfo extension) throws AmbariException {
+    LOG.debug("Looking for duplicate services");
     for (ServiceInfo service : extension.getServices()) {
+      LOG.debug("Looking for duplicate service " + (service == null ? "null" : service.getName()));
       if (service != null) {
         ServiceInfo stackService = null;
         try {
           stackService = stack.getService(service.getName());
+          if (stackService != null) {
+            LOG.debug("Found service " + service.getName());
+            if (isInheritedExtensionService(stackManager, stack, service.getName(), extension.getName())) {
+              stackService = null;
+            }
+          }
         }
         catch (Exception e) {
           //Eat the exception
+          LOG.error("Error validating service duplication", e);
         }
         if (stackService != null) {
           String message = "Existing service is included in extension"
@@ -112,6 +125,44 @@ public class ExtensionHelper {
     }
   }
 
+  private static boolean isInheritedExtensionService(StackManager stackManager, StackInfo stack, String serviceName, String extensionName) {
+    // Check if service is from an extension at the current stack level, if so then it isn't inherited from its parent stack version
+    if (isExtensionService(stack, serviceName, extensionName)) {
+      LOG.debug("Service is at requested stack/version level " + serviceName);
+      return false;
+    }
+
+    return isExtensionService(stackManager, stack.getName(), stack.getParentStackVersion(), serviceName, extensionName);
+  }
+
+  private static boolean isExtensionService(StackManager stackManager, String stackName, String stackVersion, String serviceName, String extensionName) {
+    LOG.debug("Checking at stack/version " + stackName + "/" + stackVersion);
+    StackInfo stack = stackManager.getStack(stackName, stackVersion);
+
+    if (stack == null) {
+      LOG.warn("Stack/version not found " + stackName + "/" + stackVersion);
+      return false;
+    }
+
+    if (isExtensionService(stack, serviceName, extensionName)) {
+      LOG.debug("Stack/version " + stackName + "/" + stackVersion + " contains service " + serviceName);
+      return true;
+    }
+    else {
+      return isExtensionService(stackManager, stackName, stack.getParentStackVersion(), serviceName, extensionName);
+    }
+  }
+
+  private static boolean isExtensionService(StackInfo stack, String serviceName, String extensionName) {
+    ExtensionInfo extension = stack.getExtension(extensionName);
+    if (extension == null) {
+      LOG.debug("Extension not found " + extensionName);
+      return false;
+    }
+
+    return extension.getService(serviceName) != null;
+  }
+
   private static void validateRequiredExtensions(StackInfo stack, ExtensionInfo extension) throws AmbariException {
     for (ExtensionMetainfoXml.Extension requiredExtension : extension.getExtensions()) {
       if (requiredExtension != null) {
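
[Editor's note] The new isInheritedExtensionService/isExtensionService pair above recurses up the stack's parent-version chain until it either finds the service in a linked extension or runs out of ancestors; a duplicate is only reported when the service was not inherited that way. A standalone sketch of that parent-chain walk follows; the maps and keys are an illustrative model, not Ambari's API, and unlike the real code (which checks the current level separately) this sketch starts the walk at the requested version.

import java.util.HashMap;
import java.util.Map;

// Illustrative model: walk a stack version's parent chain until the service is
// found in a linked extension, or the chain ends.
public class ParentChainSketch {
    static final Map<String, String> PARENT = new HashMap<>();           // version -> parent
    static final Map<String, Boolean> HAS_EXT_SERVICE = new HashMap<>(); // "version:EXT/SVC" -> true

    static boolean isExtensionService(String version, String extension, String service) {
        if (version == null) {
            return false; // walked past the root of the hierarchy
        }
        if (Boolean.TRUE.equals(HAS_EXT_SERVICE.get(version + ":" + extension + "/" + service))) {
            return true;  // found at this stack version
        }
        return isExtensionService(PARENT.get(version), extension, service); // recurse upward
    }

    public static void main(String[] args) {
        PARENT.put("0.4", "0.3");
        PARENT.put("0.3", "0.2");
        PARENT.put("0.2", "0.1");
        HAS_EXT_SERVICE.put("0.1:EXT/OOZIE2", Boolean.TRUE);
        System.out.println(isExtensionService("0.4", "EXT", "OOZIE2")); // true (inherited)
    }
}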

http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
index 520764d..b109331 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
@@ -198,6 +198,7 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
     if (parentVersion != null) {
       mergeStackWithParent(parentVersion, allStacks, commonServices, extensions);
     }
+
     for (ExtensionInfo extension : stackInfo.getExtensions()) {
       String extensionKey = extension.getName() + StackManager.PATH_DELIMITER + extension.getVersion();
       ExtensionModule extensionModule = extensions.get(extensionKey);
@@ -402,6 +403,9 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
 
   private void addExtensionServices() throws AmbariException {
     for (ExtensionModule extension : extensionModules.values()) {
+      for (Map.Entry<String, ServiceModule> entry : extension.getServiceModules().entrySet()) {
+        serviceModules.put(entry.getKey(), entry.getValue());
+      }
       stackInfo.addExtension(extension.getModuleInfo());
     }
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
index 20c8f40..cef30b5 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
@@ -69,6 +69,9 @@ public class StackManagerExtensionTest  {
     StackEntity stack3 = new StackEntity();
     stack3.setStackName("HDP");
     stack3.setStackVersion("0.3");
+    StackEntity stack4 = new StackEntity();
+    stack4.setStackName("HDP");
+    stack4.setStackVersion("0.4");
     ExtensionEntity extension1 = new ExtensionEntity();
     extension1.setExtensionName("EXT");
     extension1.setExtensionVersion("0.1");
@@ -78,19 +81,28 @@ public class StackManagerExtensionTest  {
     ExtensionEntity extension3 = new ExtensionEntity();
     extension3.setExtensionName("EXT");
     extension3.setExtensionVersion("0.3");
+    ExtensionLinkEntity link1 = new ExtensionLinkEntity();
+    link1.setLinkId(new Long(-1));
+    link1.setStack(stack1);
+    link1.setExtension(extension1);
     List<ExtensionLinkEntity> list = new ArrayList<>();
+    List<ExtensionLinkEntity> linkList = new ArrayList<>();
+    linkList.add(link1);
 
     expect(stackDao.find("HDP", "0.1")).andReturn(stack1).atLeastOnce();
     expect(stackDao.find("HDP", "0.2")).andReturn(stack2).atLeastOnce();
     expect(stackDao.find("HDP", "0.3")).andReturn(stack3).atLeastOnce();
+    expect(stackDao.find("HDP", "0.4")).andReturn(stack4).atLeastOnce();
     expect(extensionDao.find("EXT", "0.1")).andReturn(extension1).atLeastOnce();
     expect(extensionDao.find("EXT", "0.2")).andReturn(extension2).atLeastOnce();
     expect(extensionDao.find("EXT", "0.3")).andReturn(extension3).atLeastOnce();
 
+    expect(linkDao.findByStack("HDP", "0.1")).andReturn(linkList).atLeastOnce();
     expect(linkDao.findByStack(EasyMock.anyObject(String.class),
             EasyMock.anyObject(String.class))).andReturn(list).atLeastOnce();
 
     expect(linkDao.findByStackAndExtension("HDP", "0.2", "EXT", "0.2")).andReturn(null).atLeastOnce();
+    expect(linkDao.findByStackAndExtension("HDP", "0.1", "EXT", "0.1")).andReturn(link1).atLeastOnce();
 
     replay(actionMetadata, stackDao, metaInfoDao, osFamily, extensionDao, linkDao); //linkEntity
 
@@ -144,21 +156,34 @@ public class StackManagerExtensionTest  {
     assertNotNull(themes);
     assertTrue("Number of themes is " + themes.size(), themes.size() == 0);
 
-    StackInfo stack = stackManager.getStack("HDP", "0.2");
+    StackInfo stack = stackManager.getStack("HDP", "0.1");
     assertNotNull(stack.getService("OOZIE2"));
     oozie = stack.getService("OOZIE2");
     assertNotNull("Package dir is " + oozie.getServicePackageFolder(), oozie.getServicePackageFolder());
     assertTrue("Package dir is " + oozie.getServicePackageFolder(), oozie.getServicePackageFolder().contains("extensions/EXT/0.1/services/OOZIE2/package"));
-    assertEquals(oozie.getVersion(), "4.0.0");
+    assertEquals(oozie.getVersion(), "3.2.0");
 
     assertTrue("Extensions found: " + stack.getExtensions().size(), stack.getExtensions().size() == 1);
     extension = stack.getExtensions().iterator().next();
     assertEquals("Extension name: " + extension.getName(), extension.getName(), "EXT");
-    assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.3");
+    assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.1");
+
+    stack = stackManager.getStack("HDP", "0.2");
+    assertTrue("Extensions found: " + stack.getExtensions().size(), stack.getExtensions().size() == 0);
 
     stack = stackManager.getStack("HDP", "0.3");
     assertTrue("Extensions found: " + stack.getExtensions().size(), stack.getExtensions().size() == 1);
     extension = stack.getExtensions().iterator().next();
+    assertNotNull(extension.getService("OOZIE2"));
+    oozie = extension.getService("OOZIE2");
+    assertEquals(oozie.getVersion(), "4.0.0");
+
+    assertEquals("Extension name: " + extension.getName(), extension.getName(), "EXT");
+    assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.3");
+
+    stack = stackManager.getStack("HDP", "0.4");
+    assertTrue("Extensions found: " + stack.getExtensions().size(), stack.getExtensions().size() == 1);
+    extension = stack.getExtensions().iterator().next();
     assertEquals("Extension name: " + extension.getName(), extension.getName(), "EXT");
     assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.3");
   }

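A note on the mock setup in this test: with EasyMock's default unordered mocks, when two recorded expectations both match a call, the one recorded first wins, which is why the specific findByStack("HDP", "0.1") expectation is added before the generic anyObject(...) fallback. A self-contained sketch of that behavior (Dao is a hypothetical stand-in for the test's linkDao):

    import static org.easymock.EasyMock.anyObject;
    import static org.easymock.EasyMock.createMock;
    import static org.easymock.EasyMock.expect;
    import static org.easymock.EasyMock.replay;

    import java.util.Collections;
    import java.util.List;

    public class MockOrderingSketch {
      interface Dao {
        List<String> findByStack(String name, String version);
      }

      public static void main(String[] args) {
        Dao dao = createMock(Dao.class);
        // Specific arguments first: this expectation wins for ("HDP", "0.1").
        expect(dao.findByStack("HDP", "0.1"))
            .andReturn(Collections.singletonList("link1")).atLeastOnce();
        // Generic fallback for every other stack/version pair.
        expect(dao.findByStack(anyObject(String.class), anyObject(String.class)))
            .andReturn(Collections.<String>emptyList()).atLeastOnce();
        replay(dao);

        System.out.println(dao.findByStack("HDP", "0.1")); // [link1]
        System.out.println(dao.findByStack("HDP", "0.2")); // []
      }
    }

Separately, the new assertions pass arguments as assertEquals(actual, expected); JUnit's contract is assertEquals(expected, actual), so the checks still pass or fail identically, but a failure message would report the two values with their roles swapped.
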
http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml b/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml
index 0d37b3e..c95a20f 100644
--- a/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml
+++ b/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml
@@ -25,7 +25,7 @@
     <min-stack-versions>
       <stack>
         <name>HDP</name>
-        <version>0.2</version>
+        <version>0.3</version>
       </stack>
     </min-stack-versions>
   </prerequisites>

http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/extensions/EXT/0.3/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/extensions/EXT/0.3/metainfo.xml b/ambari-server/src/test/resources/extensions/EXT/0.3/metainfo.xml
index d827314..1b6ce73 100644
--- a/ambari-server/src/test/resources/extensions/EXT/0.3/metainfo.xml
+++ b/ambari-server/src/test/resources/extensions/EXT/0.3/metainfo.xml
@@ -25,7 +25,7 @@
     <min-stack-versions>
       <stack>
         <name>HDP</name>
-        <version>0.2</version>
+        <version>0.3</version>
       </stack>
     </min-stack-versions>
   </prerequisites>

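The two metainfo changes above raise the minimum HDP version that EXT 0.2 and EXT 0.3 declare from 0.2 to 0.3, which is what leaves HDP 0.2 with no linkable extension in the test while HDP 0.1 keeps its EXT 0.1 link. Conceptually the gate is a dotted-version comparison; a generic sketch of such a check (not Ambari's actual implementation):

    public class MinStackVersionSketch {
      // Compare dotted numeric versions segment by segment ("0.2" < "0.3").
      static int compareDotted(String a, String b) {
        String[] as = a.split("\\."), bs = b.split("\\.");
        int n = Math.max(as.length, bs.length);
        for (int i = 0; i < n; i++) {
          int ai = i < as.length ? Integer.parseInt(as[i]) : 0;
          int bi = i < bs.length ? Integer.parseInt(bs[i]) : 0;
          if (ai != bi) return Integer.compare(ai, bi);
        }
        return 0;
      }

      public static void main(String[] args) {
        String stackVersion = "0.2", minVersion = "0.3";
        boolean linkable = compareDotted(stackVersion, minVersion) >= 0;
        System.out.println(linkable); // false: HDP 0.2 no longer meets the minimum
      }
    }
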
http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/metainfo.xml
new file mode 100644
index 0000000..3b4897f
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/metainfo.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <versions>
+      <upgrade>0.3</upgrade>
+    </versions>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/repos/repoinfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/repos/repoinfo.xml
new file mode 100644
index 0000000..9b3b1c7
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/repos/repoinfo.xml
@@ -0,0 +1,63 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <os family="redhat6">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos6</baseurl>
+      <repoid>HDP-1.1.1.16</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos6</baseurl>
+      <repoid>HDP-UTILS-1.1.0.15</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+      <mirrorslist></mirrorslist>
+    </repo>
+    <repo>
+      <baseurl></baseurl>
+      <repoid>epel</repoid>
+      <reponame>epel</reponame>
+      <unique>true</unique>
+      <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch]]></mirrorslist>
+    </repo>
+  </os>
+  <os family="centos5">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos5</baseurl>
+      <repoid>HDP-1.1.1.16</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos5</baseurl>
+      <repoid>HDP-UTILS-1.1.0.15</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+      <mirrorslist></mirrorslist>
+    </repo>
+    <repo>
+      <baseurl></baseurl>
+      <repoid>epel</repoid>
+      <reponame>epel</reponame>
+      <unique>true</unique>
+      <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-5&arch=$basearch]]></mirrorslist>
+    </repo>
+  </os>
+</reposinfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HBASE/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HBASE/metainfo.xml
new file mode 100644
index 0000000..48123f0
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HBASE/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HBASE</name>
+      <extends>common-services/HBASE/1.0</extends>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/global.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/global.xml
new file mode 100644
index 0000000..bcab577
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/global.xml
@@ -0,0 +1,145 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>namenode_host</name>
+    <value></value>
+    <description>NameNode Host.</description>
+  </property>
+  <property>
+    <name>dfs_name_dir</name>
+    <value>/hadoop/hdfs/namenode</value>
+    <description>NameNode Directories.</description>
+  </property>
+  <property>
+    <name>snamenode_host</name>
+    <value></value>
+    <description>Secondary NameNode host.</description>
+  </property>
+  <property>
+    <name>fs_checkpoint_dir</name>
+    <value>/hadoop/hdfs/namesecondary</value>
+    <description>Secondary NameNode checkpoint dir.</description>
+  </property>
+  <property>
+    <name>datanode_hosts</name>
+    <value></value>
+    <description>List of Datanode Hosts.</description>
+  </property>
+  <property>
+    <name>dfs_data_dir</name>
+    <value>/hadoop/hdfs/data</value>
+    <description>Data directories for Data Nodes.</description>
+  </property>
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>/var/run/hadoop</value>
+    <description>Hadoop PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>dfs_webhdfs_enabled</name>
+    <value>true</value>
+    <description>WebHDFS enabled</description>
+  </property>
+  <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>NameNode Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_opt_newsize</name>
+    <value>200</value>
+    <description>Default size of the Java new generation for the NameNode (Java option -XX:NewSize). This value should be 1/8 of the maximum heap size (-Xmx).</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxnewsize</name>
+    <value>640</value>
+    <description>NameNode maximum new generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_permsize</name>
+    <value>128</value>
+    <description>NameNode permanent generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxpermsize</name>
+    <value>256</value>
+    <description>NameNode maximum permanent generation size</description>
+  </property>
+  <property>
+    <name>datanode_du_reserved</name>
+    <value>1</value>
+    <description>Reserved space for HDFS</description>
+  </property>
+  <property>
+    <name>dtnode_heapsize</name>
+    <value>1024</value>
+    <description>DataNode maximum Java heap size</description>
+  </property>
+  <property>
+    <name>dfs_datanode_failed_volume_tolerated</name>
+    <value>0</value>
+    <description>DataNode volumes failure toleration</description>
+  </property>
+  <property>
+    <name>fs_checkpoint_period</name>
+    <value>21600</value>
+    <description>HDFS Maximum Checkpoint Delay</description>
+  </property>
+  <property>
+    <name>fs_checkpoint_size</name>
+    <value>0.5</value>
+    <description>FS Checkpoint Size.</description>
+  </property>
+  <property>
+    <name>security_enabled</name>
+    <value>false</value>
+    <description>Hadoop Security</description>
+  </property>
+  <property>
+    <name>kerberos_domain</name>
+    <value>EXAMPLE.COM</value>
+    <description>Kerberos realm.</description>
+  </property>
+  <property>
+    <name>keytab_path</name>
+    <value>/etc/security/keytabs</value>
+    <description>KeyTab Directory.</description>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hadoop-env.xml
new file mode 100644
index 0000000..8fb8c7f
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hadoop-env.xml
@@ -0,0 +1,223 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>/var/run/hadoop</value>
+    <description>Hadoop PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>NameNode Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_opt_newsize</name>
+    <value>200</value>
+    <description>Default size of the Java new generation for the NameNode (Java option -XX:NewSize). This value should be 1/8 of the maximum heap size (-Xmx).</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxnewsize</name>
+    <value>200</value>
+    <description>NameNode maximum new generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_permsize</name>
+    <value>128</value>
+    <description>NameNode permanent generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxpermsize</name>
+    <value>256</value>
+    <description>NameNode maximum permanent generation size</description>
+  </property>
+  <property>
+    <name>dtnode_heapsize</name>
+    <value>1024</value>
+    <description>DataNode maximum Java heap size</description>
+  </property>
+  <property>
+    <name>proxyuser_group</name>
+    <value>users</value>
+    <description>Proxy user group.</description>
+  </property>
+  <property>
+    <name>security_enabled</name>
+    <value>false</value>
+    <description>Hadoop Security</description>
+  </property>
+  <property>
+    <name>kerberos_domain</name>
+    <value>EXAMPLE.COM</value>
+    <description>Kerberos realm.</description>
+  </property>
+  <property>
+    <name>hdfs_user</name>
+    <value>hdfs</value>
+    <description>HDFS user.</description>
+  </property>
+  <property>
+    <name>ignore_groupsusers_create</name>
+    <value>false</value>
+    <description>Whether to ignore failures during user and group creation</description>
+  </property>
+  <property>
+    <name>smokeuser</name>
+    <value>ambari-qa</value>
+    <description>User executing service checks</description>
+  </property>
+  <property>
+    <name>user_group</name>
+    <value>hadoop</value>
+    <description>Hadoop user group.</description>
+  </property>
+
+  <!-- hadoop-env.sh -->
+  <property>
+    <name>content</name>
+    <description>hadoop-env.sh content</description>
+    <value>
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop home directory
+export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
+
+# Hadoop Configuration Directory
+#TODO: if env var set that can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+{# this is different for HDP1 #}
+# Path to jsvc required by secure HDP 2.0 datanode
+export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+HADOOP_DATANODE_OPTS="-XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:ConcGCThreads=4 -XX:+UseConcMarkSweepGC -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER={{hdfs_user}}
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Use libraries from standard classpath
+JAVA_JDBC_LIBS=""
+#Add libraries required by mysql connector
+for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by oracle connector
+for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by nodemanager
+MAPREDUCE_LIBS={{mapreduce_libs_path}}
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
+
+if [ -d "/usr/lib/tez" ]; then
+  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
+fi
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+#Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hbase-site.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hbase-site.xml
new file mode 100644
index 0000000..5024e85
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hbase-site.xml
@@ -0,0 +1,137 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>hbase.regionserver.msginterval</name>
+    <value>1000</value>
+    <description>Interval between messages from the RegionServer to HMaster
+    in milliseconds.  Default is 15. Set this value low if you want unit
+    tests to be responsive.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.pause</name>
+    <value>5000</value>
+    <description>General client pause value.  Used mostly as value to wait
+    before running a retry of a failed get, region lookup, etc.</description>
+  </property>
+  <property>
+    <name>hbase.master.meta.thread.rescanfrequency</name>
+    <value>10000</value>
+    <description>How long the HMaster sleeps (in milliseconds) between scans of
+    the root and meta tables.
+    </description>
+  </property>
+  <property>
+    <name>hbase.server.thread.wakefrequency</name>
+    <value>1000</value>
+    <description>Time to sleep in between searches for work (in milliseconds).
+    Used as sleep interval by service threads such as META scanner and log roller.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.handler.count</name>
+    <value>5</value>
+    <description>Count of RPC Server instances spun up on RegionServers.
+    Same property is used by the HMaster for count of master handlers.
+    Default is 10.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.lease.period</name>
+    <value>6000</value>
+    <description>Length of time the master will wait before timing out a region
+    server lease. Since region servers report in every second (see above), this
+    value has been reduced so that the master will notice a dead region server
+    sooner. The default is 30 seconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.info.port</name>
+    <value>-1</value>
+    <description>The port for the hbase master web UI
+    Set to -1 if you do not want the info server to run.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port</name>
+    <value>-1</value>
+    <description>The port for the hbase regionserver web UI
+    Set to -1 if you do not want the info server to run.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port.auto</name>
+    <value>true</value>
+    <description>Info server auto port bind. Enables automatic port
+    search if hbase.regionserver.info.port is already in use.
+    Enabled for testing to run multiple tests on one machine.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.lease.thread.wakefrequency</name>
+    <value>3000</value>
+    <description>The interval between checks for expired region server leases.
+    This value has been reduced due to the other reduced values above so that
+    the master will notice a dead region server sooner. The default is 15 seconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.optionalcacheflushinterval</name>
+    <value>10000</value>
+    <description>
+    Amount of time to wait since the last time a region was flushed before
+    invoking an optional cache flush. Default 60,000.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.safemode</name>
+    <value>false</value>
+    <description>
+    Turn on/off safe mode in region server. Always on for production, always off
+    for tests.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.max.filesize</name>
+    <value>67108864</value>
+    <description>
+    Maximum desired file size for an HRegion.  If filesize exceeds
+    value + (value / 2), the HRegion is split in two.  Default: 256M.
+
+    Keep the maximum filesize small so we split more often in tests.
+    </description>
+  </property>
+  <property>
+    <name>hadoop.log.dir</name>
+    <value>${user.dir}/../logs</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>21818</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The port at which the clients will connect.
+    </description>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-log4j.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-log4j.xml
new file mode 100644
index 0000000..649472d
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-log4j.xml
@@ -0,0 +1,199 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <value>
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshhold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+#Security audit appender
+#
+hadoop.security.logger=INFO,console
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth.audit
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+#
+# hdfs audit logging
+#
+hdfs.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# mapred audit logging
+#
+mapred.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# Rolling File Appender
+#
+
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and number of backups
+log4j.appender.RFA.MaxFileSize=256MB
+log4j.appender.RFA.MaxBackupIndex=10
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+# Debugging Pattern format
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+# Removes "deprecated" messages
+log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
+    </value>
+  </property>
+
+</configuration>

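The content property above carries the full log4j 1.x properties payload that gets rendered into the service's log4j configuration file. As an illustration only (the file path below is hypothetical), log4j 1.x consumes such a rendered file via PropertyConfigurator:

    import org.apache.log4j.Logger;
    import org.apache.log4j.PropertyConfigurator;

    public class Log4jLoadSketch {
      public static void main(String[] args) {
        // Assumes the rendered properties were written to this path.
        PropertyConfigurator.configure("/etc/hadoop/conf/log4j.properties");
        Logger.getLogger(Log4jLoadSketch.class).info("logging configured");
      }
    }
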
http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-site.xml
new file mode 100644
index 0000000..2b979d7
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-site.xml
@@ -0,0 +1,396 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+<!-- file system properties -->
+
+  <property>
+    <name>dfs.name.dir</name>
+    <!-- cluster variant -->
+    <value>/mnt/hmc/hadoop/hdfs/namenode</value>
+    <description>Determines where on the local filesystem the DFS name node
+      should store the name table.  If this is a comma-delimited list
+      of directories then the name table is replicated in all of the
+      directories, for redundancy. </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.support.append</name>
+    <value>true</value>
+    <description>to enable dfs append</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.webhdfs.enabled</name>
+    <value>false</value>
+    <description>to enable webhdfs</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.failed.volumes.tolerated</name>
+    <value>0</value>
+    <description>#of failed disks dn would tolerate</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.block.local-path-access.user</name>
+    <value>hbase</value>
+    <description>the user who is allowed to perform short
+    circuit reads.
+    </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.data.dir</name>
+    <value>/mnt/hmc/hadoop/hdfs/data</value>
+    <description>Determines where on the local filesystem an DFS data node
+  should store its blocks.  If this is a comma-delimited
+  list of directories, then data will be stored in all named
+  directories, typically on different devices.
+  Directories that do not exist are ignored.
+  </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.hosts.exclude</name>
+    <value>/etc/hadoop/conf/dfs.exclude</value>
+    <description>Names a file that contains a list of hosts that are
+    not permitted to connect to the namenode.  The full pathname of the
+    file must be specified.  If the value is empty, no hosts are
+    excluded.</description>
+  </property>
+
+  <property>
+    <name>dfs.hosts</name>
+    <value>/etc/hadoop/conf/dfs.include</value>
+    <description>Names a file that contains a list of hosts that are
+    permitted to connect to the namenode. The full pathname of the file
+    must be specified.  If the value is empty, all hosts are
+    permitted.</description>
+  </property>
+
+  <property>
+    <name>dfs.replication.max</name>
+    <value>50</value>
+    <description>Maximum block replication.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.replication</name>
+    <value>3</value>
+    <description>Default block replication.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.heartbeat.interval</name>
+    <value>3</value>
+    <description>Determines datanode heartbeat interval in seconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.safemode.threshold.pct</name>
+    <value>1.0f</value>
+    <description>
+        Specifies the percentage of blocks that should satisfy
+        the minimal replication requirement defined by dfs.replication.min.
+        Values less than or equal to 0 mean not to start in safe mode.
+        Values greater than 1 will make safe mode permanent.
+        </description>
+  </property>
+
+  <property>
+    <name>dfs.balance.bandwidthPerSec</name>
+    <value>6250000</value>
+    <description>
+        Specifies the maximum amount of bandwidth that each datanode
+        can utilize for the balancing purpose in term of
+        the number of bytes per second.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.address</name>
+    <value>0.0.0.0:50010</value>
+  </property>
+
+  <property>
+    <name>dfs.datanode.http.address</name>
+    <value>0.0.0.0:50075</value>
+  </property>
+
+  <property>
+    <name>dfs.block.size</name>
+    <value>134217728</value>
+    <description>The default block size for new files.</description>
+  </property>
+
+  <property>
+    <name>dfs.http.address</name>
+    <value>hdp1.cybervisiontech.com.ua:50070</value>
+    <description>The address and the base port where the DFS NameNode
+    web UI will listen on.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.du.reserved</name>
+    <!-- cluster variant -->
+    <value>1073741824</value>
+    <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.ipc.address</name>
+    <value>0.0.0.0:8010</value>
+    <description>
+      The datanode ipc server address and port.
+      If the port is 0 then the server will start on a free port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.blockreport.initialDelay</name>
+    <value>120</value>
+    <description>Delay for first block report in seconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.handler.count</name>
+    <value>40</value>
+    <description>The number of server threads for the namenode.</description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.max.xcievers</name>
+    <value>1024</value>
+    <description>PRIVATE CONFIG VARIABLE</description>
+  </property>
+
+  <!-- Permissions configuration -->
+
+  <property>
+    <name>dfs.umaskmode</name>
+    <value>077</value>
+    <description>
+      The octal umask used when creating files and directories.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.web.ugi</name>
+    <!-- cluster variant -->
+    <value>gopher,gopher</value>
+    <description>The user account used by the web interface.
+    Syntax: USERNAME,GROUP1,GROUP2, ...
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.permissions</name>
+    <value>true</value>
+    <description>
+      If "true", enable permission checking in HDFS.
+      If "false", permission checking is turned off,
+      but all other behavior is unchanged.
+      Switching from one parameter value to the other does not change the mode,
+      owner or group of files or directories.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.permissions.supergroup</name>
+    <value>hdfs</value>
+    <description>The name of the group of super-users.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.handler.count</name>
+    <value>100</value>
+    <description>Added to grow Queue size so that more client connections are allowed</description>
+  </property>
+
+  <property>
+    <name>ipc.server.max.response.size</name>
+    <value>5242880</value>
+  </property>
+
+  <property>
+    <name>dfs.block.access.token.enable</name>
+    <value>true</value>
+    <description>
+      If "true", access tokens are used as capabilities for accessing datanodes.
+      If "false", no access tokens are checked on accessing datanodes.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.kerberos.principal</name>
+    <value>nn/_HOST@</value>
+    <description>
+      Kerberos principal name for the NameNode
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.kerberos.principal</name>
+    <value>nn/_HOST@</value>
+    <description>
+      Kerberos principal name for the secondary NameNode.
+    </description>
+  </property>
+
+
+<!--
+  This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
+-->
+  <property>
+    <name>dfs.namenode.kerberos.https.principal</name>
+    <value>host/_HOST@</value>
+    <description>The Kerberos principal for the host that the NameNode runs on.</description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.kerberos.https.principal</name>
+    <value>host/_HOST@</value>
+    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
+  </property>
+
+  <property>
+    <!-- cluster variant -->
+    <name>dfs.secondary.http.address</name>
+    <value>hdp2.cybervisiontech.com.ua:50090</value>
+    <description>Address of secondary namenode web server</description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.https.port</name>
+    <value>50490</value>
+    <description>The https port where secondary-namenode binds</description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.principal</name>
+    <value>HTTP/_HOST@</value>
+    <description>
+      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+      HTTP SPNEGO specification.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.keytab</name>
+    <value>/nn.service.keytab</value>
+    <description>
+      The Kerberos keytab file with the credentials for the
+      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.kerberos.principal</name>
+    <value>dn/_HOST@</value>
+    <description>
+      The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.keytab.file</name>
+    <value>/nn.service.keytab</value>
+    <description>
+      Combined keytab file containing the namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.keytab.file</name>
+    <value>/nn.service.keytab</value>
+    <description>
+      Combined keytab file containing the namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.keytab.file</name>
+    <value>/dn.service.keytab</value>
+    <description>
+      The filename of the keytab file for the DataNode.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.https.port</name>
+    <value>50470</value>
+    <description>The https port where namenode binds</description>
+  </property>
+
+  <property>
+    <name>dfs.https.address</name>
+    <value>hdp1.cybervisiontech.com.ua:50470</value>
+    <description>The https address where namenode binds</description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.data.dir.perm</name>
+    <value>750</value>
+    <description>The permissions that should be there on dfs.data.dir
+    directories. The datanode will not come up if the permissions are
+    different on existing dfs.data.dir directories. If the directories
+    don't exist, they will be created with this permission.</description>
+  </property>
+
+  <property>
+    <name>dfs.access.time.precision</name>
+    <value>0</value>
+    <description>The access time for an HDFS file is precise up to this value.
+    The default value is 1 hour. Setting a value of 0 disables
+    access times for HDFS.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.cluster.administrators</name>
+    <value> hdfs</value>
+    <description>ACL controlling who can view the default servlets in HDFS</description>
+  </property>
+
+  <property>
+    <name>ipc.server.read.threadpool.size</name>
+    <value>5</value>
+    <description></description>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/metainfo.xml
new file mode 100644
index 0000000..da61660
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/metainfo.xml
@@ -0,0 +1,30 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <extends>common-services/HDFS/1.0</extends>
+      <configuration-dependencies>
+        <config-type>core-site</config-type>
+        <config-type>global</config-type>
+        <config-type>hdfs-site</config-type>
+        <config-type>hadoop-policy</config-type>
+        <config-type>hdfs-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>

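The HDFS definition above inherits from common-services and overrides only its configuration-dependencies; the other services in this test stack follow the same pattern. The general shape of resolving such an extends chain, sketched with hypothetical types rather than Ambari's actual resolver:

    import java.util.HashMap;
    import java.util.Map;

    public class ExtendsChainSketch {
      static class ServiceDef {
        final String name;
        final String extendsRef; // e.g. "common-services/HDFS/1.0", or null for a base
        ServiceDef(String name, String extendsRef) {
          this.name = name;
          this.extendsRef = extendsRef;
        }
      }

      // Walk parent references until a base definition is reached.
      static ServiceDef resolveBase(ServiceDef def, Map<String, ServiceDef> registry) {
        while (def.extendsRef != null) {
          def = registry.get(def.extendsRef);
        }
        return def;
      }

      public static void main(String[] args) {
        Map<String, ServiceDef> registry = new HashMap<>();
        registry.put("common-services/HDFS/1.0", new ServiceDef("HDFS", null));
        ServiceDef stackHdfs = new ServiceDef("HDFS", "common-services/HDFS/1.0");
        System.out.println(resolveBase(stackHdfs, registry).name); // HDFS
      }
    }
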
http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/package/dummy-script.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/package/dummy-script.py b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/package/dummy-script.py
new file mode 100644
index 0000000..35de4bb
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/package/dummy-script.py
@@ -0,0 +1,20 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HIVE/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HIVE/metainfo.xml
new file mode 100644
index 0000000..9c122b2
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HIVE/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HIVE</name>
+      <extends>common-services/HIVE/1.0</extends>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/MAPREDUCE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/MAPREDUCE/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/MAPREDUCE/metainfo.xml
new file mode 100644
index 0000000..3b0b3d9
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/MAPREDUCE/metainfo.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>MAPREDUCE</name>
+      <extends>common-services/MAPREDUCE/1.0</extends>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/ZOOKEEPER/metainfo.xml
new file mode 100644
index 0000000..9c8a299
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/ZOOKEEPER/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ZOOKEEPER</name>
+      <extends>common-services/ZOOKEEPER/1.0</extends>
+    </service>
+  </services>
+</metainfo>
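
A note on the <extends> element used throughout these fixtures: each service
definition points at a common-services definition, and during stack loading the
child definition is overlaid on the definition it extends, so a fixture only has
to state what differs. A toy sketch of that overlay idea, in Python (purely
conceptual; Ambari's real merge logic lives in StackModule and handles far more
than flat fields):

def overlay(base, child):
    # Child fields win; anything the child omits is inherited from the base.
    merged = dict(base)
    merged.update(child)
    return merged

base  = {"name": "ZOOKEEPER", "version": "1.0", "components": ["ZOOKEEPER_SERVER"]}
child = {"name": "ZOOKEEPER", "version": "1.1"}
print(overlay(base, child))  # version from the child, components from the base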


[41/50] [abbrv] ambari git commit: Revert "AMBARI-22160. hadooplzo package installation failed on devdeploys (aonishuk)"

Posted by ja...@apache.org.
Revert "AMBARI-22160. hadooplzo package installation failed on devdeploys (aonishuk)"

This reverts commit e037a8d7194ac97da9f746e52eb53cf15ba2415f.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/cec9f730
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/cec9f730
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/cec9f730

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: cec9f73008bb1ec101450e2915fa06c81fe64e56
Parents: e19db40
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Tue Oct 10 16:08:14 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Tue Oct 10 16:08:14 2017 +0300

----------------------------------------------------------------------
 .../libraries/script/script.py                  | 45 ++++++--------------
 .../HDFS/2.1.0.2.0/package/scripts/hdfs.py      | 10 ++---
 .../2.1.0.2.0/package/scripts/install_params.py |  6 +++
 .../2.1.0.2.0/package/scripts/params_linux.py   |  2 +
 .../HDFS/3.0.0.3.0/package/scripts/hdfs.py      | 10 ++---
 .../3.0.0.3.0/package/scripts/install_params.py |  6 +++
 .../3.0.0.3.0/package/scripts/params_linux.py   |  2 +
 .../OOZIE/4.0.0.2.0/package/scripts/oozie.py    |  6 +--
 .../4.0.0.2.0/package/scripts/params_linux.py   |  3 ++
 .../OOZIE/4.2.0.3.0/package/scripts/oozie.py    |  5 +--
 .../4.2.0.3.0/package/scripts/params_linux.py   |  3 ++
 .../stacks/2.0.6/HBASE/test_hbase_master.py     |  2 -
 .../src/test/python/stacks/utils/RMFTestCase.py |  4 +-
 13 files changed, 48 insertions(+), 56 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/cec9f730/ambari-common/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index bf8c0dc..d5b4469 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -501,7 +501,6 @@ class Script(object):
       Script.stack_version_from_distro_select = pkg_provider.get_installed_package_version(
               stack_tools.get_stack_tool_package(stack_tools.STACK_SELECTOR_NAME))
 
-
     return Script.stack_version_from_distro_select
 
 
@@ -526,20 +525,22 @@ class Script(object):
     """
    This function replaces the ${stack_version} placeholder with the actual version.  If the package
     version is passed from the server, use that as an absolute truth.
-
+    
    :param name: name of the package
    :param repo_version: actual version of the repo currently being installed
     """
-    if not STACK_VERSION_PLACEHOLDER in name:
-      return name
-
     stack_version_package_formatted = ""
 
+    if not repo_version:
+      repo_version = self.get_stack_version_before_packages_installed()
+
     package_delimiter = '-' if OSCheck.is_ubuntu_family() else '_'
 
     # repositoryFile is the truth
     # package_version should be made to the form W_X_Y_Z_nnnn
     package_version = default("repositoryFile/repoVersion", None)
+    if package_version is not None:
+      package_version = package_version.replace('.', package_delimiter).replace('-', package_delimiter)
 
     # TODO remove legacy checks
     if package_version is None:
@@ -549,17 +550,6 @@ class Script(object):
     if package_version is None:
       package_version = default("hostLevelParams/package_version", None)
 
-    package_version = None
-    if (package_version is None or '-' not in package_version) and default('/repositoryFile', None):
-      self.load_available_packages()
-      package_name = self.get_package_from_available(name, self.available_packages_in_repos)
-      if package_name is None:
-        raise Fail("Cannot match package for regexp name {0}. Available packages: {1}".format(name, self.available_packages_in_repos))
-      return package_name
-
-    if package_version is not None:
-      package_version = package_version.replace('.', package_delimiter).replace('-', package_delimiter)
-
     # The cluster effective version comes down when the version is known after the initial
     # install.  In that case we should not be guessing which version when invoking INSTALL, but
     # use the supplied version to build the package_version
@@ -578,7 +568,6 @@ class Script(object):
 
     # Wildcards cause a lot of troubles with installing packages, if the version contains wildcards we try to specify it.
     if not package_version or '*' in package_version:
-      repo_version = self.get_stack_version_before_packages_installed()
       stack_version_package_formatted = repo_version.replace('.', package_delimiter).replace('-', package_delimiter) if STACK_VERSION_PLACEHOLDER in name else name
 
     package_name = name.replace(STACK_VERSION_PLACEHOLDER, stack_version_package_formatted)
@@ -771,19 +760,6 @@ class Script(object):
     """
     self.install_packages(env)
 
-  def load_available_packages(self):
-    if self.available_packages_in_repos:
-      return self.available_packages_in_repos
-
-
-    pkg_provider = get_provider("Package")   
-    try:
-      self.available_packages_in_repos = pkg_provider.get_available_packages_in_repos(self.get_config()['repositoryFile']['repositories'])
-    except Exception as err:
-      Logger.exception("Unable to load available packages")
-      self.available_packages_in_repos = []
-
-
   def install_packages(self, env):
     """
    List of packages that are required by the service is received from the server
@@ -806,11 +782,17 @@ class Script(object):
       package_list_str = config['hostLevelParams']['package_list']
       agent_stack_retry_on_unavailability = bool(config['hostLevelParams']['agent_stack_retry_on_unavailability'])
       agent_stack_retry_count = int(config['hostLevelParams']['agent_stack_retry_count'])
+      pkg_provider = get_provider("Package")
+      try:
+        available_packages_in_repos = pkg_provider.get_available_packages_in_repos(config['repositoryFile']['repositories'])
+      except Exception as err:
+        Logger.exception("Unable to load available packages")
+        available_packages_in_repos = []
       if isinstance(package_list_str, basestring) and len(package_list_str) > 0:
         package_list = json.loads(package_list_str)
         for package in package_list:
           if self.check_package_condition(package):
-            name = self.format_package_name(package['name'])
+            name = self.get_package_from_available(package['name'], available_packages_in_repos)
             # HACK: On Windows, only install ambari-metrics packages using Choco Package Installer
             # TODO: Update this once choco packages for hadoop are created. This is because, service metainfo.xml support
             # <osFamily>any<osFamily> which would cause installation failure on Windows.
@@ -1110,6 +1092,5 @@ class Script(object):
 
 
   def __init__(self):
-    self.available_packages_in_repos = []
     if Script.instance is not None:
       raise Fail("An instantiation already exists! Use, get_instance() method.")

http://git-wip-us.apache.org/repos/asf/ambari/blob/cec9f730/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
index 07c7616..e054209 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
@@ -25,7 +25,6 @@ from resource_management.core.resources import Package
 from resource_management.core.source import Template
 from resource_management.core.resources.service import ServiceConfig
 from resource_management.libraries.resources.xml_config import XmlConfig
-from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
 import os
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from ambari_commons import OSConst
@@ -139,11 +138,10 @@ def hdfs(name=None):
        content=Template("slaves.j2")
   )
   
-  if params.lzo_enabled:
-    lzo_packages = get_lzo_packages(params.stack_version_unformatted)
-    Package(lzo_packages,
-            retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
-            retry_count=params.agent_stack_retry_count)
+  if params.lzo_enabled and len(params.lzo_packages) > 0:
+      Package(params.lzo_packages,
+              retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
+              retry_count=params.agent_stack_retry_count)
       
 def install_snappy():
   import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/cec9f730/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/install_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/install_params.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/install_params.py
index 235f231..fe488c3 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/install_params.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/install_params.py
@@ -23,6 +23,7 @@ if OSCheck.is_windows_family():
   exclude_packages = []
 else:
   from resource_management.libraries.functions.default import default
+  from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
   from resource_management.libraries.script.script import Script
 
   _config = Script.get_config()
@@ -31,3 +32,8 @@ else:
   # The logic for LZO also exists in OOZIE's params.py
   io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
   lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
+  lzo_packages = get_lzo_packages(stack_version_unformatted)
+
+  exclude_packages = []
+  if not lzo_enabled:
+    exclude_packages += lzo_packages
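
The lzo_enabled flag reintroduced here is derived entirely from core-site's
io.compression.codecs value, and the LZO packages are installed or excluded
based on it. A self-contained sketch of the check (the same predicate as in
the diff, lifted out for clarity; the sample codec string is illustrative):

def is_lzo_enabled(io_compression_codecs):
    # LZO is on only when the configured codec list names the GPL LZO codec.
    return (io_compression_codecs is not None
            and "com.hadoop.compression.lzo" in io_compression_codecs.lower())

print(is_lzo_enabled("org.apache.hadoop.io.compress.GzipCodec,"
                     "com.hadoop.compression.lzo.LzoCodec"))  # True
print(is_lzo_enabled(None))                                   # False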

http://git-wip-us.apache.org/repos/asf/ambari/blob/cec9f730/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
index bb6349b..76b430b 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
@@ -40,6 +40,7 @@ from resource_management.libraries.functions.get_not_managed_resources import ge
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions.format_jvm_option import format_jvm_option
+from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
 from resource_management.libraries.functions.hdfs_utils import is_https_enabled_in_hdfs
 from resource_management.libraries.functions import is_empty
 from resource_management.libraries.functions.get_architecture import get_architecture
@@ -388,6 +389,7 @@ HdfsResource = functools.partial(
 # The logic for LZO also exists in OOZIE's params.py
 io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
 lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
+lzo_packages = get_lzo_packages(stack_version_unformatted)
   
 name_node_params = default("/commandParams/namenode", None)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/cec9f730/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
index 07c7616..e054209 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
@@ -25,7 +25,6 @@ from resource_management.core.resources import Package
 from resource_management.core.source import Template
 from resource_management.core.resources.service import ServiceConfig
 from resource_management.libraries.resources.xml_config import XmlConfig
-from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
 import os
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from ambari_commons import OSConst
@@ -139,11 +138,10 @@ def hdfs(name=None):
        content=Template("slaves.j2")
   )
   
-  if params.lzo_enabled:
-    lzo_packages = get_lzo_packages(params.stack_version_unformatted)
-    Package(lzo_packages,
-            retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
-            retry_count=params.agent_stack_retry_count)
+  if params.lzo_enabled and len(params.lzo_packages) > 0:
+      Package(params.lzo_packages,
+              retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
+              retry_count=params.agent_stack_retry_count)
       
 def install_snappy():
   import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/cec9f730/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py
index 235f231..fe488c3 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py
@@ -23,6 +23,7 @@ if OSCheck.is_windows_family():
   exclude_packages = []
 else:
   from resource_management.libraries.functions.default import default
+  from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
   from resource_management.libraries.script.script import Script
 
   _config = Script.get_config()
@@ -31,3 +32,8 @@ else:
   # The logic for LZO also exists in OOZIE's params.py
   io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
   lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
+  lzo_packages = get_lzo_packages(stack_version_unformatted)
+
+  exclude_packages = []
+  if not lzo_enabled:
+    exclude_packages += lzo_packages

http://git-wip-us.apache.org/repos/asf/ambari/blob/cec9f730/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
index 2fa6208..de735f4 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
@@ -40,6 +40,7 @@ from resource_management.libraries.functions.get_not_managed_resources import ge
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions.format_jvm_option import format_jvm_option
+from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
 from resource_management.libraries.functions.hdfs_utils import is_https_enabled_in_hdfs
 from resource_management.libraries.functions import is_empty
 from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs, generate_ranger_service_config
@@ -377,6 +378,7 @@ HdfsResource = functools.partial(
 # The logic for LZO also exists in OOZIE's params.py
 io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
 lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
+lzo_packages = get_lzo_packages(stack_version_unformatted)
   
 name_node_params = default("/commandParams/namenode", None)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/cec9f730/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
index f215a1e..64f9d54 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
@@ -37,7 +37,6 @@ from resource_management.libraries.functions.copy_tarball import get_current_ver
 from resource_management.libraries.resources.xml_config import XmlConfig
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions.security_commons import update_credential_provider_path
-from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
 from resource_management.core.resources.packaging import Package
 from resource_management.core.shell import as_user, as_sudo, call, checked_call
 from resource_management.core.exceptions import Fail
@@ -306,9 +305,8 @@ def oozie_server_specific(upgrade_type):
     Execute(format('{sudo} chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension-*.jar'),
       not_if  = no_op_test)
 
-  if params.lzo_enabled:
-    all_lzo_packages = get_lzo_packages(params.stack_version_unformatted)
-    Package(all_lzo_packages,
+  if params.lzo_enabled and len(params.all_lzo_packages) > 0:
+    Package(params.all_lzo_packages,
             retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
             retry_count=params.agent_stack_retry_count)
     Execute(format('{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'),

http://git-wip-us.apache.org/repos/asf/ambari/blob/cec9f730/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
index a0f0672..b66e157 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
@@ -30,6 +30,7 @@ from resource_management.libraries.functions import get_port_from_url
 from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
 from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
 from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
 from resource_management.libraries.functions.expect import expect
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions.get_architecture import get_architecture
@@ -387,3 +388,5 @@ is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled'
 # The logic for LZO also exists in HDFS' params.py
 io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
 lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
+
+all_lzo_packages = get_lzo_packages(stack_version_unformatted)

http://git-wip-us.apache.org/repos/asf/ambari/blob/cec9f730/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py
index 0771e93..d916d3b 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py
@@ -275,9 +275,8 @@ def oozie_server_specific():
     Execute(format('{sudo} chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension-*.jar'),
       not_if  = no_op_test)
 
-  if params.lzo_enabled:
-    all_lzo_packages = get_lzo_packages(params.stack_version_unformatted)
-    Package(all_lzo_packages,
+  if params.lzo_enabled and len(params.all_lzo_packages) > 0:
+    Package(params.all_lzo_packages,
             retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
             retry_count=params.agent_stack_retry_count)
     Execute(format('{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'),

http://git-wip-us.apache.org/repos/asf/ambari/blob/cec9f730/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py
index 70b89b7..d30a465 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py
@@ -28,6 +28,7 @@ from resource_management.libraries.functions import get_port_from_url
 from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
 from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
 from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
 from resource_management.libraries.functions.expect import expect
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions.get_architecture import get_architecture
@@ -369,3 +370,5 @@ is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled'
 # The logic for LZO also exists in HDFS' params.py
 io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
 lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
+
+all_lzo_packages = get_lzo_packages(stack_version_unformatted)

http://git-wip-us.apache.org/repos/asf/ambari/blob/cec9f730/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
index e32393d..2224d31 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
@@ -95,10 +95,8 @@ class TestHBaseMaster(RMFTestCase):
                          try_install=True,
                          os_type=('Redhat', '6.4', 'Final'),
                          checked_call_mocks = [(0, "OK.", "")],
-                         available_packages_in_repos = ['hbase_2_3_0_1_1234'],
                          )
 
-
       # only assert that the correct package is trying to be installed
       self.assertResourceCalled('Package', 'hbase_2_3_0_1_1234',
                                 retry_count=5,

http://git-wip-us.apache.org/repos/asf/ambari/blob/cec9f730/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
index ae33a2a..bff8642 100644
--- a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
+++ b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
@@ -80,8 +80,7 @@ class RMFTestCase(TestCase):
                     mocks_dict={},
                     try_install=False,
                     command_args=[],
-                    log_out_files=False,
-                    available_packages_in_repos = []):
+                    log_out_files=False):
 
     norm_path = os.path.normpath(path)
 
@@ -126,7 +125,6 @@ class RMFTestCase(TestCase):
         Script.instance = None
         script_class_inst = RMFTestCase._get_attr(script_module, classname)()
         script_class_inst.log_out_files = log_out_files
-        script_class_inst.available_packages_in_repos = available_packages_in_repos
         method = RMFTestCase._get_attr(script_class_inst, command)
     except IOError, err:
       raise RuntimeError("Cannot load class %s from %s: %s" % (classname, norm_path, err.message))


[08/50] [abbrv] ambari git commit: AMBARI-14714. add new blueprint schema elements (magyari_sandor)

Posted by ja...@apache.org.
AMBARI-14714. add new blueprint schema elements (magyari_sandor)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1e90bd57
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1e90bd57
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1e90bd57

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: 1e90bd576a228bea77e4c41f110775af99083c73
Parents: 9c8ed58
Author: Sandor Magyari <sm...@hortonworks.com>
Authored: Fri Oct 6 18:27:31 2017 +0200
Committer: Sandor Magyari <sm...@hortonworks.com>
Committed: Fri Oct 6 18:29:06 2017 +0200

----------------------------------------------------------------------
 .../apache/ambari/server/topology/Service.java  | 81 ++++++++++++++++++++
 .../ambari/server/topology/ServiceGroup.java    | 67 ++++++++++++++++
 2 files changed, 148 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/1e90bd57/ambari-server/src/main/java/org/apache/ambari/server/topology/Service.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/Service.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/Service.java
new file mode 100644
index 0000000..66c0dc3
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/Service.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.topology;
+
+
+import org.apache.ambari.server.controller.internal.Stack;
+
+import java.util.Set;
+
+public class Service {
+
+  private final String type;
+
+  private final String name;
+
+  private final Stack stack;
+
+  private final Configuration configuration;
+
+  private final Set<Service> dependentServices;
+
+  public Service(String type, Stack stack) {
+    this(type, null, stack, null, null);
+  }
+
+  /**
+   * Creates a service. If no name is specified, the name defaults to the type.
+   * @param type          the service type
+   * @param name          the service name; may be null, in which case the type is used
+   * @param stack         the stack that provides the service
+   * @param configuration the service-level configuration; may be null
+   * @param dependentServices the services this service depends on; may be null
+   */
+  public Service(String type, String name, Stack stack, Configuration configuration, Set<Service> dependentServices) {
+    this.type = type;
+    if (name == null) {
+      this.name = type;
+    } else {
+      this.name = name;
+    }
+    this.stack = stack;
+    this.configuration = configuration;
+    this.dependentServices = dependentServices;
+  }
+
+  /**
+   * Gets the name of this service
+   *
+   * @return the service name
+   */
+  public String getName() {
+    return this.name;
+  }
+
+  public String getType() {
+    return type;
+  }
+
+  public Stack getStack() {
+    return stack;
+  }
+
+  public Configuration getConfiguration() {
+    return configuration;
+  }
+}
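
The one non-obvious behavior above is the naming fallback in the constructor:
an omitted name collapses to the type. A quick sketch of that rule (Python
used for brevity; the Java class above is the actual addition, and the
"hdfs-dr" name is hypothetical):

class Service:
    def __init__(self, type, name=None):
        self.type = type
        self.name = name if name is not None else type  # name falls back to type

assert Service("HDFS").name == "HDFS"                # no explicit name given
assert Service("HDFS", "hdfs-dr").name == "hdfs-dr"  # explicit name wins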

http://git-wip-us.apache.org/repos/asf/ambari/blob/1e90bd57/ambari-server/src/main/java/org/apache/ambari/server/topology/ServiceGroup.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/ServiceGroup.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/ServiceGroup.java
new file mode 100644
index 0000000..8e66f02
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/ServiceGroup.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.topology;
+
+
+import java.util.Collection;
+import java.util.Set;
+
+public class ServiceGroup {
+
+  private final String name;
+
+  private final Collection<Service> services;
+
+  private final Configuration configuration;
+
+  private final Set<ServiceGroup> dependencies;
+
+  public ServiceGroup(String name, Collection<Service> services) {
+    this(name, services, null, null);
+  }
+
+  public ServiceGroup(String name, Collection<Service> services, Configuration configuration, Set<ServiceGroup> dependencies) {
+    this.name = name;
+    this.services = services;
+    this.configuration = configuration;
+    this.dependencies = dependencies;
+  }
+
+  /**
+   * Gets the name of this service group
+   *
+   * @return the service group name
+   */
+  public String getName() {
+    return this.name;
+  }
+
+
+  public Collection<Service> getServices() {
+    return services;
+  }
+
+  public Configuration getConfiguration() {
+    return configuration;
+  }
+
+  public Set<ServiceGroup> getDependencies() {
+    return dependencies;
+  }
+}
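
Putting the two classes together: a blueprint becomes a set of named groups,
each bundling services plus optional configuration, with group-level
dependencies wiring the groups into a graph. A conceptual sketch (again Python
for brevity; the group and service layout is hypothetical, and dependencies
are keyed by name here rather than by object as in the Java class):

from dataclasses import dataclass, field

@dataclass
class ServiceGroup:
    name: str
    services: list
    dependencies: set = field(default_factory=set)

core = ServiceGroup("core", ["ZOOKEEPER", "HDFS"])
processing = ServiceGroup("processing", ["MAPREDUCE"], {"core"})
# "processing" should only be deployed once everything in "core" is in place.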


[32/50] [abbrv] ambari git commit: AMBARI-22138. When regenerating keytab files for a service, non-service-specific principals are affected (rlevas)

Posted by ja...@apache.org.
AMBARI-22138. When regenerating keytab files for a service, non-service-specific principals are affected (rlevas)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/5af1e539
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/5af1e539
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/5af1e539

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: 5af1e539cce928b32fc5aca67c7bf8dbc2bd3c2e
Parents: b0c24a5
Author: Robert Levas <rl...@hortonworks.com>
Authored: Mon Oct 9 13:06:13 2017 -0400
Committer: Robert Levas <rl...@hortonworks.com>
Committed: Mon Oct 9 13:06:19 2017 -0400

----------------------------------------------------------------------
 .../server/controller/KerberosHelper.java       |   9 +-
 .../server/controller/KerberosHelperImpl.java   | 244 ++++++++++++-------
 .../utilities/RemovableIdentities.java          |   2 +-
 .../kerberos/CreateKeytabFilesServerAction.java |   2 +-
 .../kerberos/CreatePrincipalsServerAction.java  |   2 +-
 .../kerberos/KerberosServerAction.java          |  71 ++++--
 .../PrepareDisableKerberosServerAction.java     |   3 +-
 .../PrepareEnableKerberosServerAction.java      |   6 +-
 .../PrepareKerberosIdentitiesServerAction.java  | 142 ++++++++---
 .../kerberos/AbstractKerberosDescriptor.java    |  25 ++
 .../AbstractKerberosDescriptorContainer.java    |  18 +-
 .../kerberos/KerberosIdentityDescriptor.java    | 160 ++++++++++++
 .../server/controller/KerberosHelperTest.java   |   5 -
 .../utilities/KerberosIdentityCleanerTest.java  |   8 +-
 .../state/kerberos/KerberosDescriptorTest.java  | 150 +++++++++++-
 15 files changed, 665 insertions(+), 182 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/5af1e539/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
index 20c5708..b8e1be1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
@@ -442,12 +442,6 @@ public interface KerberosHelper {
   * @param hostFilter             a set of hostnames indicating the set of hosts to process -
    *                               if null, no filter is relevant; if empty, the filter
    *                               indicates no relevant hosts
-   * @param identityFilter         a Collection of identity names indicating the relevant
-   *                               identities - if null, no filter is relevant; if empty,
-   *                               the filter indicates no relevant identities
-   * @param shouldProcessCommand   a Command implementation to determine if the relevant component
-   *                               is in a state in which is should be process for the current
-   *                               Kerberos operation.
   * @return a list of ServiceComponentHost instances that should be processed during the relevant
    * Kerberos operation.
    * @throws AmbariException
@@ -455,8 +449,7 @@ public interface KerberosHelper {
   List<ServiceComponentHost> getServiceComponentHostsToProcess(Cluster cluster,
                                                                KerberosDescriptor kerberosDescriptor,
                                                                Map<String, ? extends Collection<String>> serviceComponentFilter,
-                                                               Collection<String> hostFilter, Collection<String> identityFilter,
-                                                               Command<Boolean, ServiceComponentHost> shouldProcessCommand)
+                                                               Collection<String> hostFilter)
       throws AmbariException;
 
   Set<String> getHostsWithValidKerberosClient(Cluster cluster) throws AmbariException;
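
The filters threaded through this narrowed signature follow one convention,
visible in the implementation diff below: a null filter matches everything, a
literal "*" entry also matches everything, and an empty filter matches
nothing. A compact sketch of that predicate (Python for brevity; the host
name is just an example):

def matches(filter_values, value):
    # None: no filtering; "*": explicit match-all; empty set: match nothing.
    return filter_values is None or "*" in filter_values or value in filter_values

assert matches(None, "c6401.ambari.apache.org")       # unfiltered
assert matches({"*"}, "c6401.ambari.apache.org")      # wildcard
assert not matches(set(), "c6401.ambari.apache.org")  # empty filter: no hosts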

http://git-wip-us.apache.org/repos/asf/ambari/blob/5af1e539/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
index b691968..f8fe31a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
@@ -122,6 +122,7 @@ import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptor;
 import org.apache.ambari.server.state.kerberos.VariableReplacementHelper;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostServerActionEvent;
 import org.apache.ambari.server.utils.StageUtils;
+import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.directory.server.kerberos.shared.keytab.Keytab;
@@ -268,10 +269,12 @@ public class KerberosHelperImpl implements KerberosHelper {
               boolean updateConfigurations = !requestProperties.containsKey(DIRECTIVE_IGNORE_CONFIGS)
                   || !"true".equalsIgnoreCase(requestProperties.get(DIRECTIVE_IGNORE_CONFIGS));
 
+              boolean forceAllHosts = (hostFilter == null) || (hostFilter.contains("*"));
+
               if ("true".equalsIgnoreCase(value) || "all".equalsIgnoreCase(value)) {
-                handler = new CreatePrincipalsAndKeytabsHandler(true, updateConfigurations, true);
+                handler = new CreatePrincipalsAndKeytabsHandler(KerberosServerAction.OperationType.RECREATE_ALL, updateConfigurations, forceAllHosts, true);
               } else if ("missing".equalsIgnoreCase(value)) {
-                handler = new CreatePrincipalsAndKeytabsHandler(false, updateConfigurations, true);
+                handler = new CreatePrincipalsAndKeytabsHandler(KerberosServerAction.OperationType.CREATE_MISSING, updateConfigurations, forceAllHosts, true);
               }
 
               if (handler != null) {
@@ -326,7 +329,7 @@ public class KerberosHelperImpl implements KerberosHelper {
         if (serviceComponentsArray.length == 2) {
           serviceComponentFilter.put(serviceName, ImmutableSet.copyOf(serviceComponentsArray[1].split(";")));
         } else {
-          serviceComponentFilter.put(serviceName, null);
+          serviceComponentFilter.put(serviceName, ImmutableSet.of("*"));
         }
       }
       return serviceComponentFilter.build();
@@ -340,7 +343,7 @@ public class KerberosHelperImpl implements KerberosHelper {
                                                 RequestStageContainer requestStageContainer, Boolean manageIdentities)
       throws AmbariException, KerberosOperationException {
     return handle(cluster, getKerberosDetails(cluster, manageIdentities), serviceComponentFilter, hostFilter, identityFilter,
-        hostsToForceKerberosOperations, requestStageContainer, new CreatePrincipalsAndKeytabsHandler(false, false,
+        hostsToForceKerberosOperations, requestStageContainer, new CreatePrincipalsAndKeytabsHandler(KerberosServerAction.OperationType.DEFAULT, false, false,
             false));
   }
 
@@ -1061,7 +1064,7 @@ public class KerberosHelperImpl implements KerberosHelper {
                                                   RequestStageContainer requestStageContainer)
       throws KerberosOperationException, AmbariException {
     return handleTestIdentity(cluster, getKerberosDetails(cluster, null), commandParamsStage, requestStageContainer,
-        new CreatePrincipalsAndKeytabsHandler(false, false, false));
+        new CreatePrincipalsAndKeytabsHandler(KerberosServerAction.OperationType.DEFAULT, false, false, false));
   }
 
   @Override
@@ -1230,27 +1233,25 @@ public class KerberosHelperImpl implements KerberosHelper {
   public List<ServiceComponentHost> getServiceComponentHostsToProcess(final Cluster cluster,
                                                                       final KerberosDescriptor kerberosDescriptor,
                                                                       final Map<String, ? extends Collection<String>> serviceComponentFilter,
-                                                                      final Collection<String> hostFilter, Collection<String> identityFilter,
-                                                                      final Command<Boolean, ServiceComponentHost> shouldProcessCommand)
+                                                                      final Collection<String> hostFilter)
       throws AmbariException {
     return getServiceComponentHosts(cluster, new Command<Boolean, ServiceComponentHost>() {
       @Override
       public Boolean invoke(ServiceComponentHost sch) throws AmbariException {
         if (sch != null) {
           // Check the host filter
-          if ((hostFilter == null) || hostFilter.contains(sch.getHostName())) {
+          if ((hostFilter == null) || hostFilter.contains("*") || hostFilter.contains(sch.getHostName())) {
             String serviceName = sch.getServiceName();
 
             // Check the service filter
-            if ((serviceComponentFilter == null) || serviceComponentFilter.containsKey(serviceName)) {
+            if ((serviceComponentFilter == null) || serviceComponentFilter.containsKey("*") || serviceComponentFilter.containsKey(serviceName)) {
               KerberosServiceDescriptor serviceDescriptor = kerberosDescriptor.getService(serviceName);
 
               if (serviceDescriptor != null) {
-                Collection<String> componentFilter = (serviceComponentFilter == null) ? null : serviceComponentFilter.get(serviceName);
+                Collection<String> componentFilter = ((serviceComponentFilter == null) || serviceComponentFilter.containsKey("*")) ? null : serviceComponentFilter.get(serviceName);
 
-                // Check the service/component filter and the shouldProcessCommand
-                return (((componentFilter == null) || componentFilter.contains(sch.getServiceComponentName())) &&
-                    ((shouldProcessCommand == null) || shouldProcessCommand.invoke(sch)));
+                // Check the service/component filter
+                return (((componentFilter == null) || componentFilter.contains("*") || componentFilter.contains(sch.getServiceComponentName())));
               }
             }
           }
@@ -1491,8 +1492,9 @@ public class KerberosHelperImpl implements KerberosHelper {
 
     if (identities != null) {
       for (KerberosIdentityDescriptor identity : identities) {
-        // If there is no filter or the filter contains the current identity's name...
-        if ((identityFilter == null) || identityFilter.contains(identity.getName())) {
+        // If there is no filter or the filter contains the current identity's path...
+        if ((identityFilter == null) || identityFilter.contains(identity.getPath())) {
+
           KerberosPrincipalDescriptor principalDescriptor = identity.getPrincipalDescriptor();
           String principal = null;
           String principalType = null;
@@ -2030,10 +2032,7 @@ public class KerberosHelperImpl implements KerberosHelper {
         cluster,
         kerberosDescriptor,
         serviceComponentFilter,
-        hostFilter,
-        identityFilter,
-        arg -> true);
-
+        hostFilter);
 
     // While iterating over all the ServiceComponentHosts find hosts that have KERBEROS_CLIENT
     // components in the INSTALLED state and add them to the hostsWithValidKerberosClient Set.
@@ -3378,12 +3377,11 @@ public class KerberosHelperImpl implements KerberosHelper {
       requestStageContainer.addStages(roleGraph.getStages());
     }
 
-    public void addDistributeKeytabFilesStage(Cluster cluster, List<ServiceComponentHost> serviceComponentHosts,
-                                              String clusterHostInfoJson, String hostParamsJson,
-                                              Map<String, String> commandParameters,
-                                              RoleCommandOrder roleCommandOrder,
-                                              RequestStageContainer requestStageContainer,
-                                              Set<String> hostsWithValidKerberosClient)
+    void addDistributeKeytabFilesStage(Cluster cluster, String clusterHostInfoJson,
+                                       String hostParamsJson, Map<String, String> commandParameters,
+                                       RoleCommandOrder roleCommandOrder,
+                                       RequestStageContainer requestStageContainer,
+                                       List<String> hosts)
         throws AmbariException {
 
       Stage stage = createNewStage(requestStageContainer.getLastStageId(),
@@ -3393,20 +3391,13 @@ public class KerberosHelperImpl implements KerberosHelper {
           StageUtils.getGson().toJson(commandParameters),
           hostParamsJson);
 
-      Collection<ServiceComponentHost> filteredComponents = filterServiceComponentHostsForHosts(
-          new ArrayList<>(serviceComponentHosts), hostsWithValidKerberosClient);
-
-      if (!filteredComponents.isEmpty()) {
-        List<String> hostsToUpdate = createUniqueHostList(filteredComponents, Collections.singleton(HostState.HEALTHY));
+      if (!hosts.isEmpty()) {
         Map<String, String> requestParams = new HashMap<>();
-        List<RequestResourceFilter> requestResourceFilters = new ArrayList<>();
-        RequestResourceFilter reqResFilter = new RequestResourceFilter(Service.Type.KERBEROS.name(), Role.KERBEROS_CLIENT.name(), hostsToUpdate);
-        requestResourceFilters.add(reqResFilter);
 
         ActionExecutionContext actionExecContext = new ActionExecutionContext(
             cluster.getClusterName(),
             SET_KEYTAB,
-            requestResourceFilters,
+            createRequestResourceFilters(hosts),
             requestParams);
         customCommandExecutionHelper.addExecutionCommandsToStage(actionExecContext, stage,
             requestParams, null);
@@ -3422,7 +3413,12 @@ public class KerberosHelperImpl implements KerberosHelper {
     /**
      * Send a custom command to the KERBEROS_CLIENT to check if there are missing keytabs on each hosts.
      */
-    public void addCheckMissingKeytabsStage(Cluster cluster, String clusterHostInfoJson, String hostParamsJson, ServiceComponentHostServerActionEvent event, Map<String, String> commandParameters, RoleCommandOrder roleCommandOrder, RequestStageContainer requestStageContainer, List<ServiceComponentHost> serviceComponentHosts) throws AmbariException {
+    void addCheckMissingKeytabsStage(Cluster cluster, String clusterHostInfoJson,
+                                     String hostParamsJson, Map<String, String> commandParameters,
+                                     RoleCommandOrder roleCommandOrder,
+                                     RequestStageContainer requestStageContainer,
+                                     List<String> hostsToInclude)
+        throws AmbariException {
       Stage stage = createNewStage(requestStageContainer.getLastStageId(),
           cluster,
           requestStageContainer.getId(),
@@ -3430,20 +3426,13 @@ public class KerberosHelperImpl implements KerberosHelper {
           StageUtils.getGson().toJson(commandParameters),
           hostParamsJson);
 
-      Collection<ServiceComponentHost> filteredComponents = filterServiceComponentHostsForHosts(
-          new ArrayList<>(serviceComponentHosts), getHostsWithValidKerberosClient(cluster));
-
-      if (!filteredComponents.isEmpty()) {
-        List<String> hostsToUpdate = createUniqueHostList(filteredComponents, Collections.singleton(HostState.HEALTHY));
+      if (!hostsToInclude.isEmpty()) {
         Map<String, String> requestParams = new HashMap<>();
-        List<RequestResourceFilter> requestResourceFilters = new ArrayList<>();
-        RequestResourceFilter reqResFilter = new RequestResourceFilter(Service.Type.KERBEROS.name(), Role.KERBEROS_CLIENT.name(), hostsToUpdate);
-        requestResourceFilters.add(reqResFilter);
 
         ActionExecutionContext actionExecContext = new ActionExecutionContext(
           cluster.getClusterName(),
           CHECK_KEYTABS,
-          requestResourceFilters,
+          createRequestResourceFilters(hostsToInclude),
           requestParams);
         customCommandExecutionHelper.addExecutionCommandsToStage(actionExecContext, stage, requestParams, null);
       }
@@ -3454,32 +3443,6 @@ public class KerberosHelperImpl implements KerberosHelper {
       requestStageContainer.addStages(roleGraph.getStages());
     }
 
-    /**
-     * Filter out ServiceComponentHosts that are on on hosts in the specified set of host names.
-     * <p/>
-     * It is expected that the supplied collection is modifiable. It will be modified inplace.
-     *
-     * @param serviceComponentHosts a collection of ServiceComponentHost items to test
-     * @param hosts                 a set of host names indicating valid hosts
-     * @return a collection of filtered ServiceComponentHost items
-     */
-    private Collection<ServiceComponentHost> filterServiceComponentHostsForHosts(Collection<ServiceComponentHost> serviceComponentHosts,
-                                                                                 Set<String> hosts) {
-
-      if ((serviceComponentHosts != null) && (hosts != null)) {
-        Iterator<ServiceComponentHost> iterator = serviceComponentHosts.iterator();
-        while (iterator.hasNext()) {
-          ServiceComponentHost sch = iterator.next();
-
-          if (!hosts.contains(sch.getHostName())) {
-            iterator.remove();
-          }
-        }
-      }
-
-      return serviceComponentHosts;
-    }
-
     void addDisableSecurityHookStage(Cluster cluster,
                                      String clusterHostInfoJson,
                                      String hostParamsJson,
@@ -3677,6 +3640,13 @@ public class KerberosHelperImpl implements KerberosHelper {
       requestStageContainer.setClusterHostInfo(clusterHostInfoJson);
       requestStageContainer.addStages(roleGraph.getStages());
     }
+
+    private List<RequestResourceFilter> createRequestResourceFilters(List<String> hostsToInclude) {
+      List<RequestResourceFilter> requestResourceFilters = new ArrayList<>();
+      RequestResourceFilter reqResFilter = new RequestResourceFilter(Service.Type.KERBEROS.name(), Role.KERBEROS_CLIENT.name(), hostsToInclude);
+      requestResourceFilters.add(reqResFilter);
+      return requestResourceFilters;
+    }
   }
 
   /**
@@ -3746,6 +3716,8 @@ public class KerberosHelperImpl implements KerberosHelper {
           roleCommandOrder, requestStageContainer);
 
       if (kerberosDetails.manageIdentities()) {
+        List<String> hostsToInclude = calculateHosts(cluster, serviceComponentHosts, hostsWithValidKerberosClient, false);
+
         commandParameters.put(KerberosServerAction.KDC_TYPE, kerberosDetails.getKdcType().name());
 
         // *****************************************************************
@@ -3767,8 +3739,8 @@ public class KerberosHelperImpl implements KerberosHelper {
 
         // *****************************************************************
         // Create stage to distribute keytabs
-        addDistributeKeytabFilesStage(cluster, serviceComponentHosts, clusterHostInfoJson, hostParamsJson,
-            commandParameters, roleCommandOrder, requestStageContainer, hostsWithValidKerberosClient);
+        addDistributeKeytabFilesStage(cluster, clusterHostInfoJson, hostParamsJson, commandParameters,
+            roleCommandOrder, requestStageContainer, hostsToInclude);
       }
 
       // *****************************************************************
@@ -3885,10 +3857,11 @@ public class KerberosHelperImpl implements KerberosHelper {
    */
   private class CreatePrincipalsAndKeytabsHandler extends Handler {
     /**
-     * A boolean value indicating whether to create keytabs for all principals (<code>true</code>)
-     * or only the ones that are missing (<code>false</code>).
+     * The type of Kerberos operation being performed.
+     *
+     * @see org.apache.ambari.server.serveraction.kerberos.KerberosServerAction.OperationType
      */
-    private boolean regenerateAllKeytabs;
+    private KerberosServerAction.OperationType operationType;
 
     /**
      * A boolean value indicating whether to update service configurations (<code>true</code>)
@@ -3897,6 +3870,14 @@ public class KerberosHelperImpl implements KerberosHelper {
     private boolean updateConfigurations;
 
     /**
+     * A boolean value indicating whether to include all hosts (<code>true</code>) when setting up
+     * agent-side tasks or to select only the hosts found to be relevant (<code>false</code>).
+     * <p>
+     * This is useful if we do not know beforehand which hosts need to be involved in the operation.
+     */
+    private boolean forceAllHosts;
+
+    /**
      * A boolean value indicating whether to include Ambari server identity (<code>true</code>)
      * or ignore it (<code>false</code>).
      */
@@ -3906,17 +3887,20 @@ public class KerberosHelperImpl implements KerberosHelper {
      * CreatePrincipalsAndKeytabsHandler constructor to set whether this instance should be used to
      * regenerate all keytabs or just the ones that have not been distributed
      *
-     * @param regenerateAllKeytabs A boolean value indicating whether to create keytabs for all
-     *                             principals (<code>true</code> or only the ones that are missing
-     *                             (<code>false</code>)
-     * @param updateConfigurations A boolean value indicating whether to update service configurations
-     *                             (<code>true</code>) or ignore any potential configuration changes
-     *                             (<code>false</code>)
+     * @param operationType         The type of Kerberos operation being performed
+     * @param updateConfigurations  A boolean value indicating whether to update service configurations
+     *                              (<code>true</code>) or ignore any potential configuration changes (<code>false</code>)
+     * @param forceAllHosts         A boolean value indicating whether to include all hosts (<code>true</code>)
+     *                              when setting up agent-side tasks or to select only the hosts found to be
+     *                              relevant (<code>false</code>)
+     * @param includeAmbariIdentity A boolean value indicating whether to include Ambari server
+     *                              identity (<code>true</code>) or ignore it (<code>false</code>)
      */
-    public CreatePrincipalsAndKeytabsHandler(boolean regenerateAllKeytabs, boolean updateConfigurations,
-                                             boolean includeAmbariIdentity) {
-      this.regenerateAllKeytabs = regenerateAllKeytabs;
+    CreatePrincipalsAndKeytabsHandler(KerberosServerAction.OperationType operationType, boolean updateConfigurations,
+                                      boolean forceAllHosts, boolean includeAmbariIdentity) {
+      this.operationType = operationType;
       this.updateConfigurations = updateConfigurations;
+      this.forceAllHosts = forceAllHosts;
       this.includeAmbariIdentity = includeAmbariIdentity;
     }
 
@@ -3947,6 +3931,7 @@ public class KerberosHelperImpl implements KerberosHelper {
       }
 
 
+      boolean processAmbariIdentity = includeAmbariIdentity;
       Map<String, String> commandParameters = new HashMap<>();
       commandParameters.put(KerberosServerAction.AUTHENTICATED_USER_NAME, ambariManagementController.getAuthName());
       commandParameters.put(KerberosServerAction.DEFAULT_REALM, kerberosDetails.getDefaultRealm());
@@ -3955,22 +3940,29 @@ public class KerberosHelperImpl implements KerberosHelper {
       }
       if (serviceComponentFilter != null) {
         commandParameters.put(KerberosServerAction.SERVICE_COMPONENT_FILTER, StageUtils.getGson().toJson(serviceComponentFilter));
+
+        processAmbariIdentity = serviceComponentFilter.containsKey("AMBARI") &&
+            ((serviceComponentFilter.get("AMBARI") == null) || serviceComponentFilter.get("AMBARI").contains("*") || serviceComponentFilter.get("AMBARI").contains("AMBARI_SERVER"));
       }
       if (hostFilter != null) {
         commandParameters.put(KerberosServerAction.HOST_FILTER, StageUtils.getGson().toJson(hostFilter));
+
+        processAmbariIdentity = hostFilter.contains("*") || hostFilter.contains(StageUtils.getHostName());
       }
       if (identityFilter != null) {
         commandParameters.put(KerberosServerAction.IDENTITY_FILTER, StageUtils.getGson().toJson(identityFilter));
       }
 
-      commandParameters.put(KerberosServerAction.REGENERATE_ALL, (regenerateAllKeytabs) ? "true" : "false");
-      commandParameters.put(KerberosServerAction.INCLUDE_AMBARI_IDENTITY, (includeAmbariIdentity) ? "true" : "false");
+      commandParameters.put(KerberosServerAction.OPERATION_TYPE, (operationType == null) ? KerberosServerAction.OperationType.DEFAULT.name() : operationType.name());
+      commandParameters.put(KerberosServerAction.INCLUDE_AMBARI_IDENTITY, (processAmbariIdentity) ? "true" : "false");
 
       if (updateConfigurations) {
         commandParameters.put(KerberosServerAction.UPDATE_CONFIGURATION_NOTE, "Updated Kerberos-related configurations");
         commandParameters.put(KerberosServerAction.UPDATE_CONFIGURATIONS, "true");
       }
 
+      List<String> hostsToInclude = calculateHosts(cluster, serviceComponentHosts, hostsWithValidKerberosClient, forceAllHosts);
+
       // *****************************************************************
       // Create stage to create principals
       addPrepareKerberosIdentitiesStage(cluster, clusterHostInfoJson, hostParamsJson, event,
@@ -3979,9 +3971,9 @@ public class KerberosHelperImpl implements KerberosHelper {
       if (kerberosDetails.manageIdentities()) {
         commandParameters.put(KerberosServerAction.KDC_TYPE, kerberosDetails.getKdcType().name());
 
-        if (!regenerateAllKeytabs) {
-          addCheckMissingKeytabsStage(cluster, clusterHostInfoJson, hostParamsJson, event,
-              commandParameters, roleCommandOrder, requestStageContainer, serviceComponentHosts);
+        if (operationType != KerberosServerAction.OperationType.RECREATE_ALL) {
+          addCheckMissingKeytabsStage(cluster, clusterHostInfoJson, hostParamsJson,
+              commandParameters, roleCommandOrder, requestStageContainer, hostsToInclude);
         }
 
         // *****************************************************************
@@ -3996,15 +3988,15 @@ public class KerberosHelperImpl implements KerberosHelper {
 
         // *****************************************************************
         // Create stage to distribute and configure keytab for Ambari server and configure JAAS
-        if (includeAmbariIdentity && kerberosDetails.createAmbariPrincipal()) {
+        if (processAmbariIdentity && kerberosDetails.createAmbariPrincipal()) {
           addConfigureAmbariIdentityStage(cluster, clusterHostInfoJson, hostParamsJson, event, commandParameters,
               roleCommandOrder, requestStageContainer);
         }
 
         // *****************************************************************
         // Create stage to distribute keytabs
-        addDistributeKeytabFilesStage(cluster, serviceComponentHosts, clusterHostInfoJson,
-            hostParamsJson, commandParameters, roleCommandOrder, requestStageContainer, hostsWithValidKerberosClient);
+        addDistributeKeytabFilesStage(cluster, clusterHostInfoJson, hostParamsJson, commandParameters,
+            roleCommandOrder, requestStageContainer, hostsToInclude);
       }
 
       if (updateConfigurations) {
@@ -4019,6 +4011,74 @@ public class KerberosHelperImpl implements KerberosHelper {
   }
 
   /**
+   * Filter out ServiceComponentHosts that are not on hosts in the specified set of host names.
+   * <p/>
+   * It is expected that the supplied collection is modifiable. It will be modified in place.
+   *
+   * @param serviceComponentHosts a collection of ServiceComponentHost items to test
+   * @param hosts                 a set of host names indicating valid hosts
+   * @return a collection of filtered ServiceComponentHost items
+   */
+  private Collection<ServiceComponentHost> filterServiceComponentHostsForHosts(Collection<ServiceComponentHost> serviceComponentHosts,
+                                                                               Set<String> hosts) {
+
+    if ((serviceComponentHosts != null) && (hosts != null)) {
+      Iterator<ServiceComponentHost> iterator = serviceComponentHosts.iterator();
+      while (iterator.hasNext()) {
+        ServiceComponentHost sch = iterator.next();
+
+        if (!hosts.contains(sch.getHostName())) {
+          iterator.remove();
+        }
+      }
+    }
+
+    return serviceComponentHosts;
+  }
+
+  /**
+   * Calculate the hosts to include when issuing agent-side commands.
+   * <p>
+   * If forcing all hosts, select only the healthy hosts in the cluster; otherwise select only the
+   * healthy hosts from the set of hosts specified in the collection of relevant {@link ServiceComponentHost}s.
+   *
+   * @param cluster                      the cluster
+   * @param serviceComponentHosts        a collection of {@link ServiceComponentHost}s that are
+   *                                     relevant to the current operation
+   * @param hostsWithValidKerberosClient the collection of hosts known to have the Kerberos client
+   *                                     component installed
+   * @param forceAllHosts                true to process all hosts from the cluster rather than use
+   *                                     the hosts parsed from the set of {@link ServiceComponentHost}s
+   * @return a filtered list of host names
+   * @throws AmbariException
+   */
+  private List<String> calculateHosts(Cluster cluster, List<ServiceComponentHost> serviceComponentHosts, Set<String> hostsWithValidKerberosClient, boolean forceAllHosts) throws AmbariException {
+    if(forceAllHosts) {
+      List<String> hosts = new ArrayList<>();
+      Collection<Host> clusterHosts = cluster.getHosts();
+      if(!CollectionUtils.isEmpty(clusterHosts)) {
+        for(Host host: clusterHosts) {
+          if(host.getState() == HostState.HEALTHY) {
+            hosts.add(host.getHostName());
+          }
+        }
+      }
+
+      return hosts;
+    }
+    else {
+      Collection<ServiceComponentHost> filteredComponents = filterServiceComponentHostsForHosts(
+          new ArrayList<>(serviceComponentHosts), hostsWithValidKerberosClient);
+
+      if (filteredComponents.isEmpty()) {
+        return Collections.emptyList();
+      } else {
+        return createUniqueHostList(filteredComponents, Collections.singleton(HostState.HEALTHY));
+      }
+    }
+  }
+
+  /**
    * DeletePrincipalsAndKeytabsHandler is an implementation of the Handler interface used to delete
    * principals and keytabs throughout the cluster.
    * <p/>

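For context, the selection rule introduced by calculateHosts can be modeled in isolation. The sketch below is illustrative only and is not part of this patch: it swaps Ambari's Cluster/Host/ServiceComponentHost types for plain strings plus a host-to-health map, but keeps the same two branches (forceAllHosts takes every healthy cluster host, while the default path keeps only healthy hosts that carry a relevant component and a valid KERBEROS_CLIENT, de-duplicated).

    import java.util.*;

    public class CalculateHostsSketch {
      // clusterHostHealth: host name -> true if the host is HEALTHY
      static List<String> calculateHosts(Map<String, Boolean> clusterHostHealth,
                                         List<String> relevantComponentHosts,
                                         Set<String> hostsWithValidKerberosClient,
                                         boolean forceAllHosts) {
        if (forceAllHosts) {
          // Take every healthy host in the cluster, regardless of components.
          List<String> hosts = new ArrayList<>();
          for (Map.Entry<String, Boolean> e : clusterHostHealth.entrySet()) {
            if (e.getValue()) {
              hosts.add(e.getKey());
            }
          }
          return hosts;
        }
        // Otherwise keep only healthy hosts that carry a relevant component
        // and a working Kerberos client, de-duplicated.
        Set<String> unique = new LinkedHashSet<>();
        for (String host : relevantComponentHosts) {
          if (hostsWithValidKerberosClient.contains(host)
              && Boolean.TRUE.equals(clusterHostHealth.get(host))) {
            unique.add(host);
          }
        }
        return new ArrayList<>(unique);
      }

      public static void main(String[] args) {
        Map<String, Boolean> health = new LinkedHashMap<>();
        health.put("host1", true);
        health.put("host2", false);  // unhealthy: always excluded
        health.put("host3", true);   // healthy, but hosts no relevant component
        List<String> relevant = Arrays.asList("host1", "host1", "host2");
        Set<String> withClient = new HashSet<>(Arrays.asList("host1", "host2"));

        System.out.println(calculateHosts(health, relevant, withClient, false)); // [host1]
        System.out.println(calculateHosts(health, relevant, withClient, true));  // [host1, host3]
      }
    }
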
http://git-wip-us.apache.org/repos/asf/ambari/blob/5af1e539/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/RemovableIdentities.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/RemovableIdentities.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/RemovableIdentities.java
index 66bf7b3..cd23e83 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/RemovableIdentities.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/RemovableIdentities.java
@@ -133,7 +133,7 @@ public class RemovableIdentities {
    * Remove all identities which are not used by other services or components
    */
   public void remove(KerberosHelper kerberosHelper) throws AmbariException, KerberosOperationException {
-    Set<String> identitiesToRemove = skipUsed().stream().map(KerberosIdentityDescriptor::getName).collect(toSet());
+    Set<String> identitiesToRemove = skipUsed().stream().map(KerberosIdentityDescriptor::getPath).collect(toSet());
     if (!identitiesToRemove.isEmpty()) {
       kerberosHelper.deleteIdentities(cluster, components, identitiesToRemove);
     }

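That one-line change is behaviorally significant: keying the delete set on getPath() instead of getName() keeps same-named identities from different services distinct, which is what the updated KerberosIdentityCleanerTest expectations further below reflect. A minimal illustration, with Identity standing in for KerberosIdentityDescriptor:

    import java.util.Arrays;
    import java.util.List;
    import java.util.Set;
    import java.util.stream.Collectors;

    public class IdentityPathSketch {
      static class Identity {
        final String name;
        final String path;
        Identity(String name, String path) { this.name = name; this.path = path; }
      }

      public static void main(String[] args) {
        // Two services declare an identity with the same short name.
        List<Identity> toRemove = Arrays.asList(
            new Identity("oozie_server1", "/OOZIE/OOZIE_SERVER/oozie_server1"),
            new Identity("oozie_server1", "/OOZIE2/OOZIE_SERVER2/oozie_server1"));

        Set<String> byName = toRemove.stream().map(i -> i.name).collect(Collectors.toSet());
        Set<String> byPath = toRemove.stream().map(i -> i.path).collect(Collectors.toSet());

        System.out.println(byName.size()); // 1: the two identities collapse into one entry
        System.out.println(byPath.size()); // 2: paths keep them distinct
      }
    }
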
http://git-wip-us.apache.org/repos/asf/ambari/blob/5af1e539/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/CreateKeytabFilesServerAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/CreateKeytabFilesServerAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/CreateKeytabFilesServerAction.java
index 4396a2b..355f515 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/CreateKeytabFilesServerAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/CreateKeytabFilesServerAction.java
@@ -217,7 +217,7 @@ public class CreateKeytabFilesServerAction extends KerberosServerAction {
                 return commandReport;
               }
 
-              boolean regenerateKeytabs = "true".equalsIgnoreCase(getCommandParameterValue(getCommandParameters(), REGENERATE_ALL));
+              boolean regenerateKeytabs = getOperationType(getCommandParameters()) == OperationType.RECREATE_ALL;
               boolean onlyKeytabWrite = "true".equalsIgnoreCase(identityRecord.get(KerberosIdentityDataFileReader.ONLY_KEYTAB_WRITE));
               boolean grabKeytabFromCache = regenerateKeytabs && onlyKeytabWrite;
               // if grabKeytabFromCache=true we will try to get keytab from cache and send to agent, it will be true for

http://git-wip-us.apache.org/repos/asf/ambari/blob/5af1e539/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/CreatePrincipalsServerAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/CreatePrincipalsServerAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/CreatePrincipalsServerAction.java
index 069c821..1c0853b9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/CreatePrincipalsServerAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/CreatePrincipalsServerAction.java
@@ -128,7 +128,7 @@ public class CreatePrincipalsServerAction extends KerberosServerAction {
       seenPrincipals.add(evaluatedPrincipal);
 
       boolean processPrincipal;
-      boolean regenerateKeytabs = "true".equalsIgnoreCase(getCommandParameterValue(getCommandParameters(), REGENERATE_ALL));
+      boolean regenerateKeytabs = getOperationType(getCommandParameters()) == OperationType.RECREATE_ALL;
 
       if (regenerateKeytabs) {
         // do not process cached identities that can be passed as is(headless identities)

http://git-wip-us.apache.org/repos/asf/ambari/blob/5af1e539/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/KerberosServerAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/KerberosServerAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/KerberosServerAction.java
index c86ffa3..1b0f4fb 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/KerberosServerAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/KerberosServerAction.java
@@ -36,6 +36,7 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.utils.StageUtils;
 import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -108,31 +109,32 @@ public abstract class KerberosServerAction extends AbstractServerAction {
    */
   public static final String DATA_DIRECTORY_PREFIX = ".ambari_";
 
-  /*
+  /**
    * Kerberos action shared data entry name for the principal-to-password map
    */
   private static final String PRINCIPAL_PASSWORD_MAP = "principal_password_map";
 
-  /*
+  /**
    * Kerberos action shared data entry name for the principal-to-key_number map
    */
   private static final String PRINCIPAL_KEY_NUMBER_MAP = "principal_key_number_map";
 
-  /*
-  * Key used in kerberosCommandParams in ExecutionCommand for base64 encoded keytab content
-  */
+  /**
+   * Key used in kerberosCommandParams in ExecutionCommand for base64 encoded keytab content
+   */
   public static final String KEYTAB_CONTENT_BASE64 = "keytab_content_base64";
 
-  /*
-  * Key used in kerberosCommandParams in ExecutionCommand to indicate whether to generate key keytabs
-  * for all principals ("true") or only those that are missing ("false")
-  */
-  public static final String REGENERATE_ALL = "regenerate_all";
+  /**
+   * Key used in kerberosCommandParams in ExecutionCommand to indicate which type of creation operation to perform.
+   *
+   * @see OperationType
+   */
+  public static final String OPERATION_TYPE = "operation_type";
 
-  /*
-  * Key used in kerberosCommandParams in ExecutionCommand to indicate whether to include Ambari server indetity
-  * ("true") or ignore it ("false")
-  */
+  /**
+   * Key used in kerberosCommandParams in ExecutionCommand to indicate whether to include the Ambari server identity
+   * ("true") or ignore it ("false")
+   */
   public static final String INCLUDE_AMBARI_IDENTITY = "include_ambari_identity";
 
   /**
@@ -219,6 +221,22 @@ public abstract class KerberosServerAction extends AbstractServerAction {
   }
 
   /**
+   * Given a (command parameter) Map, attempts to safely retrieve the "operation_type" property.
+   *
+   * @param commandParameters a Map containing the dictionary of data to interrogate
+   * @return an OperationType
+   */
+  protected static OperationType getOperationType(Map<String, String> commandParameters) {
+    String value = getCommandParameterValue(commandParameters, OPERATION_TYPE);
+    if(StringUtils.isEmpty(value)) {
+      return OperationType.DEFAULT;
+    }
+    else {
+      return OperationType.valueOf(value.toUpperCase());
+    }
+  }
+
+  /**
    * Sets the shared principal-to-password Map used to store principals and generated password for
    * use within the current request context.
    *
@@ -569,4 +587,29 @@ public abstract class KerberosServerAction extends AbstractServerAction {
       }
     }
   }
+
+  /**
+   * A Kerberos operation type
+   * <ul>
+   * <li>RECREATE_ALL - regenerate keytabs for all principals</li>
+   * <li>CREATE_MISSING - generate keytabs for only those that are missing</li>
+   * <li>DEFAULT - generate needed keytabs for new components</li>
+   * </ul>
+   */
+  public enum OperationType {
+    /**
+     * Regenerate keytabs for all principals
+     */
+    RECREATE_ALL,
+
+    /**
+     *  Generate keytabs for only those that are missing
+     */
+    CREATE_MISSING,
+
+    /**
+     * Generate needed keytabs for new components
+     */
+    DEFAULT
+  }
 }

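The new OPERATION_TYPE command parameter is parsed leniently: a missing or empty value falls back to DEFAULT, while any other value goes through OperationType.valueOf after upper-casing, so an unrecognized value fails fast with an IllegalArgumentException. A standalone model of that parsing (the harness class is hypothetical; the key name and enum constants match the patch):

    import java.util.HashMap;
    import java.util.Map;

    public class OperationTypeParsingSketch {
      enum OperationType { RECREATE_ALL, CREATE_MISSING, DEFAULT }

      // Mirrors getOperationType: a null/empty "operation_type" resolves to DEFAULT.
      static OperationType getOperationType(Map<String, String> commandParameters) {
        String value = (commandParameters == null) ? null : commandParameters.get("operation_type");
        if (value == null || value.isEmpty()) {
          return OperationType.DEFAULT;
        }
        return OperationType.valueOf(value.toUpperCase());
      }

      public static void main(String[] args) {
        Map<String, String> params = new HashMap<>();
        System.out.println(getOperationType(params));   // DEFAULT
        params.put("operation_type", "recreate_all");
        System.out.println(getOperationType(params));   // RECREATE_ALL
      }
    }
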
http://git-wip-us.apache.org/repos/asf/ambari/blob/5af1e539/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareDisableKerberosServerAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareDisableKerberosServerAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareDisableKerberosServerAction.java
index f56e946..e1f8419 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareDisableKerberosServerAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareDisableKerberosServerAction.java
@@ -83,8 +83,7 @@ public class PrepareDisableKerberosServerAction extends AbstractPrepareKerberosS
     List<ServiceComponentHost> schToProcess = kerberosHelper.getServiceComponentHostsToProcess(cluster,
         kerberosDescriptor,
         getServiceComponentFilter(),
-        null, identityFilter,
-      sch -> true);
+        null);
 
     Map<String, Map<String, String>> kerberosConfigurations = new HashMap<>();
     Map<String, String> commandParameters = getCommandParameters();

http://git-wip-us.apache.org/repos/asf/ambari/blob/5af1e539/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareEnableKerberosServerAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareEnableKerberosServerAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareEnableKerberosServerAction.java
index 3ec84fa..335451f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareEnableKerberosServerAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareEnableKerberosServerAction.java
@@ -92,8 +92,11 @@ public class PrepareEnableKerberosServerAction extends PrepareKerberosIdentities
       }
     }
 
+    KerberosHelper kerberosHelper = getKerberosHelper();
+    Map<String, ? extends Collection<String>> serviceComponentFilter = getServiceComponentFilter();
+    Collection<String> hostFilter = getHostFilter();
     Collection<String> identityFilter = getIdentityFilter();
-    List<ServiceComponentHost> schToProcess = getServiceComponentHostsToProcess(cluster, kerberosDescriptor, identityFilter);
+    List<ServiceComponentHost> schToProcess = kerberosHelper.getServiceComponentHostsToProcess(cluster, kerberosDescriptor, serviceComponentFilter, hostFilter);
 
     String dataDirectory = getCommandParameterValue(commandParameters, DATA_DIRECTORY);
     Map<String, Map<String, String>> kerberosConfigurations = new HashMap<>();
@@ -107,7 +110,6 @@ public class PrepareEnableKerberosServerAction extends PrepareKerberosIdentities
       actionLog.writeStdOut(String.format("Processing %d components", schCount));
     }
 
-    KerberosHelper kerberosHelper = getKerberosHelper();
     Map<String, Set<String>> propertiesToRemove = new HashMap<>();
     Map<String, Set<String>> propertiesToIgnore = new HashMap<>();
     Set<String> services = cluster.getServices().keySet();

http://git-wip-us.apache.org/repos/asf/ambari/blob/5af1e539/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareKerberosIdentitiesServerAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareKerberosIdentitiesServerAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareKerberosIdentitiesServerAction.java
index 49828cb..038d1b5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareKerberosIdentitiesServerAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareKerberosIdentitiesServerAction.java
@@ -32,8 +32,12 @@ import org.apache.ambari.server.agent.CommandReport;
 import org.apache.ambari.server.controller.KerberosHelper;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.kerberos.KerberosComponentDescriptor;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosIdentityDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptor;
 import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -67,11 +71,22 @@ public class PrepareKerberosIdentitiesServerAction extends AbstractPrepareKerber
       throw new AmbariException("Missing cluster object");
     }
 
+    KerberosHelper kerberosHelper = getKerberosHelper();
+
     KerberosDescriptor kerberosDescriptor = getKerberosDescriptor(cluster, false);
+    Map<String, String> commandParameters = getCommandParameters();
+    OperationType operationType = getOperationType(getCommandParameters());
+
+    Map<String, ? extends Collection<String>> serviceComponentFilter = getServiceComponentFilter();
+    Collection<String> hostFilter = getHostFilter();
     Collection<String> identityFilter = getIdentityFilter();
-    List<ServiceComponentHost> schToProcess = getServiceComponentHostsToProcess(cluster, kerberosDescriptor, identityFilter);
+    // If the operationType is default, use the getServiceComponentHostsToProcess method to determine
+    // which ServiceComponentHosts to process based on the filters.  However, if we are regenerating
+    // keytabs for a specific set of components, build the identity filter below so we can
+    // customize what needs to be done.
+    List<ServiceComponentHost> schToProcess = kerberosHelper.getServiceComponentHostsToProcess(cluster, kerberosDescriptor,
+        (operationType == OperationType.DEFAULT) ? serviceComponentFilter : null, hostFilter);
 
-    Map<String, String> commandParameters = getCommandParameters();
     String dataDirectory = getCommandParameterValue(commandParameters, DATA_DIRECTORY);
     Map<String, Map<String, String>> kerberosConfigurations = new HashMap<>();
 
@@ -84,18 +99,32 @@ public class PrepareKerberosIdentitiesServerAction extends AbstractPrepareKerber
       actionLog.writeStdOut(String.format("Processing %d components", schCount));
     }
 
-    KerberosHelper kerberosHelper = getKerberosHelper();
     Set<String> services = cluster.getServices().keySet();
     Map<String, Set<String>> propertiesToRemove = new HashMap<>();
     Map<String, Set<String>> propertiesToIgnore = new HashMap<>();
     boolean includeAmbariIdentity = "true".equalsIgnoreCase(getCommandParameterValue(commandParameters, KerberosServerAction.INCLUDE_AMBARI_IDENTITY));
 
+    // If a host filter is set, do not include the Ambari server identity.
+    includeAmbariIdentity &= (hostFilter == null);
+
+    if (serviceComponentFilter != null) {
+      // If we are including the Ambari identity; then ensure that if a service/component filter is set,
+      // it contains the AMBARI/AMBARI_SERVER component; else do not include the Ambari service identity.
+      includeAmbariIdentity &= (serviceComponentFilter.get("AMBARI") != null) && serviceComponentFilter.get("AMBARI").contains("AMBARI_SERVER");
+
+      if((operationType != OperationType.DEFAULT)) {
+        // Update the identity filter, if necessary
+        identityFilter = updateIdentityFilter(kerberosDescriptor, identityFilter, serviceComponentFilter);
+      }
+    }
+
     // Calculate the current host-specific configurations. These will be used to replace
     // variables within the Kerberos descriptor data
     Map<String, Map<String, String>> configurations = kerberosHelper.calculateConfigurations(cluster, null, kerberosDescriptor, false, false);
 
     processServiceComponentHosts(cluster, kerberosDescriptor, schToProcess, identityFilter, dataDirectory,
-        configurations, kerberosConfigurations, includeAmbariIdentity, propertiesToIgnore, !CollectionUtils.isEmpty(getHostFilter()));
+        configurations, kerberosConfigurations, includeAmbariIdentity, propertiesToIgnore,
+        hostFilter != null);
 
     kerberosHelper.applyStackAdvisorUpdates(cluster, services, configurations, kerberosConfigurations,
         propertiesToIgnore, propertiesToRemove, true);
@@ -119,35 +148,6 @@ public class PrepareKerberosIdentitiesServerAction extends AbstractPrepareKerber
   }
 
   /**
-   * Calls {@link KerberosHelper#getServiceComponentHostsToProcess(Cluster, KerberosDescriptor, Map, Collection, Collection, KerberosHelper.Command)}
-   * with no filter on ServiceComponentHosts
-   * <p/>
-   * The <code>shouldProcessCommand</code> implementation passed to KerberosHelper#getServiceComponentHostsToProcess
-   * always returns true, indicating to process all ServiceComponentHosts.
-   *
-   * @param cluster            the cluster
-   * @param kerberosDescriptor the current Kerberos descriptor
-   * @param identityFilter     a list of identities to include, or all if null  @return the list of ServiceComponentHosts to process
-   * @throws AmbariException
-   * @see KerberosHelper#getServiceComponentHostsToProcess(Cluster, KerberosDescriptor, Map, Collection, Collection, KerberosHelper.Command)
-   */
-  protected List<ServiceComponentHost> getServiceComponentHostsToProcess(Cluster cluster,
-                                                                         KerberosDescriptor kerberosDescriptor,
-                                                                         Collection<String> identityFilter)
-      throws AmbariException {
-    return getKerberosHelper().getServiceComponentHostsToProcess(cluster,
-        kerberosDescriptor,
-        getServiceComponentFilter(),
-        getHostFilter(), identityFilter,
-        new KerberosHelper.Command<Boolean, ServiceComponentHost>() {
-          @Override
-          public Boolean invoke(ServiceComponentHost sch) throws AmbariException {
-            return true;
-          }
-        });
-  }
-
-  /**
    * Calls {@link KerberosHelper#getKerberosDescriptor(Cluster, boolean)}
    *
    * @param cluster                 cluster instance
@@ -200,5 +200,81 @@ public class PrepareKerberosIdentitiesServerAction extends AbstractPrepareKerber
           calculatedConfiguration, kerberosConfigurations, includePreconfiguredData);
     }
   }
+
+  /**
+   * Iterate through the identities in the Kerberos descriptor to find the relevant identities to
+   * add to the identity filter.
+   * <p>
+   * The set of identities to include in the filter are determined by whether they are explicit
+   * identities set in a component or service in the supplied service/component filter.
+   *
+   * @param kerberosDescriptor     the Kerberos descriptor
+   * @param identityFilter         the existing identity filter
+   * @param serviceComponentFilter the service/component filter
+   * @return a new collection of paths (including any existing paths) to act as the updated identity filter
+   */
+  private Collection<String> updateIdentityFilter(KerberosDescriptor kerberosDescriptor,
+                                                  Collection<String> identityFilter,
+                                                  Map<String, ? extends Collection<String>> serviceComponentFilter) {
+
+    Set<String> updatedFilter = (identityFilter == null) ? new HashSet<>() : new HashSet<>(identityFilter);
+
+    Map<String, KerberosServiceDescriptor> serviceDescriptors = kerberosDescriptor.getServices();
+
+    if (serviceDescriptors != null) {
+      for (KerberosServiceDescriptor serviceDescriptor : serviceDescriptors.values()) {
+        String serviceName = serviceDescriptor.getName();
+
+        if (serviceComponentFilter.containsKey("*") || serviceComponentFilter.containsKey(serviceName)) {
+          Collection<String> componentFilter = serviceComponentFilter.get(serviceName);
+          boolean anyComponent = ((componentFilter == null) || componentFilter.contains("*"));
+
+          // Only include the service-wide identities if the component filter is null or contains "*",
+          // which indicates that all components for the given service are to be processed.
+          if (anyComponent) {
+            addIdentitiesToFilter(serviceDescriptor.getIdentities(), updatedFilter, true);
+          }
+
+          Map<String, KerberosComponentDescriptor> componentDescriptors = serviceDescriptor.getComponents();
+          if (componentDescriptors != null) {
+            for (KerberosComponentDescriptor componentDescriptor : componentDescriptors.values()) {
+              String componentName = componentDescriptor.getName();
+              if (anyComponent || (componentFilter.contains(componentName))) {
+                addIdentitiesToFilter(componentDescriptor.getIdentities(), updatedFilter, true);
+              }
+            }
+          }
+        }
+      }
+    }
+
+    return updatedFilter;
+  }
+
+  /**
+   * Add the path of each identity in the collection of identities to the supplied identity filter
+   * if that identity is not a reference to another identity or if references are allowed.
+   *
+   * @param identityDescriptors the collection of identity descriptors to process
+   * @param identityFilter      the identity filter to modify
+   * @param skipReferences      true to skip identities that are references to other identities;
+   *                            false to include them as well
+   */
+  private void addIdentitiesToFilter(List<KerberosIdentityDescriptor> identityDescriptors,
+                                     Collection<String> identityFilter, boolean skipReferences) {
+    if (!CollectionUtils.isEmpty(identityDescriptors)) {
+      for (KerberosIdentityDescriptor identityDescriptor : identityDescriptors) {
+        if (!skipReferences || !identityDescriptor.isReference()) {
+          String identityPath = identityDescriptor.getPath();
+
+          if (!StringUtils.isEmpty(identityPath)) {
+            identityFilter.add(identityPath);
+
+            // Find and add the references TO this identity to ensure the new/updated keytab file is
+            // sent to the appropriate host(s)
+            addIdentitiesToFilter(identityDescriptor.findReferences(), identityFilter, false);
+          }
+        }
+      }
+    }
+  }
 }
 

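The filter expansion in updateIdentityFilter can be summarized on its own: for each service named in the service/component filter, a null component list or one containing "*" pulls in the service-wide identities plus every component's identities; otherwise only the named components contribute. The sketch below is a simplification under stated assumptions: it works on plain maps of pre-computed identity paths, ignores the "*" service key, and does not chase identity references the way the real code does.

    import java.util.*;

    public class IdentityFilterSketch {
      static Set<String> updateFilter(Map<String, List<String>> serviceComponentFilter,
                                      Map<String, List<String>> serviceIdentities,
                                      Map<String, Map<String, List<String>>> componentIdentities) {
        Set<String> filter = new HashSet<>();
        for (String service : serviceComponentFilter.keySet()) {
          List<String> componentFilter = serviceComponentFilter.get(service);
          boolean anyComponent = (componentFilter == null) || componentFilter.contains("*");
          if (anyComponent) {
            // Service-wide identities only count when every component is in scope.
            filter.addAll(serviceIdentities.getOrDefault(service, Collections.emptyList()));
          }
          Map<String, List<String>> components =
              componentIdentities.getOrDefault(service, Collections.emptyMap());
          for (Map.Entry<String, List<String>> c : components.entrySet()) {
            if (anyComponent || componentFilter.contains(c.getKey())) {
              filter.addAll(c.getValue());
            }
          }
        }
        return filter;
      }

      public static void main(String[] args) {
        Map<String, List<String>> scFilter = new HashMap<>();
        scFilter.put("OOZIE", Arrays.asList("OOZIE_SERVER"));

        Map<String, List<String>> svcIds = new HashMap<>();
        svcIds.put("OOZIE", Arrays.asList("/OOZIE/oozie_service"));

        Map<String, Map<String, List<String>>> compIds = new HashMap<>();
        Map<String, List<String>> oozieComps = new HashMap<>();
        oozieComps.put("OOZIE_SERVER", Arrays.asList("/OOZIE/OOZIE_SERVER/oozie_server1"));
        compIds.put("OOZIE", oozieComps);

        // Only the named component's identities are selected; the service-wide
        // identity is skipped because the component filter is not "*".
        System.out.println(updateFilter(scFilter, svcIds, compIds));
        // -> [/OOZIE/OOZIE_SERVER/oozie_server1]
      }
    }
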
http://git-wip-us.apache.org/repos/asf/ambari/blob/5af1e539/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptor.java b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptor.java
index b496942..3a1eb4a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptor.java
@@ -259,6 +259,31 @@ public abstract class AbstractKerberosDescriptor {
   }
 
   /**
+   * Calculate the path to this descriptor for logging purposes.
+   * Examples:
+   * <ul>
+   * <li>/</li>
+   * <li>/SERVICE</li>
+   * <li>/SERVICE/COMPONENT</li>
+   * <li>/SERVICE/COMPONENT/identity_name</li>
+   * </ul>
+   *
+   * @return a path
+   */
+  public String getPath() {
+    StringBuilder path = new StringBuilder();
+    AbstractKerberosDescriptor current = this;
+    while (current != null && (current.getName() != null)) {
+      path.insert(0, current.getName());
+      path.insert(0, '/');
+      current = current.getParent();
+    }
+
+    return path.toString();
+  }
+
+  /**
    * An enumeration of the different Kerberos (sub)descriptors for internal use.
    */
   public enum Type {

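The parent-walk in the new getPath is easy to model in isolation; Node below is a stand-in for AbstractKerberosDescriptor, and the root descriptor is represented by a node with a null name so the walk stops there:

    public class PathSketch {
      static class Node {
        final String name;   // null for the root descriptor
        final Node parent;
        Node(String name, Node parent) { this.name = name; this.parent = parent; }

        // Prepend "/<name>" for each named ancestor, innermost first.
        String getPath() {
          StringBuilder path = new StringBuilder();
          Node current = this;
          while (current != null && current.name != null) {
            path.insert(0, current.name);
            path.insert(0, '/');
            current = current.parent;
          }
          return path.toString();
        }
      }

      public static void main(String[] args) {
        Node root = new Node(null, null);
        Node service = new Node("SERVICE", root);
        Node component = new Node("COMPONENT", service);
        Node identity = new Node("identity_name", component);
        System.out.println(identity.getPath()); // /SERVICE/COMPONENT/identity_name
      }
    }
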
http://git-wip-us.apache.org/repos/asf/ambari/blob/5af1e539/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptorContainer.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptorContainer.java b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptorContainer.java
index 9ddb941..73550f4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptorContainer.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptorContainer.java
@@ -862,22 +862,9 @@ public abstract class AbstractKerberosDescriptorContainer extends AbstractKerber
           referencedIdentity = getReferencedIdentityDescriptor(identity.getName());
 
           if(referencedIdentity != null) {
-            // Calculate the path to this identity descriptor for logging purposes.
-            // Examples:
-            //   /
-            //   /SERVICE
-            //   /SERVICE/COMPONENT
-            StringBuilder path = new StringBuilder();
-            AbstractKerberosDescriptor parent = identity.getParent();
-            while(parent != null && (parent.getName() != null)) {
-              path.insert(0, parent.getName());
-              path.insert(0, '/');
-              parent = parent.getParent();
-            }
-
             // Log this since it is deprecated...
             LOG.warn("Referenced identities should be declared using the identity's \"reference\" attribute, not the identity's \"name\" attribute." +
-                " This is a deprecated feature. Problems may occur in the future unless this is corrected: {}:{}", path, identity.getName());
+                " This is a deprecated feature. Problems may occur in the future unless this is corrected: {}:{}", identity.getPath(), identity.getName());
           }
         }
       } catch (AmbariException e) {
@@ -896,6 +883,9 @@ public abstract class AbstractKerberosDescriptorContainer extends AbstractKerber
       } else {
         dereferencedIdentity = new KerberosIdentityDescriptor(identity.toMap());
       }
+
+      // Force the path for this identity descriptor to be the same as the original identity descriptor's.
+      dereferencedIdentity.setPath(identity.getPath());
     }
 
     return dereferencedIdentity;

http://git-wip-us.apache.org/repos/asf/ambari/blob/5af1e539/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosIdentityDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosIdentityDescriptor.java b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosIdentityDescriptor.java
index ef45343..200a069 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosIdentityDescriptor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosIdentityDescriptor.java
@@ -17,10 +17,15 @@
  */
 package org.apache.ambari.server.state.kerberos;
 
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
 import java.util.Map;
 
 import org.apache.ambari.server.collections.Predicate;
 import org.apache.ambari.server.collections.PredicateUtils;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.lang.StringUtils;
 
 import com.google.common.base.Optional;
 
@@ -94,6 +99,8 @@ public class KerberosIdentityDescriptor extends AbstractKerberosDescriptor {
    */
   private Predicate when = null;
 
+  private String path = null;
+
   /**
    * Creates a new KerberosIdentityDescriptor
    *
@@ -157,6 +164,47 @@ public class KerberosIdentityDescriptor extends AbstractKerberosDescriptor {
   }
 
   /**
+   * Gets the absolute path to the referenced Kerberos identity definition
+   *
+   * @return the absolute path to the referenced Kerberos identity definition, or a path derived
+   *         from this identity's own name if no reference is set
+   */
+  public String getReferenceAbsolutePath() {
+    String absolutePath;
+    if(StringUtils.isEmpty(reference)) {
+      absolutePath = getName();
+    }
+    else {
+      absolutePath = reference;
+    }
+
+    if(!StringUtils.isEmpty(absolutePath) && !absolutePath.startsWith("/")) {
+      String path = getPath();
+      if(path == null) {
+        path = "";
+      }
+
+      if(absolutePath.startsWith("..")) {
+        AbstractKerberosDescriptor parent = getParent();
+        if(parent != null) {
+          parent = parent.getParent();
+
+          if(parent != null) {
+            absolutePath = absolutePath.replace("..", parent.getPath());
+          }
+        }
+      }
+      else if(absolutePath.startsWith(".")) {
+        AbstractKerberosDescriptor parent = getParent();
+        if (parent != null) {
+          absolutePath = absolutePath.replace(".", parent.getPath());
+        }
+      }
+    }
+
+    return absolutePath;
+  }
+
+  /**
    * Sets the path to the referenced Kerberos identity definition
    *
    * @param reference the path to the referenced Kerberos identity definition or <code>null</code>
@@ -356,6 +404,59 @@ public class KerberosIdentityDescriptor extends AbstractKerberosDescriptor {
     }
   }
 
+  /**
+   * Determines whether this {@link KerberosIdentityDescriptor} indicates it is a reference to some
+   * other {@link KerberosIdentityDescriptor}.
+   * <p>
+   * A KerberosIdentityDescriptor is a reference if its <code>reference</code> attribute is set
+   * or if (for backwards compatibility) its name indicates a path. For example:
+   * <ul>
+   * <li><code>SERVICE/COMPONENT/identity_name</code></li>
+   * <li><code>/identity_name</code></li>
+   * <li><code>./identity_name</code></li>
+   * </ul>
+   *
+   * @return true if this {@link KerberosIdentityDescriptor} indicates a reference; otherwise false
+   */
+  public boolean isReference() {
+    String name = getName();
+    return !StringUtils.isEmpty(reference) ||
+        (!StringUtils.isEmpty(name) && (name.startsWith("/") || name.startsWith("./")));
+  }
+
+  /**
+   * Calculate the path to this identity descriptor for logging purposes.
+   * Examples:
+   * <ul>
+   * <li>/</li>
+   * <li>/SERVICE</li>
+   * <li>/SERVICE/COMPONENT</li>
+   * <li>/SERVICE/COMPONENT/identity_name</li>
+   * </ul>
+   * <p>
+   * This implementation calculates and caches the path if the path has not been previously set.
+   *
+   * @return a path
+   */
+  @Override
+  public String getPath() {
+    if (path == null) {
+      path = super.getPath();
+    }
+
+    return path;
+  }
+
+  /**
+   * Explicitly set the path to this {@link KerberosIdentityDescriptor}.
+   * <p>
+   * This is useful when creating detached identity descriptors while dereferencing identity references
+   * so that the path information is not lost.
+   *
+   * @param path a path
+   */
+  void setPath(String path) {
+    this.path = path;
+  }
+
   @Override
   public int hashCode() {
     return super.hashCode() +
@@ -406,4 +507,63 @@ public class KerberosIdentityDescriptor extends AbstractKerberosDescriptor {
       return false;
     }
   }
+
+  /**
+   * Find all of the {@link KerberosIdentityDescriptor}s that reference this {@link KerberosIdentityDescriptor}
+   *
+   * @return a list of {@link KerberosIdentityDescriptor}s
+   */
+  public List<KerberosIdentityDescriptor> findReferences() {
+    AbstractKerberosDescriptor root = getRoot();
+    if(root instanceof AbstractKerberosDescriptorContainer) {
+      return findIdentityReferences((AbstractKerberosDescriptorContainer)root, getPath());
+    }
+    else {
+      return null;
+    }
+  }
+
+  /**
+   * Given a root, recursively traverse the tree of {@link AbstractKerberosDescriptorContainer}s looking for
+   * {@link KerberosIdentityDescriptor}s that declare the given path as the referenced Kerberos identity.
+   *
+   * @param root the starting point
+   * @param path the path to the referenced {@link KerberosIdentityDescriptor} in the {@link KerberosDescriptor}
+   * @return a list of {@link KerberosIdentityDescriptor}s
+   */
+  private List<KerberosIdentityDescriptor> findIdentityReferences(AbstractKerberosDescriptorContainer root, String path) {
+    if (root == null) {
+      return null;
+    }
+
+    List<KerberosIdentityDescriptor> references = new ArrayList<>();
+
+    // Process the KerberosIdentityDescriptors found in this node.
+    List<KerberosIdentityDescriptor> identityDescriptors = root.getIdentities();
+    if (identityDescriptors != null) {
+      for (KerberosIdentityDescriptor identityDescriptor : identityDescriptors) {
+        if (identityDescriptor.isReference()) {
+          String reference = identityDescriptor.getReferenceAbsolutePath();
+
+          if (!StringUtils.isEmpty(reference) && path.equals(reference)) {
+            references.add(identityDescriptor);
+          }
+        }
+      }
+    }
+
+    // Process the children of the node
+    Collection<? extends AbstractKerberosDescriptorContainer> children = root.getChildContainers();
+    if(!CollectionUtils.isEmpty(children)) {
+      for (AbstractKerberosDescriptorContainer child : children) {
+        Collection<KerberosIdentityDescriptor> childReferences = findIdentityReferences(child, path);
+        if (!CollectionUtils.isEmpty(childReferences)) {
+          // If references were found in the current child, add them to this node's list of references.
+          references.addAll(childReferences);
+        }
+      }
+    }
+
+    return references;
+  }
 }

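The subtlest part of the new reference machinery is resolving relative references: "./name" resolves against the identity's container, "../name" against the container's parent, and anything already starting with "/" is taken as absolute. A standalone model under stated assumptions (resolve is a hypothetical helper that works on plain path strings, whereas the patch's getReferenceAbsolutePath walks getParent() and uses String.replace the same way):

    public class ReferenceResolutionSketch {
      // parentPath is the path of the identity's container, e.g. "/SERVICE2/COMPONENT21".
      static String resolve(String reference, String parentPath) {
        if (reference.startsWith("/")) {
          return reference;                       // already absolute
        }
        if (reference.startsWith("..")) {
          // "../x" resolves against the container's parent.
          String grandparentPath = parentPath.substring(0, parentPath.lastIndexOf('/'));
          return reference.replace("..", grandparentPath);
        }
        if (reference.startsWith(".")) {
          // "./x" resolves against the container itself.
          return reference.replace(".", parentPath);
        }
        return reference;                         // a bare name stays as-is
      }

      public static void main(String[] args) {
        System.out.println(resolve("./component1_identity", "/SERVICE2/COMPONENT21"));
        // -> /SERVICE2/COMPONENT21/component1_identity
        System.out.println(resolve("../service2_identity", "/SERVICE2/COMPONENT21"));
        // -> /SERVICE2/service2_identity
      }
    }
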
http://git-wip-us.apache.org/repos/asf/ambari/blob/5af1e539/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
index 60d7fd9..7ed52d2 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
@@ -1441,11 +1441,6 @@ public class KerberosHelperTest extends EasyMockSupport {
         .andReturn(Collections.singletonList(schKerberosClient))
         .once();
 
-    final Clusters clusters = injector.getInstance(Clusters.class);
-    expect(clusters.getHost("host1"))
-        .andReturn(host)
-        .once();
-
     final AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
     expect(ambariManagementController.findConfigurationTagsWithOverrides(cluster, null))
         .andReturn(Collections.emptyMap())

http://git-wip-us.apache.org/repos/asf/ambari/blob/5af1e539/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleanerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleanerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleanerTest.java
index 663934f..2518da9 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleanerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleanerTest.java
@@ -77,7 +77,7 @@ public class KerberosIdentityCleanerTest extends EasyMockSupport {
   @Test
   public void removesAllKerberosIdentitesOfComponentAfterComponentWasUninstalled() throws Exception {
     installComponent(OOZIE, OOZIE_SERVER, HOST);
-    kerberosHelper.deleteIdentities(cluster, singletonList(new Component(HOST, OOZIE, OOZIE_SERVER)), newHashSet("oozie_server1", "oozie_server2"));
+    kerberosHelper.deleteIdentities(cluster, singletonList(new Component(HOST, OOZIE, OOZIE_SERVER)), newHashSet("/OOZIE/OOZIE_SERVER/oozie_server1", "/OOZIE/OOZIE_SERVER/oozie_server2"));
     expectLastCall().once();
     replayAll();
     uninstallComponent(OOZIE, OOZIE_SERVER, HOST);
@@ -95,7 +95,7 @@ public class KerberosIdentityCleanerTest extends EasyMockSupport {
   public void skipsRemovingIdentityThatIsSharedByPrincipalName() throws Exception {
     installComponent(OOZIE, OOZIE_SERVER, HOST);
     installComponent(OOZIE_2, OOZIE_SERVER_2, HOST);
-    kerberosHelper.deleteIdentities(cluster, singletonList(new Component(HOST, OOZIE, OOZIE_SERVER)), newHashSet("oozie_server1"));
+    kerberosHelper.deleteIdentities(cluster, singletonList(new Component(HOST, OOZIE, OOZIE_SERVER)), newHashSet("/OOZIE/OOZIE_SERVER/oozie_server1"));
     expectLastCall().once();
     replayAll();
     uninstallComponent(OOZIE, OOZIE_SERVER, HOST);
@@ -106,7 +106,7 @@ public class KerberosIdentityCleanerTest extends EasyMockSupport {
   public void skipsRemovingIdentityThatIsSharedByKeyTabFilePath() throws Exception {
     installComponent(YARN, RESOURCE_MANAGER, HOST);
     installComponent(YARN_2, RESOURCE_MANAGER_2, HOST);
-    kerberosHelper.deleteIdentities(cluster, singletonList(new Component(HOST, YARN, RESOURCE_MANAGER)), newHashSet("rm_unique"));
+    kerberosHelper.deleteIdentities(cluster, singletonList(new Component(HOST, YARN, RESOURCE_MANAGER)), newHashSet("/YARN/RESOURCE_MANAGER/rm_unique"));
     expectLastCall().once();
     replayAll();
     uninstallComponent(YARN, RESOURCE_MANAGER, HOST);
@@ -133,7 +133,7 @@ public class KerberosIdentityCleanerTest extends EasyMockSupport {
   @Test
   public void removesServiceIdentitiesSkipComponentIdentitiesAfterServiceWasUninstalled() throws Exception {
     installComponent(OOZIE, OOZIE_SERVER, HOST);
-    kerberosHelper.deleteIdentities(cluster, hdfsComponents(), newHashSet("hdfs-service"));
+    kerberosHelper.deleteIdentities(cluster, hdfsComponents(), newHashSet("/HDFS/hdfs-service"));
     expectLastCall().once();
     replayAll();
     uninstallService(HDFS, hdfsComponents());

http://git-wip-us.apache.org/repos/asf/ambari/blob/5af1e539/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorTest.java
index d6bef02..079096d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorTest.java
@@ -24,6 +24,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -79,6 +80,9 @@ public class KerberosDescriptorTest {
           "        {" +
           "          \"name\": \"service1_spnego\"," +
           "          \"reference\": \"/spnego\"" +
+          "        }," +
+          "        {" +
+          "          \"name\": \"service1_identity\"" +
           "        }" +
           "      ]," +
           "      \"name\": \"SERVICE1\"" +
@@ -87,6 +91,39 @@ public class KerberosDescriptorTest {
           "      \"identities\": [" +
           "        {" +
           "          \"name\": \"/spnego\"" +
+          "        }," +
+          "        {" +
+          "          \"name\": \"service2_identity\"" +
+          "        }" +
+          "      ]," +
+          "      \"components\": [" +
+          "        {" +
+          "          \"identities\": [" +
+          "            {" +
+          "              \"name\": \"component1_identity\"" +
+          "            }," +
+          "            {" +
+          "              \"name\": \"service2_component1_service1_identity\"," +
+          "              \"reference\": \"/SERVICE1/service1_identity\"" +
+          "            }," +
+          "            {" +
+          "              \"name\": \"service2_component1_component1_identity\"," +
+          "              \"reference\": \"./component1_identity\"" +
+          "            }," +
+          "            {" +
+          "              \"name\": \"service2_component1_service2_identity\"," +
+          "              \"reference\": \"../service2_identity\"" +
+          "            }" +
+          "          ]," +
+          "          \"name\": \"COMPONENT21\"" +
+          "        }," +
+          "        {" +
+          "          \"identities\": [" +
+          "            {" +
+          "              \"name\": \"component2_identity\"" +
+          "            }" +
+          "          ]," +
+          "          \"name\": \"COMPONENT22\"" +
           "        }" +
           "      ]," +
           "      \"name\": \"SERVICE2\"" +
@@ -547,15 +584,118 @@ public class KerberosDescriptorTest {
     // Reference is determined using the "reference" attribute
     serviceDescriptor = kerberosDescriptor.getService("SERVICE1");
     identities = serviceDescriptor.getIdentities(true, null);
-    Assert.assertEquals(1, identities.size());
-    Assert.assertEquals("service1_spnego", identities.get(0).getName());
-    Assert.assertEquals("/spnego", identities.get(0).getReference());
+    Assert.assertEquals(2, identities.size());
+    for (KerberosIdentityDescriptor identity : identities) {
+      if (identity.isReference()) {
+        Assert.assertEquals("service1_spnego", identity.getName());
+        Assert.assertEquals("/spnego", identity.getReference());
+      } else {
+        Assert.assertEquals("service1_identity", identity.getName());
+        Assert.assertNull(identity.getReference());
+      }
+    }
+
+    Assert.assertEquals("service1_identity", identities.get(1).getName());
+    Assert.assertNull(identities.get(1).getReference());
 
     // Reference is determined using the "name" attribute
     serviceDescriptor = kerberosDescriptor.getService("SERVICE2");
     identities = serviceDescriptor.getIdentities(true, null);
+    Assert.assertEquals(2, identities.size());
+    for (KerberosIdentityDescriptor identity : identities) {
+      if (identity.isReference()) {
+        Assert.assertEquals("/spnego", identity.getName());
+        Assert.assertNull(identity.getReference());
+      } else {
+        Assert.assertEquals("service2_identity", identity.getName());
+        Assert.assertNull(identity.getReference());
+      }
+    }
+  }
+
+  @Test
+  public void testGetPath() throws Exception {
+    KerberosDescriptor kerberosDescriptor;
+    KerberosServiceDescriptor serviceDescriptor;
+    List<KerberosIdentityDescriptor> identities;
+
+    kerberosDescriptor = KERBEROS_DESCRIPTOR_FACTORY.createInstance(JSON_VALUE);
+
+    serviceDescriptor = kerberosDescriptor.getService("SERVICE_NAME");
+    identities = serviceDescriptor.getIdentities(false, null);
+    Assert.assertEquals(1, identities.size());
+    Assert.assertEquals("/SERVICE_NAME/identity_1", identities.get(0).getPath());
+
+    KerberosComponentDescriptor componentDescriptor = serviceDescriptor.getComponent("COMPONENT_NAME");
+    identities = componentDescriptor.getIdentities(false, null);
     Assert.assertEquals(1, identities.size());
-    Assert.assertEquals("/spnego", identities.get(0).getName());
-    Assert.assertNull(identities.get(0).getReference());
+    Assert.assertEquals("/SERVICE_NAME/COMPONENT_NAME/identity_1", identities.get(0).getPath());
+
+
+    kerberosDescriptor = KERBEROS_DESCRIPTOR_FACTORY.createInstance(JSON_VALUE_IDENTITY_REFERENCES);
+
+    serviceDescriptor = kerberosDescriptor.getService("SERVICE1");
+    identities = serviceDescriptor.getIdentities(true, null);
+    Assert.assertEquals(2, identities.size());
+    Assert.assertEquals("/SERVICE1/service1_spnego", identities.get(0).getPath());
+    Assert.assertEquals("/SERVICE1/service1_identity", identities.get(1).getPath());
+  }
+
+  @Test
+  public void testGetReferences() throws Exception {
+    KerberosDescriptor kerberosDescriptor = KERBEROS_DESCRIPTOR_FACTORY.createInstance(JSON_VALUE_IDENTITY_REFERENCES);
+    KerberosIdentityDescriptor identity;
+    List<KerberosIdentityDescriptor> references;
+    Set<String> paths;
+
+    // Find all references to /spnego
+    identity = kerberosDescriptor.getIdentity("spnego");
+    references = identity.findReferences();
+
+    Assert.assertNotNull(references);
+    Assert.assertEquals(2, references.size());
+
+    paths = collectPaths(references);
+    Assert.assertTrue(paths.contains("/SERVICE1/service1_spnego"));
+    Assert.assertTrue(paths.contains("/SERVICE2//spnego"));
+
+    // Find all references to /SERVICE1/service1_identity
+    identity = kerberosDescriptor.getService("SERVICE1").getIdentity("service1_identity");
+    references = identity.findReferences();
+
+    Assert.assertNotNull(references);
+    Assert.assertEquals(1, references.size());
+
+    paths = collectPaths(references);
+    Assert.assertTrue(paths.contains("/SERVICE2/COMPONENT21/service2_component1_service1_identity"));
+
+    // Find all references to /SERVICE2/COMPONENT21/component1_identity (testing ./)
+    identity = kerberosDescriptor.getService("SERVICE2").getComponent("COMPONENT21").getIdentity("component1_identity");
+    references = identity.findReferences();
+
+    Assert.assertNotNull(references);
+    Assert.assertEquals(1, references.size());
+
+    paths = collectPaths(references);
+    Assert.assertTrue(paths.contains("/SERVICE2/COMPONENT21/service2_component1_component1_identity"));
+
+    // Find all references to /SERVICE2/component2_identity (testing ../)
+    identity = kerberosDescriptor.getService("SERVICE2").getIdentity("service2_identity");
+    references = identity.findReferences();
+
+    Assert.assertNotNull(references);
+    Assert.assertEquals(1, references.size());
+
+    paths = collectPaths(references);
+    Assert.assertTrue(paths.contains("/SERVICE2/COMPONENT21/service2_component1_service2_identity"));
+  }
+
+  private Set<String> collectPaths(List<KerberosIdentityDescriptor> identityDescriptors) {
+    Set<String> paths = new HashSet<>();
+    for (KerberosIdentityDescriptor identityDescriptor : identityDescriptors) {
+      paths.add(identityDescriptor.getPath());
+    }
+    return paths;
   }
+
 }
\ No newline at end of file
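
Editor's note: the testGetReferences cases above exercise the three reference forms a descriptor identity can use: absolute ("/spnego"), sibling-relative ("./name", resolved within the same component) and parent-relative ("../name", resolved against the enclosing service). A minimal, self-contained sketch of resolving such a reference against the declaring identity's path follows; the ReferenceResolver class and its resolve() helper are hypothetical illustrations, not Ambari's KerberosIdentityDescriptor API.

import java.util.Arrays;

/**
 * Toy resolver for Kerberos identity references. Paths look like
 * /SERVICE/COMPONENT/identity; a reference may be absolute (/spnego),
 * sibling-relative (./name) or parent-relative (../name).
 */
public class ReferenceResolver {

  static String resolve(String declaringPath, String reference) {
    String[] parts = declaringPath.split("/");
    // container of the declaring identity, e.g. /SERVICE2/COMPONENT21
    String container = String.join("/", Arrays.copyOf(parts, parts.length - 1));

    if (reference.startsWith("/")) {          // absolute: root-level identity
      return reference;
    }
    if (reference.startsWith("./")) {         // same container (component)
      return container + "/" + reference.substring(2);
    }
    if (reference.startsWith("../")) {        // parent container (service)
      return container.substring(0, container.lastIndexOf('/'))
          + "/" + reference.substring(3);
    }
    return container + "/" + reference;       // bare name: treat as a sibling
  }

  public static void main(String[] args) {
    String declaring = "/SERVICE2/COMPONENT21/service2_component1_component1_identity";
    System.out.println(resolve(declaring, "./component1_identity"));  // /SERVICE2/COMPONENT21/component1_identity
    System.out.println(resolve(declaring, "../service2_identity"));   // /SERVICE2/service2_identity
    System.out.println(resolve(declaring, "/spnego"));                // /spnego
  }
}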


[35/50] [abbrv] ambari git commit: Revert "Revert "AMBARI-21205 Make ToggleKerberos and AddDeleteService experimental features (Duc Le via rzang)""

Posted by ja...@apache.org.
Revert "Revert "AMBARI-21205 Make ToggleKerberos and AddDeleteService experimental features (Duc Le via rzang)""

This reverts commit 7e0fe2913619f4b70097e3ebcd7fb89e84eee62d.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4242225c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4242225c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4242225c

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: 4242225ca6aa89512886bcc74eb5c49e65bd259c
Parents: 0ca8513
Author: Jaimin Jetly <ja...@hortonworks.com>
Authored: Mon Oct 9 13:03:49 2017 -0700
Committer: Jaimin Jetly <ja...@hortonworks.com>
Committed: Mon Oct 9 13:04:36 2017 -0700

----------------------------------------------------------------------
 ambari-web/app/config.js                        |  6 ++--
 ambari-web/app/routes/add_service_routes.js     |  2 +-
 ambari-web/app/routes/main.js                   |  2 +-
 .../app/templates/main/admin/kerberos.hbs       | 34 +++++++++++---------
 .../main/service/all_services_actions.hbs       |  6 ++--
 ambari-web/app/views/main/admin.js              | 14 ++++----
 .../main/admin/stack_upgrade/services_view.js   |  2 +-
 ambari-web/app/views/main/menu.js               | 16 +++++----
 ambari-web/app/views/main/service/item.js       |  2 +-
 .../admin/stack_upgrade/services_view_test.js   |  1 +
 10 files changed, 49 insertions(+), 36 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/4242225c/ambari-web/app/config.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/config.js b/ambari-web/app/config.js
index ba1b75d..0963f70 100644
--- a/ambari-web/app/config.js
+++ b/ambari-web/app/config.js
@@ -86,9 +86,11 @@ App.supports = {
   addingNewRepository: false,
   kerberosStackAdvisor: true,
   logCountVizualization: false,
-  manageJournalNode: true,
   createAlerts: false,
-  enabledWizardForHostOrderedUpgrade: true
+  enabledWizardForHostOrderedUpgrade: true,
+  manageJournalNode: true,
+  enableToggleKerberos: true,
+  enableAddDeleteServices: true
 };
 
 if (App.enableExperimental) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/4242225c/ambari-web/app/routes/add_service_routes.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/routes/add_service_routes.js b/ambari-web/app/routes/add_service_routes.js
index 1615f0d..75b3586 100644
--- a/ambari-web/app/routes/add_service_routes.js
+++ b/ambari-web/app/routes/add_service_routes.js
@@ -24,7 +24,7 @@ module.exports = App.WizardRoute.extend({
   route: '/service/add',
 
   enter: function (router) {
-    if (App.isAuthorized('SERVICE.ADD_DELETE_SERVICES')) {
+    if (App.isAuthorized('SERVICE.ADD_DELETE_SERVICES') && App.supports.enableAddDeleteServices) {
       // `getSecurityStatus` call is required to retrieve information related to kerberos type: Manual or automated kerberos
       router.get('mainController').isLoading.call(router.get('clusterController'),'isClusterNameLoaded').done(function () {
         App.router.get('mainAdminKerberosController').getSecurityStatus().always(function () {

http://git-wip-us.apache.org/repos/asf/ambari/blob/4242225c/ambari-web/app/routes/main.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/routes/main.js b/ambari-web/app/routes/main.js
index 30cc8aa..7ed18de 100644
--- a/ambari-web/app/routes/main.js
+++ b/ambari-web/app/routes/main.js
@@ -460,7 +460,7 @@ module.exports = Em.Route.extend(App.RouterRedirections, {
 
       route: '/kerberos',
       enter: function (router, transition) {
-        if (router.get('loggedIn') && !App.isAuthorized('CLUSTER.TOGGLE_KERBEROS')) {
+        if (router.get('loggedIn') && (!App.isAuthorized('CLUSTER.TOGGLE_KERBEROS') || !App.supports.enableToggleKerberos)) {
           router.transitionTo('main.dashboard.index');
         }
       },

http://git-wip-us.apache.org/repos/asf/ambari/blob/4242225c/ambari-web/app/templates/main/admin/kerberos.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/admin/kerberos.hbs b/ambari-web/app/templates/main/admin/kerberos.hbs
index e7bb618..2b41122 100644
--- a/ambari-web/app/templates/main/admin/kerberos.hbs
+++ b/ambari-web/app/templates/main/admin/kerberos.hbs
@@ -20,20 +20,22 @@
     <div>
       <p class="text-success">{{t admin.security.enabled}}
         {{#isAuthorized "CLUSTER.TOGGLE_KERBEROS"}}
-          <button class="btn btn-padding btn-warning admin-disable-security-btn" {{bindAttr disabled="isKerberosButtonsDisabled"}} {{action notifySecurityOffPopup target="controller"}}>{{t admin.kerberos.button.disable}} </button>
-          {{#unless isManualKerberos}}
-            <button class="btn btn-success" id="regenerate-keytabs" {{bindAttr disabled="isKerberosButtonsDisabled"}} {{action regenerateKeytabs target="controller"}}>
-              <i class="glyphicon glyphicon-repeat"></i> {{t admin.kerberos.button.regenerateKeytabs}}</button>
-            {{#if App.isCredentialStorePersistent}}
-              <button class="btn btn-primary" {{action showManageKDCCredentialsPopup target="controller"}}>{{t admin.kerberos.credentials.store.menu.label}}</button>
+            {{#if App.supports.enableToggleKerberos}}
+              <button class="btn btn-padding btn-warning admin-disable-security-btn" {{bindAttr disabled="isKerberosButtonsDisabled"}} {{action notifySecurityOffPopup target="controller"}}>{{t admin.kerberos.button.disable}} </button>
+              {{#unless isManualKerberos}}
+                <button class="btn btn-success" id="regenerate-keytabs" {{bindAttr disabled="isKerberosButtonsDisabled"}} {{action regenerateKeytabs target="controller"}}>
+                  <i class="glyphicon glyphicon-repeat"></i> {{t admin.kerberos.button.regenerateKeytabs}}</button>
+                {{#if App.isCredentialStorePersistent}}
+                  <button class="btn btn-primary" {{action showManageKDCCredentialsPopup target="controller"}}>{{t admin.kerberos.credentials.store.menu.label}}</button>
+                {{/if}}
+              {{/unless}}
+              <br/>
+              {{#unless isEditMode}}
+                <a href="#" {{action makeConfigsEditable target="controller"}} class="pull-right">
+                  {{t common.edit}}
+                </a>
+              {{/unless}}
             {{/if}}
-          {{/unless}}
-          <br/>
-          {{#unless isEditMode}}
-            <a href="#" {{action makeConfigsEditable target="controller"}} class="pull-right">
-              {{t common.edit}}
-            </a>
-          {{/unless}}
         {{/isAuthorized}}
       </p>
     </div>
@@ -51,8 +53,10 @@
     <div>
       <p class="muted background-text">{{t admin.security.disabled}}
       {{#isAuthorized "CLUSTER.TOGGLE_KERBEROS"}}
-        <a class="btn btn-padding btn-success admin-enable-security-btn" {{action checkAndStartKerberosWizard target="controller"}}>{{t admin.kerberos.button.enable}} </a>
-        <br/>
+        {{#if App.supports.enableToggleKerberos}}
+          <a class="btn btn-padding btn-success admin-enable-security-btn" {{action checkAndStartKerberosWizard target="controller"}}>{{t admin.kerberos.button.enable}} </a>
+          <br/>
+        {{/if}}
       {{/isAuthorized}}
       </p>
     </div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4242225c/ambari-web/app/templates/main/service/all_services_actions.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/all_services_actions.hbs b/ambari-web/app/templates/main/service/all_services_actions.hbs
index 3e87cb2..a9e122b 100644
--- a/ambari-web/app/templates/main/service/all_services_actions.hbs
+++ b/ambari-web/app/templates/main/service/all_services_actions.hbs
@@ -22,12 +22,14 @@
   </div>
   <ul class="dropdown-menu">
     {{#isAuthorized "SERVICE.ADD_DELETE_SERVICES"}}
-      <li {{bindAttr class="view.serviceController.isAllServicesInstalled:disabled"}}>
+      {{#if App.supports.enableAddDeleteServices}}
+        <li {{bindAttr class="view.serviceController.isAllServicesInstalled:disabled"}}>
         <a href="#"
           {{bindAttr class="view.serviceController.isAllServicesInstalled:disabled"}}
           {{action gotoAddService target="view.serviceController"}}>
           <i class="glyphicon glyphicon-plus"></i> {{t services.service.add}}</a>
-      </li>
+        </li>
+      {{/if}}
     {{/isAuthorized}}
     {{#isAuthorized "SERVICE.START_STOP"}}
       <li class="divider"></li>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4242225c/ambari-web/app/views/main/admin.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/admin.js b/ambari-web/app/views/main/admin.js
index 509f380..05d0f56 100644
--- a/ambari-web/app/views/main/admin.js
+++ b/ambari-web/app/views/main/admin.js
@@ -39,12 +39,14 @@ App.MainAdminView = Em.View.extend({
       });
     }
     if (!App.get('isHadoopWindowsStack') && App.isAuthorized('CLUSTER.TOGGLE_KERBEROS') || (App.get('upgradeInProgress') || App.get('upgradeHolding')) ) {
-      items.push({
-        name: 'kerberos',
-        url: 'adminKerberos.index',
-        label: Em.I18n.t('common.kerberos'),
-        disabled: App.get('upgradeInProgress') || App.get('upgradeHolding')
-      });
+      if (App.supports.enableToggleKerberos) {
+        items.push({
+          name: 'kerberos',
+          url: 'adminKerberos.index',
+          label: Em.I18n.t('common.kerberos'),
+          disabled: App.get('upgradeInProgress') || App.get('upgradeHolding')
+        });
+      }
     }
     if ((App.isAuthorized('SERVICE.START_STOP, CLUSTER.MODIFY_CONFIGS') && App.isAuthorized('SERVICE.MANAGE_AUTO_START, CLUSTER.MANAGE_AUTO_START')) || (App.get('upgradeInProgress') || App.get('upgradeHolding'))) {
       if (App.supports.serviceAutoStart) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/4242225c/ambari-web/app/views/main/admin/stack_upgrade/services_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/admin/stack_upgrade/services_view.js b/ambari-web/app/views/main/admin/stack_upgrade/services_view.js
index f566814..25efffe 100644
--- a/ambari-web/app/views/main/admin/stack_upgrade/services_view.js
+++ b/ambari-web/app/views/main/admin/stack_upgrade/services_view.js
@@ -56,7 +56,7 @@ App.MainAdminStackServicesView = Em.View.extend({
    * @param event
    */
   goToAddService: function (event) {
-    if (!App.isAuthorized('SERVICE.ADD_DELETE_SERVICES')) {
+    if (!App.isAuthorized('SERVICE.ADD_DELETE_SERVICES') || !App.supports.enableAddDeleteServices) {
       return;
     } else if (event.context == "KERBEROS") {
       App.router.get('mainAdminKerberosController').checkAndStartKerberosWizard();

http://git-wip-us.apache.org/repos/asf/ambari/blob/4242225c/ambari-web/app/views/main/menu.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/menu.js b/ambari-web/app/views/main/menu.js
index 4bb53ae..32c4f6f 100644
--- a/ambari-web/app/views/main/menu.js
+++ b/ambari-web/app/views/main/menu.js
@@ -118,13 +118,15 @@ App.MainSideMenuView = Em.CollectionView.extend({
           });
         }
         if (!App.get('isHadoopWindowsStack') && App.isAuthorized('CLUSTER.TOGGLE_KERBEROS') || upg) {
-          categories.push({
-            name: 'kerberos',
-            url: 'kerberos/',
-            label: Em.I18n.t('common.kerberos'),
-            disabled: App.get('upgradeInProgress') || App.get('upgradeHolding'),
-            href: router.urlFor('main.admin.adminKerberos')
-          });
+          if (App.supports.enableToggleKerberos) {
+            categories.push({
+              name: 'kerberos',
+              url: 'kerberos/',
+              label: Em.I18n.t('common.kerberos'),
+              disabled: App.get('upgradeInProgress') || App.get('upgradeHolding'),
+              href: router.urlFor('main.admin.adminKerberos')
+            });
+          }
         }
         if ((App.isAuthorized('SERVICE.START_STOP, CLUSTER.MODIFY_CONFIGS') && App.isAuthorized('SERVICE.MANAGE_AUTO_START, CLUSTER.MANAGE_AUTO_START')) || upg) {
           if (App.supports.serviceAutoStart) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/4242225c/ambari-web/app/views/main/service/item.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/service/item.js b/ambari-web/app/views/main/service/item.js
index 37e0904..45c783b 100644
--- a/ambari-web/app/views/main/service/item.js
+++ b/ambari-web/app/views/main/service/item.js
@@ -289,7 +289,7 @@ App.MainServiceItemView = Em.View.extend({
       options.push(actionMap.DOWNLOAD_CLIENT_CONFIGS);
     }
 
-    if (App.isAuthorized("SERVICE.ADD_DELETE_SERVICES")) {
+    if (App.isAuthorized("SERVICE.ADD_DELETE_SERVICES") && App.supports.enableAddDeleteServices) {
       options.push(actionMap.DELETE_SERVICE);
     }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4242225c/ambari-web/test/views/main/admin/stack_upgrade/services_view_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/admin/stack_upgrade/services_view_test.js b/ambari-web/test/views/main/admin/stack_upgrade/services_view_test.js
index 70d182c..da75cf2 100644
--- a/ambari-web/test/views/main/admin/stack_upgrade/services_view_test.js
+++ b/ambari-web/test/views/main/admin/stack_upgrade/services_view_test.js
@@ -34,6 +34,7 @@ describe('App.MainAdminStackServicesView', function () {
       sinon.stub(App.router, 'get').returns(mock);
       sinon.spy(mock, 'checkAndStartKerberosWizard');
       isAccessibleMock = sinon.stub(App, 'isAuthorized');
+      App.set('supports.enableAddDeleteServices', true);
     });
     afterEach(function() {
       App.get('router').transitionTo.restore();
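
Editor's note: every hunk in this commit applies the same pattern: an existing RBAC check (App.isAuthorized(...)) is AND-ed with an App.supports flag, so the feature can be hidden even from authorized users while it is experimental. A framework-free sketch of that double gate follows; the supports object, the isAuthorized stand-in and the canAddDeleteServices() helper are illustrative, not the Ember App object used above.

// Minimal sketch of the permission-plus-feature-flag gate.
const supports = {
  enableToggleKerberos: true,
  enableAddDeleteServices: true
};

function isAuthorized(permission, grantedPermissions) {
  return grantedPermissions.includes(permission);
}

function canAddDeleteServices(grantedPermissions) {
  // Both gates must pass: the user's role and the experimental flag.
  return isAuthorized('SERVICE.ADD_DELETE_SERVICES', grantedPermissions) &&
    supports.enableAddDeleteServices;
}

const granted = ['SERVICE.ADD_DELETE_SERVICES'];
console.log(canAddDeleteServices(granted)); // true
supports.enableAddDeleteServices = false;
console.log(canAddDeleteServices(granted)); // false -- the menu item disappears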


[45/50] [abbrv] ambari git commit: AMBARI-22186 Navigation style changes. (atkach)

Posted by ja...@apache.org.
AMBARI-22186 Navigation style changes. (atkach)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2d23e123
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2d23e123
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2d23e123

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: 2d23e123fd9b514b8c21d973cb0237e8c3dd6b42
Parents: 5768294
Author: Andrii Tkach <at...@apache.org>
Authored: Tue Oct 10 16:44:35 2017 +0300
Committer: Andrii Tkach <at...@apache.org>
Committed: Tue Oct 10 18:00:24 2017 +0300

----------------------------------------------------------------------
 ambari-web/app/styles/common.less               |  1 +
 .../app/styles/theme/bootstrap-ambari.css       | 64 ++++++++++++++------
 ambari-web/app/styles/top-nav.less              | 22 +++++++
 ambari-web/app/templates/application.hbs        | 39 ++++++++----
 ambari-web/app/templates/main/service/item.hbs  |  2 +-
 5 files changed, 96 insertions(+), 32 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/2d23e123/ambari-web/app/styles/common.less
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/common.less b/ambari-web/app/styles/common.less
index ddd4f3a..7753a59 100644
--- a/ambari-web/app/styles/common.less
+++ b/ambari-web/app/styles/common.less
@@ -71,6 +71,7 @@
 @top-nav-menu-dropdown-border-color: #c3c3c3;
 @top-nav-menu-dropdown-bg-color: #fff;
 @top-nav-menu-dropdown-text-color: #333;
+@top-nav-menu-views-menu-color: #1491c1;
 
 @-webkit-keyframes orangePulse {
   from { background-color: @restart-indicator-color; }

http://git-wip-us.apache.org/repos/asf/ambari/blob/2d23e123/ambari-web/app/styles/theme/bootstrap-ambari.css
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/theme/bootstrap-ambari.css b/ambari-web/app/styles/theme/bootstrap-ambari.css
index a223949..70579e7 100644
--- a/ambari-web/app/styles/theme/bootstrap-ambari.css
+++ b/ambari-web/app/styles/theme/bootstrap-ambari.css
@@ -464,7 +464,7 @@ h2.table-title {
 .nav.nav-tabs li a {
   border-width: 0;
   border-radius: 0;
-  border-bottom: 2px solid transparent;
+  border-bottom: 3px solid transparent;
   color: #6B6C6C;
   text-transform: uppercase;
 }
@@ -488,7 +488,7 @@ h2.table-title {
 .nav-tabs-left li,
 .nav-tabs-right li {
   float: none;
-  margin-bottom: 2px;
+  margin-bottom: 3px;
 }
 .nav-tabs-left li a,
 .nav-tabs-right li a {
@@ -498,25 +498,25 @@ h2.table-title {
   margin-right: -1px;
 }
 .nav-tabs-left li a {
-  border: 2px solid transparent !important;
+  border: 3px solid transparent !important;
 }
 .nav-tabs-left li.active a,
 .nav-tabs-left li.active a:hover,
 .nav-tabs-left li.active a:active,
 .nav-tabs-left li.active a:focus {
-  border-right: 2px solid #3FAE2A !important;
+  border-right: 3px solid #3FAE2A !important;
 }
 .nav-tabs-right li {
   margin-left: -1px;
 }
 .nav-tabs-right li a {
-  border: 2px solid transparent !important;
+  border: 3px solid transparent !important;
 }
 .nav-tabs-right li.active a,
 .nav-tabs-right li.active a:hover,
 .nav-tabs-right li.active a:active,
 .nav-tabs-right li.active a:focus {
-  border-left: 2px solid #3FAE2A !important;
+  border-left: 3px solid #3FAE2A !important;
 }
 .wizard {
   border: 2px solid #ebecf1;
@@ -797,8 +797,7 @@ input.radio:checked + label:after {
   cursor: pointer;
   margin-top: 3px;
 }
-.navigation-bar-container ul.nav.side-nav-header li.navigation-header .btn-group:hover span.ambari-header,
-.navigation-bar-container ul.nav.side-nav-header li.navigation-header .btn-group:hover span.toggle-icon {
+.navigation-bar-container ul.nav.side-nav-header li.navigation-header .btn-group:hover span.ambari-header {
   color: #fff;
 }
 .navigation-bar-container ul.nav.side-nav-header li.navigation-header .btn-group span.ambari-header {
@@ -890,7 +889,7 @@ input.radio:checked + label:after {
 .navigation-bar-container ul.nav.side-nav-menu li.mainmenu-li > a .navigation-icon,
 .navigation-bar-container ul.nav.side-nav-footer li.mainmenu-li > a .navigation-icon {
   line-height: 18px;
-  font-size: 14px;
+  font-size: 16px;
   color: #b8bec4;
 }
 .navigation-bar-container ul.nav.side-nav-menu li.navigation-footer > a .toggle-icon,
@@ -904,12 +903,14 @@ input.radio:checked + label:after {
   color: #b8bec4;
   padding: 3px 5px 3px 10px;
 }
-.navigation-bar-container ul.nav.side-nav-menu li.navigation-footer > a,
-.navigation-bar-container ul.nav.side-nav-footer li.navigation-footer > a,
 .navigation-bar-container ul.nav.side-nav-menu li.mainmenu-li > a,
 .navigation-bar-container ul.nav.side-nav-footer li.mainmenu-li > a {
   padding: 10px 5px 10px 20px;
 }
+.navigation-bar-container ul.nav.side-nav-menu li.navigation-footer > a,
+.navigation-bar-container ul.nav.side-nav-footer li.navigation-footer > a {
+  padding: 14px 5px 14px 20px;
+}
 .navigation-bar-container ul.nav.side-nav-menu li.submenu-li > a,
 .navigation-bar-container ul.nav.side-nav-footer li.submenu-li > a {
   padding: 10px 5px 10px 25px;
@@ -922,7 +923,7 @@ input.radio:checked + label:after {
 .navigation-bar-container ul.nav.side-nav-menu li.navigation-footer a .navigation-icon,
 .navigation-bar-container ul.nav.side-nav-footer li.navigation-footer a .navigation-icon {
   color: #3fae2a;
-  font-size: 20px;
+  font-size: 19px;
   position: relative;
   padding: 0 15px;
   left: calc(30%);
@@ -1021,7 +1022,7 @@ input.radio:checked + label:after {
   position: absolute;
   pointer-events: none;
   border-color: transparent;
-  border-left-color: #31823a;
+  border-left-color: #3fae2a;
   margin-top: -12px;
 }
 .navigation-bar-container ul.nav.side-nav-menu .more-actions,
@@ -1097,6 +1098,10 @@ input.radio:checked + label:after {
 .navigation-bar-container.collapsed ul.nav.side-nav-footer li a .toggle-icon {
   display: none;
 }
+.navigation-bar-container.collapsed ul.nav.side-nav-menu li a .navigation-icon,
+.navigation-bar-container.collapsed ul.nav.side-nav-footer li a .navigation-icon {
+  font-size: 19px;
+}
 .navigation-bar-container.collapsed ul.nav.side-nav-menu li.navigation-footer a .navigation-icon,
 .navigation-bar-container.collapsed ul.nav.side-nav-footer li.navigation-footer a .navigation-icon {
   padding: 0 5px;
@@ -1147,7 +1152,7 @@ input.radio:checked + label:after {
   position: absolute;
   pointer-events: none;
   border-color: transparent;
-  border-left-color: #31823a;
+  border-left-color: #3fae2a;
   margin-top: -12px;
 }
 .navigation-bar-container.collapsed ul.nav.side-nav-menu .more-actions,
@@ -1189,8 +1194,10 @@ input.radio:checked + label:after {
   position: relative;
   top: 1px;
 }
+.notifications-dropdown,
 #notifications-dropdown.dropdown-menu {
-  width: 400px;
+  min-width: 400px;
+  max-width: 400px;
   min-height: 150px;
   padding: 0px;
   z-index: 1000;
@@ -1202,6 +1209,7 @@ input.radio:checked + label:after {
   -moz-box-shadow: 0px 2px 10px 2px rgba(0, 0, 0, 0.29);
   box-shadow: 0px 2px 10px 2px rgba(0, 0, 0, 0.29);
 }
+.notifications-dropdown .popup-arrow-up,
 #notifications-dropdown.dropdown-menu .popup-arrow-up {
   position: absolute;
   right: 37px;
@@ -1210,6 +1218,7 @@ input.radio:checked + label:after {
   height: 40px;
   overflow: hidden;
 }
+.notifications-dropdown .popup-arrow-up:after,
 #notifications-dropdown.dropdown-menu .popup-arrow-up:after {
   content: "";
   position: absolute;
@@ -1221,10 +1230,12 @@ input.radio:checked + label:after {
   left: 10px;
   box-shadow: -1px -1px 10px -2px rgba(0, 0, 0, 0.5);
 }
+.notifications-dropdown .notifications-header,
 #notifications-dropdown.dropdown-menu .notifications-header {
   border-bottom: 1px solid #eee;
   padding: 15px 20px;
 }
+.notifications-dropdown .notifications-header .notifications-title,
 #notifications-dropdown.dropdown-menu .notifications-header .notifications-title {
   font-family: 'Roboto', sans-serif;
   font-weight: normal;
@@ -1233,19 +1244,23 @@ input.radio:checked + label:after {
   color: #333;
   font-size: 16px;
 }
+.notifications-dropdown .notifications-body,
 #notifications-dropdown.dropdown-menu .notifications-body {
   padding: 0px 15px;
   overflow: auto;
   max-height: 500px;
 }
+.notifications-dropdown .notifications-body .no-alert-text,
 #notifications-dropdown.dropdown-menu .notifications-body .no-alert-text {
   padding: 15px 5px;
 }
+.notifications-dropdown .notifications-body .table-controls,
 #notifications-dropdown.dropdown-menu .notifications-body .table-controls {
   padding: 10px 0px;
   margin: 0px;
   border-bottom: 1px solid #eee;
 }
+.notifications-dropdown .notifications-body .table-controls .state-filter,
 #notifications-dropdown.dropdown-menu .notifications-body .table-controls .state-filter {
   padding: 0px;
   font-family: 'Roboto', sans-serif;
@@ -1257,36 +1272,45 @@ input.radio:checked + label:after {
   color: #666;
   position: relative;
 }
+.notifications-dropdown .notifications-body .table-controls .state-filter .form-control.filter-select,
 #notifications-dropdown.dropdown-menu .notifications-body .table-controls .state-filter .form-control.filter-select {
   font-size: 12px;
   color: #666;
   height: 25px;
 }
+.notifications-dropdown .notifications-body .table.alerts-table,
 #notifications-dropdown.dropdown-menu .notifications-body .table.alerts-table {
   margin-top: 0px;
 }
+.notifications-dropdown .notifications-body .table.alerts-table tbody tr,
 #notifications-dropdown.dropdown-menu .notifications-body .table.alerts-table tbody tr {
   cursor: pointer;
 }
+.notifications-dropdown .notifications-body .table.alerts-table tbody tr.no-alert-tr:hover,
 #notifications-dropdown.dropdown-menu .notifications-body .table.alerts-table tbody tr.no-alert-tr:hover {
   cursor: default;
   border-color: transparent;
   border-bottom-color: #eee;
 }
+.notifications-dropdown .notifications-body .table.alerts-table tbody tr.no-alert-tr:hover > td,
 #notifications-dropdown.dropdown-menu .notifications-body .table.alerts-table tbody tr.no-alert-tr:hover > td {
   border-color: transparent;
   background-color: white;
 }
+.notifications-dropdown .notifications-body .table.alerts-table tbody td.status,
 #notifications-dropdown.dropdown-menu .notifications-body .table.alerts-table tbody td.status {
   width: 9%;
   padding: 15px 3px;
 }
+.notifications-dropdown .notifications-body .table.alerts-table tbody td.status .alert-state-CRITICAL,
 #notifications-dropdown.dropdown-menu .notifications-body .table.alerts-table tbody td.status .alert-state-CRITICAL {
   color: #EF6162;
 }
+.notifications-dropdown .notifications-body .table.alerts-table tbody td.status .alert-state-WARNING,
 #notifications-dropdown.dropdown-menu .notifications-body .table.alerts-table tbody td.status .alert-state-WARNING {
   color: #E98A40;
 }
+.notifications-dropdown .notifications-body .table.alerts-table tbody td.content,
 #notifications-dropdown.dropdown-menu .notifications-body .table.alerts-table tbody td.content {
   width: 90%;
   padding: 15px 3px 10px 3px;
@@ -1297,12 +1321,14 @@ input.radio:checked + label:after {
   color: #333;
   line-height: 1.3;
 }
+.notifications-dropdown .notifications-body .table.alerts-table tbody td.content .name,
 #notifications-dropdown.dropdown-menu .notifications-body .table.alerts-table tbody td.content .name {
   font-weight: bold;
   font-size: 14px;
   color: #333;
   margin-bottom: 5px;
 }
+.notifications-dropdown .notifications-body .table.alerts-table tbody td.content .description,
 #notifications-dropdown.dropdown-menu .notifications-body .table.alerts-table tbody td.content .description {
   font-size: 12px;
   color: #666;
@@ -1327,11 +1353,13 @@ input.radio:checked + label:after {
   -webkit-hyphens: auto;
   hyphens: auto;
 }
+.notifications-dropdown .notifications-body .table.alerts-table tbody td.content .timestamp,
 #notifications-dropdown.dropdown-menu .notifications-body .table.alerts-table tbody td.content .timestamp {
   text-align: right;
   font-size: 11px;
   color: #999;
 }
+.notifications-dropdown .notifications-footer,
 #notifications-dropdown.dropdown-menu .notifications-footer {
   border-top: 1px solid #eee;
   padding: 15px;
@@ -1372,13 +1400,13 @@ input.radio:checked + label:after {
 }
 .accordion .panel-group,
 .wizard .wizard-body .wizard-content .accordion .panel-group {
-  margin-bottom: 0px;
+  margin-bottom: 0;
 }
 .accordion .panel-group .panel,
 .wizard .wizard-body .wizard-content .accordion .panel-group .panel {
-  border-radius: 0px;
+  border-radius: 0;
   border: none;
-  margin-top: 0px;
+  margin-top: 0;
   padding: 0 10px;
 }
 .accordion .panel-group .panel .panel-heading,

http://git-wip-us.apache.org/repos/asf/ambari/blob/2d23e123/ambari-web/app/styles/top-nav.less
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/top-nav.less b/ambari-web/app/styles/top-nav.less
index 0644e52..a75d08a 100644
--- a/ambari-web/app/styles/top-nav.less
+++ b/ambari-web/app/styles/top-nav.less
@@ -109,6 +109,28 @@
     .top-nav-user {
       margin-top: 2px;
     }
+    .ambari-views {
+      margin-top: 17px;
+      padding: 0 20px 0 10px;
+      .notifications-dropdown.dropdown-menu {
+        right: -28px;
+        min-width: 200px;
+        max-width: 300px;
+        min-height: 100px;
+        li  {
+          padding: 2px 5px;
+          a {
+            font-size: 12px;
+            color: @top-nav-menu-views-menu-color;
+          }
+        }
+      }
+      i {
+        font-size: 20px;
+        color: @top-nav-menu-views-menu-color;
+        cursor: pointer;
+      }
+    }
   }
 
   #notifications-dropdown.dropdown-menu {

http://git-wip-us.apache.org/repos/asf/ambari/blob/2d23e123/ambari-web/app/templates/application.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/application.hbs b/ambari-web/app/templates/application.hbs
index 03c47db..5c47406 100644
--- a/ambari-web/app/templates/application.hbs
+++ b/ambari-web/app/templates/application.hbs
@@ -35,19 +35,7 @@
           </a>
         {{/if}}
         <div class="btn-group">
-          <div class="dropdown-toggle" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
-            <span class="ambari-header" title="Apache Ambari" {{QAAttr "ambari-title"}}>{{t app.name}}</span>
-            <span class="toggle-icon glyphicon glyphicon-triangle-bottom"></span>
-          </div>
-          <ul class="dropdown-menu">
-            {{#if view.views.length}}
-              {{#each item in view.views}}
-                <li><a class="" href="#" {{action "setView" item target="App.router.mainViewsController"}}>{{item.label}}</a></li>
-              {{/each}}
-            {{else}}
-              <li class="disabled"><a href="javascript:void(null);">{{t menu.item.views.noViews}}</a></li>
-            {{/if}}
-          </ul>
+          <span class="ambari-header" title="Apache Ambari" {{QAAttr "ambari-title"}}>{{t app.name}}</span>
         </div>
       </li>
     </ul>
@@ -121,6 +109,31 @@
         {{/if}}
         {{! user dropdown end }}
 
+        {{! views menu}}
+        <div class="navbar-nav navbar-right ambari-views notifications-group">
+          {{#if enableLinks}}
+            <i class="icon-th dropdown-toggle" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false"></i>
+            <ul class="notifications-dropdown row dropdown-menu">
+              <div class="popup-arrow-up"></div>
+              <div class="notifications-header col-sm-12">
+                <div class="notifications-title">{{t common.views}}</div>
+              </div>
+              <div class="notifications-body col-sm-12">
+                {{#if view.views.length}}
+                  {{#each item in view.views}}
+                    <li>
+                      <a href="#" {{action "setView" item target="App.router.mainViewsController"}}>{{item.label}}</a>
+                    </li>
+                  {{/each}}
+                {{else}}
+                  <li class="disabled"><a href="javascript:void(null);">{{t menu.item.views.noViews}}</a></li>
+                {{/if}}
+              </div>
+            </ul>
+          {{/if}}
+        </div>
+        {{!views menu end}}
+
         <div class="navbar-nav navbar-right cluster-notifications">
           {{#if enableLinks}}
             {{! bg label }}

http://git-wip-us.apache.org/repos/asf/ambari/blob/2d23e123/ambari-web/app/templates/main/service/item.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/item.hbs b/ambari-web/app/templates/main/service/item.hbs
index df26a9d..9abcaf3 100644
--- a/ambari-web/app/templates/main/service/item.hbs
+++ b/ambari-web/app/templates/main/service/item.hbs
@@ -25,7 +25,7 @@
 <div class="service-button">
   {{#if view.isMaintenanceActive}}
     <div class="btn-group display-inline-block">
-      <button class="btn btn-default dropdown-toggle" id="service-actions-dropdown-btn" data-toggle="dropdown" href="#">
+      <button class="btn btn-success dropdown-toggle" id="service-actions-dropdown-btn" data-toggle="dropdown" href="#">
         {{t common.actions}}
         <span class="caret"></span>
       </button>


[36/50] [abbrv] ambari git commit: AMBARI-22166 - Not able to perform revert after deleting the upgraded service (jonathanhurley)

Posted by ja...@apache.org.
AMBARI-22166 - Not able to perform revert after deleting the upgraded service (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8cffd722
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8cffd722
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8cffd722

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: 8cffd72227c22da2bc3bad30f1c3e877bd26cad4
Parents: 4242225
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Mon Oct 9 13:21:40 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Mon Oct 9 16:40:45 2017 -0400

----------------------------------------------------------------------
 .../ambari/server/state/UpgradeContext.java     | 24 ++++++--
 .../ambari/server/state/UpgradeContextTest.java | 60 +++++++++++++++++++-
 2 files changed, 76 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/8cffd722/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
index 67a8950..de0f868 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
@@ -304,8 +304,8 @@ public class UpgradeContext {
         throw new AmbariException(
             String.format("There are no upgrades for cluster %s which are marked as revertable",
                 cluster.getClusterName()));
-      }      
-      
+      }
+
       if (!revertUpgrade.getOrchestration().isRevertable()) {
         throw new AmbariException(String.format("The %s repository type is not revertable",
             revertUpgrade.getOrchestration()));
@@ -323,14 +323,26 @@ public class UpgradeContext {
             revertableUpgrade.getRepositoryVersion().getVersion()));
       }
 
+      // !!! build all service-specific reversions
       Set<RepositoryVersionEntity> priors = new HashSet<>();
+      Map<String, Service> clusterServices = cluster.getServices();
       for (UpgradeHistoryEntity history : revertUpgrade.getHistory()) {
+        String serviceName = history.getServiceName();
+        String componentName = history.getComponentName();
+
         priors.add(history.getFromReposistoryVersion());
 
-        // !!! build all service-specific
-        m_services.add(history.getServiceName());
-        m_sourceRepositoryMap.put(history.getServiceName(), history.getTargetRepositoryVersion());
-        m_targetRepositoryMap.put(history.getServiceName(), history.getFromReposistoryVersion());
+        // if the service is no longer installed, do nothing
+        if (!clusterServices.containsKey(serviceName)) {
+          LOG.warn("{}/{} will not be reverted since it is no longer installed in the cluster",
+              serviceName, componentName);
+
+          continue;
+        }
+
+        m_services.add(serviceName);
+        m_sourceRepositoryMap.put(serviceName, history.getTargetRepositoryVersion());
+        m_targetRepositoryMap.put(serviceName, history.getFromReposistoryVersion());
       }
 
       if (priors.size() != 1) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/8cffd722/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeContextTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeContextTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeContextTest.java
index dc77fa6..5176ffe 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeContextTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeContextTest.java
@@ -23,6 +23,7 @@ import static junit.framework.Assert.assertTrue;
 import static org.easymock.EasyMock.eq;
 import static org.easymock.EasyMock.expect;
 
+import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -111,6 +112,11 @@ public class UpgradeContextTest extends EasyMockSupport {
   private VersionDefinitionXml m_vdfXml;
 
   /**
+   * The upgrade history to return for the completed upgrade.
+   */
+  private List<UpgradeHistoryEntity> m_upgradeHistory = new ArrayList<>();
+
+  /**
    * The cluster services.
    */
   private Map<String, Service> m_services = new HashMap<>();
@@ -128,7 +134,7 @@ public class UpgradeContextTest extends EasyMockSupport {
     expect(upgradeHistoryEntity.getServiceName()).andReturn(HDFS_SERVICE_NAME).anyTimes();
     expect(upgradeHistoryEntity.getFromReposistoryVersion()).andReturn(m_sourceRepositoryVersion).anyTimes();
     expect(upgradeHistoryEntity.getTargetRepositoryVersion()).andReturn(m_targetRepositoryVersion).anyTimes();
-    List<UpgradeHistoryEntity> upgradeHistory = Lists.newArrayList(upgradeHistoryEntity);
+    m_upgradeHistory = Lists.newArrayList(upgradeHistoryEntity);
 
     expect(m_repositoryVersionDAO.findByPK(1L)).andReturn(m_sourceRepositoryVersion).anyTimes();
     expect(m_repositoryVersionDAO.findByPK(99L)).andReturn(m_targetRepositoryVersion).anyTimes();
@@ -143,12 +149,13 @@ public class UpgradeContextTest extends EasyMockSupport {
     expect(m_completedRevertableUpgrade.getDirection()).andReturn(Direction.UPGRADE).anyTimes();
     expect(m_completedRevertableUpgrade.getRepositoryVersion()).andReturn(m_targetRepositoryVersion).anyTimes();
     expect(m_completedRevertableUpgrade.getOrchestration()).andReturn(RepositoryType.PATCH).anyTimes();
-    expect(m_completedRevertableUpgrade.getHistory()).andReturn(upgradeHistory).anyTimes();
+    expect(m_completedRevertableUpgrade.getHistory()).andReturn(m_upgradeHistory).anyTimes();
     expect(m_completedRevertableUpgrade.getUpgradePackage()).andReturn(null).anyTimes();
 
     RepositoryVersionEntity hdfsRepositoryVersion = createNiceMock(RepositoryVersionEntity.class);
 
     expect(m_hdfsService.getDesiredRepositoryVersion()).andReturn(hdfsRepositoryVersion).anyTimes();
+    expect(m_zookeeperService.getDesiredRepositoryVersion()).andReturn(hdfsRepositoryVersion).anyTimes();
     expect(m_cluster.getService(HDFS_SERVICE_NAME)).andReturn(m_hdfsService).anyTimes();
     m_services.put(HDFS_SERVICE_NAME, m_hdfsService);
 
@@ -331,6 +338,55 @@ public class UpgradeContextTest extends EasyMockSupport {
   }
 
   /**
+   * Tests that the {@link UpgradeContext} for a reversion has the correct
+   * services included in the reversion if one of the services in the original
+   * upgrade has since been deleted.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testRevertWithDeletedService() throws Exception {
+    UpgradeHelper upgradeHelper = createNiceMock(UpgradeHelper.class);
+    ConfigHelper configHelper = createNiceMock(ConfigHelper.class);
+    UpgradePack upgradePack = createNiceMock(UpgradePack.class);
+
+    // give the completed upgrade 2 services which can be reverted
+    UpgradeHistoryEntity upgradeHistoryEntity = createNiceMock(UpgradeHistoryEntity.class);
+    expect(upgradeHistoryEntity.getServiceName()).andReturn(ZOOKEEPER_SERVICE_NAME).anyTimes();
+    expect(upgradeHistoryEntity.getFromReposistoryVersion()).andReturn(m_sourceRepositoryVersion).anyTimes();
+    expect(upgradeHistoryEntity.getTargetRepositoryVersion()).andReturn(m_targetRepositoryVersion).anyTimes();
+    m_upgradeHistory.add(upgradeHistoryEntity);
+
+    expect(upgradeHelper.suggestUpgradePack(EasyMock.anyString(), EasyMock.anyObject(StackId.class),
+        EasyMock.anyObject(StackId.class), EasyMock.anyObject(Direction.class),
+        EasyMock.anyObject(UpgradeType.class), EasyMock.anyString())).andReturn(upgradePack).once();
+
+    expect(m_upgradeDAO.findRevertable(1L)).andReturn(m_completedRevertableUpgrade).once();
+
+    // remove HDFS, add ZK
+    m_services.remove(HDFS_SERVICE_NAME);
+    expect(m_cluster.getService(ZOOKEEPER_SERVICE_NAME)).andReturn(m_zookeeperService).anyTimes();
+    m_services.put(ZOOKEEPER_SERVICE_NAME, m_zookeeperService);
+    assertEquals(1, m_services.size());
+
+    Map<String, Object> requestMap = new HashMap<>();
+    requestMap.put(UpgradeResourceProvider.UPGRADE_TYPE, UpgradeType.ROLLING.name());
+    requestMap.put(UpgradeResourceProvider.UPGRADE_REVERT_UPGRADE_ID, "1");
+
+    replayAll();
+
+    UpgradeContext context = new UpgradeContext(m_cluster, requestMap, null, upgradeHelper,
+        m_upgradeDAO, m_repositoryVersionDAO, configHelper);
+
+    assertEquals(Direction.DOWNGRADE, context.getDirection());
+    assertEquals(RepositoryType.PATCH, context.getOrchestrationType());
+    assertEquals(1, context.getSupportedServices().size());
+    assertTrue(context.isPatchRevert());
+
+    verifyAll();
+  }
+
+  /**
    * Tests that if a different {@link UpgradeEntity} is returned instead of the one
    * specified by the
    *
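
Editor's note: the fix above reduces to one guard: while rebuilding the revert's source and target version maps from upgrade history, skip any history row whose service is no longer in cluster.getServices(). A standalone sketch of that filtering step follows; the History record and servicesToRevert() helper are hypothetical, not the real UpgradeContext types.

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;

public class RevertFilter {

  /** One row of upgrade history: a service plus its from/to versions. */
  record History(String service, String fromVersion, String toVersion) {}

  /**
   * Keep only history rows for services still installed. For a revert the
   * old target version becomes the source and the old source the target.
   */
  static Set<String> servicesToRevert(List<History> history,
                                      Set<String> installedServices,
                                      Map<String, String> sourceMap,
                                      Map<String, String> targetMap) {
    Set<String> services = new TreeSet<>();
    for (History h : history) {
      if (!installedServices.contains(h.service())) {
        continue; // service deleted since the upgrade; nothing to revert
      }
      services.add(h.service());
      sourceMap.put(h.service(), h.toVersion());
      targetMap.put(h.service(), h.fromVersion());
    }
    return services;
  }

  public static void main(String[] args) {
    List<History> history = List.of(
        new History("HDFS", "2.2.0.0", "2.2.1.0"),
        new History("ZOOKEEPER", "2.2.0.0", "2.2.1.0"));
    Map<String, String> src = new HashMap<>();
    Map<String, String> tgt = new HashMap<>();
    // HDFS was deleted after the patch upgrade; only ZOOKEEPER is reverted.
    Set<String> s = servicesToRevert(history, Set.of("ZOOKEEPER"), src, tgt);
    System.out.println(s);   // [ZOOKEEPER]
    System.out.println(src); // {ZOOKEEPER=2.2.1.0}
    System.out.println(tgt); // {ZOOKEEPER=2.2.0.0}
  }
}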


[19/50] [abbrv] ambari git commit: AMBARI-22162. Move out the druid configurations from hive-site to hive-interactive-site. (Slim Bouguerra via Swapan Shridhar).

Posted by ja...@apache.org.
AMBARI-22162. Move out the druid configurations from hive-site to hive-interactive-site. (Slim Bouguerra via Swapan Shridhar).


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ce2a0a00
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ce2a0a00
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ce2a0a00

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: ce2a0a00921bde8d780c82561902773f76431fce
Parents: 84e616d
Author: Swapan Shridhar <ss...@hortonworks.com>
Authored: Mon Oct 9 02:41:52 2017 -0700
Committer: Swapan Shridhar <ss...@hortonworks.com>
Committed: Mon Oct 9 02:41:52 2017 -0700

----------------------------------------------------------------------
 .../HIVE/0.12.0.2.0/configuration/hive-site.xml | 217 ------------------
 .../configuration/hive-interactive-site.xml     | 225 +++++++++++++++++++
 .../stacks/HDP/2.6/services/stack_advisor.py    |  12 +-
 .../stacks/HDP/2.6/upgrades/config-upgrade.xml  |  21 +-
 4 files changed, 241 insertions(+), 234 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/ce2a0a00/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml
index d66cf4c..69d1c69 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml
@@ -451,223 +451,6 @@ limitations under the License.
     <on-ambari-upgrade add="false"/>
   </property>
 
-  <!-- Druid related properties -->
-  <property>
-    <name>hive.druid.broker.address.default</name>
-    <value>localhost:8082</value>
-    <description>Host name of druid router if any or broker</description>
-    <on-ambari-upgrade add="false"/>
-    <depends-on>
-      <property>
-        <type>druid-router</type>
-        <name>druid.port</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.druid.metadata.uri</name>
-    <value>jdbc:mysql://localhost:3355/druid</value>
-    <description>URI to connect to the database (for example jdbc:mysql://hostname:port/DBName)</description>
-    <on-ambari-upgrade add="false"/>
-    <depends-on>
-      <property>
-        <type>druid-common</type>
-        <name>druid.metadata.storage.connector.connectURI</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.druid.coordinator.address.default</name>
-    <value>localhost:8082</value>
-    <description>Host name of druid router if any or broker</description>
-    <on-ambari-upgrade add="false"/>
-    <depends-on>
-      <property>
-        <type>druid-coordinator</type>
-        <name>druid.port</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.druid.metadata.password</name>
-    <value>{{druid_metadata_password}}</value>
-    <property-type>PASSWORD</property-type>
-    <display-name>Druid Metadata Password</display-name>
-    <description>Druid meta data storage password</description>
-    <value-attributes>
-      <type>password</type>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-   <property>
-    <name>hive.druid.metadata.username</name>
-    <value>druid</value>
-    <description>Username used to connect to druid metadata storage</description>
-    <on-ambari-upgrade add="false"/>
-     <depends-on>
-      <property>
-        <type>druid-common</type>
-        <name>druid.metadata.storage.connector.user</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.druid.indexer.segments.granularity</name>
-    <display-name>Default Granularity for the Druid segments</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>YEAR</value>
-        </entry>
-        <entry>
-          <value>MONTH</value>
-        </entry>
-        <entry>
-          <value>WEEK</value>
-        </entry>
-        <entry>
-          <value>DAY</value>
-        </entry>
-        <entry>
-          <value>HOUR</value>
-        </entry>
-        <entry>
-          <value>MINUTE</value>
-        </entry>
-        <entry>
-          <value>SECOND</value>
-        </entry>
-      </entries>
-    </value-attributes>
-    <value>MINUTE</value>
-    <description>Default Granularity for the segments created by the Druid storage handler, this can be overridden per table using table property druid.segment.granularity </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-   <property>
-    <name>hive.druid.indexer.partition.size.max</name>
-    <value>5000000</value>
-    <description>Maximum number of records per segment partition</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-   <property>
-    <name>hive.druid.indexer.memory.rownum.max</name>
-    <value>75000</value>
-    <description>Maximum number of records in memory while storing data in Druid</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <property>
-    <name>hive.druid.select.distribute</name>
-    <value>true</value>
-    <description>If it is set to true, we distribute the execution of Druid Select queries</description>
-    <on-ambari-upgrade add="false"/>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-  </property>
-
-  <property>
-    <name>hive.druid.basePersistDirectory</name>
-    <value></value>
-    <description>
-      Local temporary directory used to persist intermediate indexing state,
-      if empty (recommended) will default to JVM system property java.io.tmpdir.
-    </description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <property>
-    <name>hive.druid.storage.storageDirectory</name>
-    <value>{{druid_storage_dir}}</value>
-    <description>
-      Druid deep storage location for segments.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>hive.druid.metadata.db.type</name>
-    <display-name>Druid metadata storage type </display-name>
-    <value-attributes>
-      <overridable>false</overridable>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>mysql</value>
-          <label>MYSQL</label>
-        </entry>
-        <entry>
-          <value>postgresql</value>
-          <label>POSTGRESQL</label>
-        </entry>
-      </entries>
-    </value-attributes>
-    <value>mysql</value>
-    <depends-on>
-      <property>
-        <type>druid-common</type>
-        <name>druid.metadata.storage.type</name>
-      </property>
-    </depends-on>
-    <description>Druid metadata storage type</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <property>
-    <name>hive.druid.passiveWaitTimeMs</name>
-    <value>30000</value>
-    <description>
-      Wait time in ms default to 30 seconds.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <property>
-    <name>hive.druid.working.directory</name>
-    <value>/tmp/druid-indexing</value>
-    <description>
-      Default hdfs working directory used to store some intermediate metadata.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <property>
-    <name>hive.druid.maxTries</name>
-    <value>5</value>
-    <description>
-      Maximum number of http call retries before giving up.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <property>
-    <name>hive.druid.bitmap.type</name>
-    <display-name>Druid metadata storage type </display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>roaring</value>
-        </entry>
-        <entry>
-          <value>concise</value>
-        </entry>
-      </entries>
-    </value-attributes>
-    <value>roaring</value>
-    <description>Druid Coding algorithm use to encode the bitmaps</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
   <!-- This property is removed in HDP 2.5 and higher. -->
   <property>
     <name>atlas.rest.address</name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ce2a0a00/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-site.xml
index aae2efa..64cef3e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-site.xml
@@ -124,4 +124,229 @@ limitations under the License.
     <on-ambari-upgrade add="false"/>
   </property>
 
+  <!-- Druid related properties -->
+  <property>
+    <name>hive.druid.broker.address.default</name>
+    <value>localhost:8082</value>
+    <description>Host name and port of the druid router if one is deployed, otherwise of the broker</description>
+    <on-ambari-upgrade add="false"/>
+    <depends-on>
+      <property>
+        <type>druid-router</type>
+        <name>druid.port</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.druid.metadata.uri</name>
+    <value>jdbc:mysql://localhost:3355/druid</value>
+    <description>URI to connect to the database (for example jdbc:mysql://hostname:port/DBName)</description>
+    <on-ambari-upgrade add="false"/>
+    <depends-on>
+      <property>
+        <type>druid-common</type>
+        <name>druid.metadata.storage.connector.connectURI</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.druid.coordinator.address.default</name>
+    <value>localhost:8082</value>
+    <description>Host name and port of the druid coordinator</description>
+    <on-ambari-upgrade add="false"/>
+    <depends-on>
+      <property>
+        <type>druid-coordinator</type>
+        <name>druid.port</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.druid.metadata.password</name>
+    <value>{{druid_metadata_password}}</value>
+    <property-type>PASSWORD</property-type>
+    <display-name>Druid Metadata Password</display-name>
+    <description>Druid metadata storage password</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+   <property>
+    <name>hive.druid.metadata.username</name>
+    <value>druid</value>
+    <description>Username used to connect to druid metadata storage</description>
+    <on-ambari-upgrade add="false"/>
+     <depends-on>
+      <property>
+        <type>druid-common</type>
+        <name>druid.metadata.storage.connector.user</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.druid.indexer.segments.granularity</name>
+    <display-name>Default Granularity for the Druid segments</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>YEAR</value>
+        </entry>
+        <entry>
+          <value>MONTH</value>
+        </entry>
+        <entry>
+          <value>WEEK</value>
+        </entry>
+        <entry>
+          <value>DAY</value>
+        </entry>
+        <entry>
+          <value>HOUR</value>
+        </entry>
+        <entry>
+          <value>MINUTE</value>
+        </entry>
+        <entry>
+          <value>SECOND</value>
+        </entry>
+      </entries>
+    </value-attributes>
+    <value>DAY</value>
+    <description>Default granularity for the segments created by the Druid storage handler; this can be overridden per table using the table property druid.segment.granularity</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+   <property>
+    <name>hive.druid.indexer.partition.size.max</name>
+    <value>1000000</value>
+    <description>Maximum number of records per segment partition</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+   <property>
+    <name>hive.druid.indexer.memory.rownum.max</name>
+    <value>75000</value>
+    <description>Maximum number of records in memory while storing data in Druid</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>hive.druid.select.distribute</name>
+    <value>true</value>
+    <description>If it is set to true, we distribute the execution of Druid Select queries</description>
+    <on-ambari-upgrade add="false"/>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.druid.basePersistDirectory</name>
+    <value></value>
+    <description>
+      Local temporary directory used to persist intermediate indexing state;
+      if empty (recommended), it defaults to the JVM system property java.io.tmpdir.
+    </description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>hive.druid.storage.storageDirectory</name>
+    <value>{{druid_storage_dir}}</value>
+    <description>
+      Druid deep storage location for segments.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hive.druid.metadata.db.type</name>
+    <display-name>Druid metadata storage type </display-name>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>mysql</value>
+          <label>MYSQL</label>
+        </entry>
+        <entry>
+          <value>postgresql</value>
+          <label>POSTGRESQL</label>
+        </entry>
+      </entries>
+    </value-attributes>
+    <value>mysql</value>
+    <depends-on>
+      <property>
+        <type>druid-common</type>
+        <name>druid.metadata.storage.type</name>
+      </property>
+    </depends-on>
+    <description>Druid metadata storage type</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>hive.druid.passiveWaitTimeMs</name>
+    <value>30000</value>
+    <description>
+      Passive wait time in milliseconds; defaults to 30000 (30 seconds).
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>hive.druid.working.directory</name>
+    <value>/tmp/druid-indexing</value>
+    <description>
+      Default HDFS working directory used to store intermediate metadata.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>hive.druid.maxTries</name>
+    <value>5</value>
+    <description>
+      Maximum number of HTTP call retries before giving up.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>hive.druid.bitmap.type</name>
+    <display-name>Druid bitmap encoding type</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>roaring</value>
+        </entry>
+        <entry>
+          <value>concise</value>
+        </entry>
+      </entries>
+    </value-attributes>
+    <value>roaring</value>
+    <description>Coding algorithm used to encode the Druid bitmaps</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>hive.druid.http.read.timeout</name>
+    <value>PT10M</value>
+    <description>
+      Maximum time to wait when reading an HTTP response before giving up.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
 </configuration>
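
The hive-site properties above are tied back to the druid-common config type through their <depends-on> declarations: when a dependent druid-common property changes, the stack advisor recomputes the Hive-side value, which is exactly what the stack_advisor.py change in the next diff does. A minimal sketch of that recommendation pattern, using a hypothetical standalone helper and simplified dict structures (the real logic lives in the HDP26StackAdvisor class):

    # Minimal sketch of a depends-on driven recommendation.
    # recommend_hive_druid_metadata_user is a hypothetical helper, not Ambari API.
    def recommend_hive_druid_metadata_user(services, configurations):
        druid_common = services.get("configurations", {}) \
                               .get("druid-common", {}).get("properties", {})
        # Mirror druid.metadata.storage.connector.user into hive.druid.metadata.username,
        # matching the <depends-on> declared on the hive-site property above.
        user = druid_common.get("druid.metadata.storage.connector.user", "")
        hive_site = configurations.setdefault("hive-site", {}) \
                                  .setdefault("properties", {})
        hive_site["hive.druid.metadata.username"] = user

    # Example input shaped like the advisor's `services` payload:
    services = {"configurations": {"druid-common": {"properties": {
        "druid.metadata.storage.connector.user": "druid"}}}}
    configurations = {}
    recommend_hive_druid_metadata_user(services, configurations)
    print(configurations["hive-site"]["properties"]["hive.druid.metadata.username"])  # druid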

http://git-wip-us.apache.org/repos/asf/ambari/blob/ce2a0a00/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
index 0d2925e..b634e71 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
@@ -605,7 +605,7 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
     # druid is not in list of services to be installed
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
     if 'DRUID' in servicesList:
-        putHiveSiteProperty = self.putProperty(configurations, "hive-site", services)
+        putHiveInteractiveSiteProperty = self.putProperty(configurations, "hive-interactive-site", services)
         if 'druid-coordinator' in services['configurations']:
             component_hosts = self.getHostsWithComponent("DRUID", 'DRUID_COORDINATOR', services, hosts)
             if component_hosts is not None and len(component_hosts) > 0:
@@ -642,11 +642,11 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
             else:
                 druid_metadata_user = ""
 
-        putHiveSiteProperty('hive.druid.broker.address.default', druid_broker_host_port)
-        putHiveSiteProperty('hive.druid.coordinator.address.default', druid_coordinator_host_port)
-        putHiveSiteProperty('hive.druid.metadata.uri', druid_metadata_uri)
-        putHiveSiteProperty('hive.druid.metadata.username', druid_metadata_user)
-        putHiveSiteProperty('hive.druid.metadata.db.type', druid_metadata_type)
+        putHiveInteractiveSiteProperty('hive.druid.broker.address.default', druid_broker_host_port)
+        putHiveInteractiveSiteProperty('hive.druid.coordinator.address.default', druid_coordinator_host_port)
+        putHiveInteractiveSiteProperty('hive.druid.metadata.uri', druid_metadata_uri)
+        putHiveInteractiveSiteProperty('hive.druid.metadata.username', druid_metadata_user)
+        putHiveInteractiveSiteProperty('hive.druid.metadata.db.type', druid_metadata_type)
 
 
   def recommendHBASEConfigurations(self, configurations, clusterData, services, hosts):
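
The rename above works because putProperty returns a setter closure bound to one config type: constructing it with "hive-interactive-site" instead of "hive-site" redirects all five Druid properties into hive-interactive-site without touching the call sites' shape. A simplified model of that helper, assuming a bare dict for configurations (the real Ambari implementation also tracks attributes and user overrides):

    # Simplified model of stack_advisor's putProperty: returns a closure
    # that writes keys into a single config type inside `configurations`.
    def put_property(configurations, config_type):
        def setter(key, value):
            section = configurations.setdefault(config_type, {}) \
                                    .setdefault("properties", {})
            section[key] = value
        return setter

    configurations = {}
    put_hive_interactive = put_property(configurations, "hive-interactive-site")
    put_hive_interactive("hive.druid.metadata.db.type", "mysql")
    print(configurations)
    # {'hive-interactive-site': {'properties': {'hive.druid.metadata.db.type': 'mysql'}}}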

http://git-wip-us.apache.org/repos/asf/ambari/blob/ce2a0a00/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
index 2b4c656..fd7e438 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
@@ -75,11 +75,19 @@
             <set key ="atlas.jaas.ticketBased-KafkaClient.option.useTicketCache" value="true"
               if-type="cluster-env" if-key="security_enabled" if-value="true"/>
           </definition>
+        </changes>
+      </component>
+      <component name="HIVE_SERVER_INTERACTIVE">
+        <changes>
+          <definition xsi:type="configure" id="llap_update_tez_shuffle_ssl_enable" summary="Update additional LLAP-Tez settings">
+            <type>tez-interactive-site</type>
+            <set key="tez.runtime.shuffle.ssl.enable" value="false"/>
+          </definition>
           <definition xsi:type="configure" id="hdp_2_6_maint_druid_config_for_hive_hook" summary="Updating druid hive related properties">
             <type>hive-site</type>
             <set key="hive.druid.metadata.password" value="{{druid_metadata_password}}" if-type="druid-common"/>
-            <set key="hive.druid.indexer.segments.granularity" value="MINUTE" if-type="druid-common"/>
-            <set key="hive.druid.indexer.partition.size.max" value="5000000" if-type="druid-common"/>
+            <set key="hive.druid.indexer.segments.granularity" value="DAY" if-type="druid-common"/>
+            <set key="hive.druid.indexer.partition.size.max" value="1000000" if-type="druid-common"/>
             <set key="hive.druid.indexer.memory.rownum.max" value="75000" if-type="druid-common"/>
             <set key="hive.druid.select.distribute" value="true" if-type="druid-common"/>
             <set key="hive.druid.basePersistDirectory" value="" if-type="druid-common"/>
@@ -87,15 +95,6 @@
             <set key="hive.druid.passiveWaitTimeMs" value="30000" if-type="druid-common"/>
             <set key="hive.druid.working.directory" value="/tmp/druid-indexing" if-type="druid-common"/>
             <set key="hive.druid.bitmap.type" value="roaring" if-type="druid-common"/>
-
-          </definition>
-        </changes>
-      </component>
-      <component name="HIVE_SERVER_INTERACTIVE">
-        <changes>
-          <definition xsi:type="configure" id="llap_update_tez_shuffle_ssl_enable" summary="Update additional LLAP-Tez settings">
-            <type>tez-interactive-site</type>
-            <set key="tez.runtime.shuffle.ssl.enable" value="false"/>
           </definition>
         </changes>
       </component>
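
The config-upgrade change above does two things: it moves the Druid hive-site definition under the HIVE_SERVER_INTERACTIVE component's change list, and it aligns the upgraded defaults (DAY granularity, 1000000 max partition size) with the new stack defaults from the first diff. Each <set ... if-type="druid-common"/> entry is conditional: the key is written only if the cluster actually carries a druid-common config type. A rough sketch of that conditional-set semantics, simplified to ignore the if-key/if-value guards the upgrade engine also supports:

    # Rough sketch of a conditional <set key=... if-type=.../> upgrade action.
    def apply_set(cluster_configs, target_type, key, value, if_type=None):
        # Skip the change when the guarding config type is absent from the cluster.
        if if_type is not None and if_type not in cluster_configs:
            return
        cluster_configs.setdefault(target_type, {})[key] = value

    cluster_configs = {"druid-common": {}}  # druid-common present, so the set applies
    apply_set(cluster_configs, "hive-site",
              "hive.druid.indexer.segments.granularity", "DAY", if_type="druid-common")
    print(cluster_configs["hive-site"])
    # {'hive.druid.indexer.segments.granularity': 'DAY'}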


[25/50] [abbrv] ambari git commit: AMBARI-22160. hadooplzo package installation failed on devdeploys (aonishuk)

Posted by ja...@apache.org.
AMBARI-22160. hadooplzo package installation failed on devdeploys (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/fc80a183
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/fc80a183
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/fc80a183

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: fc80a1837cc613160e3c60cc3290b7e517b5cd45
Parents: 6eb273e
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Fri Oct 6 16:22:08 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Oct 9 16:06:47 2017 +0300

----------------------------------------------------------------------
 .../libraries/script/script.py                  |   44 +-
 .../resources/Ambari-DDL-AzureDB-CREATE.sql     | 2147 ++++++++++++++++++
 ambari-server/snippet/Snippet.java              |    8 +
 .../stacks/2.0.6/HBASE/test_hbase_master.py     |    2 +
 .../src/test/python/stacks/utils/RMFTestCase.py |    4 +-
 5 files changed, 2190 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/fc80a183/ambari-common/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index d5b4469..cd8fce4 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -501,6 +501,7 @@ class Script(object):
       Script.stack_version_from_distro_select = pkg_provider.get_installed_package_version(
               stack_tools.get_stack_tool_package(stack_tools.STACK_SELECTOR_NAME))
 
+
     return Script.stack_version_from_distro_select
 
 
@@ -525,22 +526,20 @@ class Script(object):
     """
     This function replaces ${stack_version} placeholder with actual version.  If the package
     version is passed from the server, use that as an absolute truth.
-    
+
+    :param name: name of the package
+    :param repo_version: actual version of the repo currently being installed
     """
-    stack_version_package_formatted = ""
+    if STACK_VERSION_PLACEHOLDER not in name:
+      return name
 
-    if not repo_version:
-      repo_version = self.get_stack_version_before_packages_installed()
+    stack_version_package_formatted = ""
 
     package_delimiter = '-' if OSCheck.is_ubuntu_family() else '_'
 
     # repositoryFile is the truth
     # package_version should be made to the form W_X_Y_Z_nnnn
     package_version = default("repositoryFile/repoVersion", None)
-    if package_version is not None:
-      package_version = package_version.replace('.', package_delimiter).replace('-', package_delimiter)
 
     # TODO remove legacy checks
     if package_version is None:
@@ -550,6 +549,16 @@ class Script(object):
     if package_version is None:
       package_version = default("hostLevelParams/package_version", None)
 
+    if package_version is None or '-' not in package_version:
+        self.load_available_packages()
+        package_name = self.get_package_from_available(name, self.available_packages_in_repos)
+        if package_name is None:
+          raise Fail("Cannot match package for regexp name {0}. Available packages: {1}".format(name, self.available_packages_in_repos))
+        return package_name
+        
+    if package_version is not None:
+      package_version = package_version.replace('.', package_delimiter).replace('-', package_delimiter)
+
     # The cluster effective version comes down when the version is known after the initial
     # install.  In that case we should not be guessing which version when invoking INSTALL, but
     # use the supplied version to build the package_version
@@ -568,6 +577,7 @@ class Script(object):
 
     # Wildcards cause a lot of troubles with installing packages, if the version contains wildcards we try to specify it.
     if not package_version or '*' in package_version:
+      repo_version = self.get_stack_version_before_packages_installed()
       stack_version_package_formatted = repo_version.replace('.', package_delimiter).replace('-', package_delimiter) if STACK_VERSION_PLACEHOLDER in name else name
 
     package_name = name.replace(STACK_VERSION_PLACEHOLDER, stack_version_package_formatted)
@@ -760,6 +770,17 @@ class Script(object):
     """
     self.install_packages(env)
 
+  def load_available_packages(self):
+    if self.available_packages_in_repos:
+      return self.available_packages_in_repos
+
+    pkg_provider = get_provider("Package")
+    try:
+      self.available_packages_in_repos = pkg_provider.get_available_packages_in_repos(Script.config['repositoryFile']['repositories'])
+    except Exception as err:
+      Logger.exception("Unable to load available packages")
+      self.available_packages_in_repos = []
+
   def install_packages(self, env):
     """
    List of packages that are required by the service is received from the server
@@ -779,20 +800,14 @@ class Script(object):
         return
       pass
     try:
-      package_list_str = config['hostLevelParams']['package_list']
       agent_stack_retry_on_unavailability = bool(config['hostLevelParams']['agent_stack_retry_on_unavailability'])
       agent_stack_retry_count = int(config['hostLevelParams']['agent_stack_retry_count'])
-      pkg_provider = get_provider("Package")
-      try:
-        available_packages_in_repos = pkg_provider.get_available_packages_in_repos(config['repositoryFile']['repositories'])
-      except Exception as err:
-        Logger.exception("Unable to load available packages")
-        available_packages_in_repos = []
+      package_list_str = config['hostLevelParams']['package_list']
       if isinstance(package_list_str, basestring) and len(package_list_str) > 0:
         package_list = json.loads(package_list_str)
         for package in package_list:
           if self.check_package_condition(package):
-            name = self.get_package_from_available(package['name'], available_packages_in_repos)
+            name = self.format_package_name(package['name'])
             # HACK: On Windows, only install ambari-metrics packages using Choco Package Installer
             # TODO: Update this once choco packages for hadoop are created. This is because, service metainfo.xml support
             # <osFamily>any<osFamily> which would cause installation failure on Windows.
@@ -1092,5 +1107,6 @@ class Script(object):
 
 
   def __init__(self):
+    self.available_packages_in_repos = []
     if Script.instance is not None:
       raise Fail("An instantiation already exists! Use, get_instance() method.")


[24/50] [abbrv] ambari git commit: AMBARI-22160. hadooplzo package installation failed on devdeploys (aonishuk)

Posted by ja...@apache.org.
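
The DDL below wraps every CREATE TABLE in an IF NOT EXISTS (SELECT 1 FROM sys.objects ...) guard so the script is a no-op when re-run against an existing database. Applying one such guarded batch from Python might look like the following sketch, assuming a pyodbc connection with a hypothetical DSN and credentials:

    # Sketch: applying an idempotent DDL batch with pyodbc (DSN/credentials assumed).
    import pyodbc

    conn = pyodbc.connect("DSN=ambari;UID=ambari;PWD=secret", autocommit=True)
    cursor = conn.cursor()

    ddl = """
    IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('stack') AND type = 'U')
    BEGIN
    CREATE TABLE stack(
      stack_id BIGINT NOT NULL,
      stack_name VARCHAR(255) NOT NULL,
      stack_version VARCHAR(255) NOT NULL,
      CONSTRAINT PK_stack PRIMARY KEY CLUSTERED (stack_id),
      CONSTRAINT UQ_stack UNIQUE (stack_name, stack_version))
    END
    """
    cursor.execute(ddl)  # safe to run repeatedly: the guard skips existing tables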
http://git-wip-us.apache.org/repos/asf/ambari/blob/fc80a183/ambari-server/bin/src/main/resources/Ambari-DDL-AzureDB-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/bin/src/main/resources/Ambari-DDL-AzureDB-CREATE.sql b/ambari-server/bin/src/main/resources/Ambari-DDL-AzureDB-CREATE.sql
new file mode 100644
index 0000000..b54132c
--- /dev/null
+++ b/ambari-server/bin/src/main/resources/Ambari-DDL-AzureDB-CREATE.sql
@@ -0,0 +1,2147 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License") you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Schema population script for $(AMBARIDBNAME)
+
+Use this script in sqlcmd mode, setting the environment variables like this:
+set AMBARIDBNAME=ambari
+
+sqlcmd -S localhost\SQLEXPRESS -i C:\app\ambari-server-1.3.0-SNAPSHOT\resources\Ambari-DDL-SQLServer-CREATE.sql
+*/
+
+
+------create the database------
+
+------create tables and grant privileges to db user---------
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('stack') AND type = 'U')
+BEGIN
+CREATE TABLE stack(
+  stack_id BIGINT NOT NULL,
+  stack_name VARCHAR(255) NOT NULL,
+  stack_version VARCHAR(255) NOT NULL,
+  CONSTRAINT PK_stack PRIMARY KEY CLUSTERED (stack_id),
+  CONSTRAINT UQ_stack UNIQUE (stack_name, stack_version))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('extension') AND type = 'U')
+BEGIN
+CREATE TABLE extension(
+  extension_id BIGINT NOT NULL,
+  extension_name VARCHAR(255) NOT NULL,
+  extension_version VARCHAR(255) NOT NULL,
+  CONSTRAINT PK_extension PRIMARY KEY CLUSTERED (extension_id),
+  CONSTRAINT UQ_extension UNIQUE (extension_name, extension_version))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('extensionlink') AND type = 'U')
+BEGIN
+CREATE TABLE extensionlink(
+  link_id BIGINT NOT NULL,
+  stack_id BIGINT NOT NULL,
+  extension_id BIGINT NOT NULL,
+  CONSTRAINT PK_extensionlink PRIMARY KEY CLUSTERED (link_id),
+  CONSTRAINT FK_extensionlink_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id),
+  CONSTRAINT FK_extensionlink_extension_id FOREIGN KEY (extension_id) REFERENCES extension(extension_id),
+  CONSTRAINT UQ_extension_link UNIQUE (stack_id, extension_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('adminresourcetype') AND type = 'U')
+BEGIN
+CREATE TABLE adminresourcetype (
+  resource_type_id INTEGER NOT NULL,
+  resource_type_name VARCHAR(255) NOT NULL,
+  CONSTRAINT PK_adminresourcetype PRIMARY KEY CLUSTERED (resource_type_id)
+  )
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('adminresource') AND type = 'U')
+BEGIN
+CREATE TABLE adminresource (
+  resource_id BIGINT NOT NULL,
+  resource_type_id INTEGER NOT NULL,
+  CONSTRAINT PK_adminresource PRIMARY KEY CLUSTERED (resource_id),
+  CONSTRAINT FK_resource_resource_type_id FOREIGN KEY (resource_type_id) REFERENCES adminresourcetype(resource_type_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('clusters') AND type = 'U')
+BEGIN
+CREATE TABLE clusters (
+  cluster_id BIGINT NOT NULL,
+  resource_id BIGINT NOT NULL,
+  upgrade_id BIGINT,
+  cluster_info VARCHAR(255) NOT NULL,
+  cluster_name VARCHAR(100) NOT NULL UNIQUE,
+  provisioning_state VARCHAR(255) NOT NULL DEFAULT 'INIT',
+  security_type VARCHAR(32) NOT NULL DEFAULT 'NONE',
+  desired_cluster_state VARCHAR(255) NOT NULL,
+  desired_stack_id BIGINT NOT NULL,
+  CONSTRAINT PK_clusters PRIMARY KEY CLUSTERED (cluster_id),
+  CONSTRAINT FK_clusters_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
+  CONSTRAINT FK_clusters_resource_id FOREIGN KEY (resource_id) REFERENCES adminresource(resource_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('clusterconfig') AND type = 'U')
+BEGIN
+CREATE TABLE clusterconfig (
+  config_id BIGINT NOT NULL,
+  version_tag VARCHAR(255) NOT NULL,
+  version BIGINT NOT NULL,
+  type_name VARCHAR(255) NOT NULL,
+  cluster_id BIGINT NOT NULL,
+  stack_id BIGINT NOT NULL,
+  selected SMALLINT NOT NULL DEFAULT 0,
+  config_data VARCHAR(MAX) NOT NULL,
+  config_attributes VARCHAR(MAX),
+  create_timestamp BIGINT NOT NULL,
+  unmapped SMALLINT NOT NULL DEFAULT 0,
+  selected_timestamp BIGINT NOT NULL DEFAULT 0,
+  CONSTRAINT PK_clusterconfig PRIMARY KEY CLUSTERED (config_id),
+  CONSTRAINT FK_clusterconfig_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id),
+  CONSTRAINT FK_clusterconfig_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id),
+  CONSTRAINT UQ_config_type_tag UNIQUE (cluster_id, type_name, version_tag),
+  CONSTRAINT UQ_config_type_version UNIQUE (cluster_id, type_name, version))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('serviceconfig') AND type = 'U')
+BEGIN
+CREATE TABLE serviceconfig (
+  service_config_id BIGINT NOT NULL,
+  cluster_id BIGINT NOT NULL,
+  service_name VARCHAR(255) NOT NULL,
+  version BIGINT NOT NULL,
+  create_timestamp BIGINT NOT NULL,
+  stack_id BIGINT NOT NULL,
+  user_name VARCHAR(255) NOT NULL DEFAULT '_db',
+  group_id BIGINT,
+  note VARCHAR(MAX),
+  CONSTRAINT PK_serviceconfig PRIMARY KEY CLUSTERED (service_config_id),
+  CONSTRAINT FK_serviceconfig_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id),
+  CONSTRAINT UQ_scv_service_version UNIQUE (cluster_id, service_name, version))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('hosts') AND type = 'U')
+BEGIN
+CREATE TABLE hosts (
+  host_id BIGINT NOT NULL,
+  host_name VARCHAR(255) NOT NULL,
+  cpu_count INTEGER NOT NULL,
+  ph_cpu_count INTEGER,
+  cpu_info VARCHAR(255) NOT NULL,
+  discovery_status VARCHAR(2000) NOT NULL,
+  host_attributes VARCHAR(MAX) NOT NULL,
+  ipv4 VARCHAR(255),
+  ipv6 VARCHAR(255),
+  public_host_name VARCHAR(255),
+  last_registration_time BIGINT NOT NULL,
+  os_arch VARCHAR(255) NOT NULL,
+  os_info VARCHAR(1000) NOT NULL,
+  os_type VARCHAR(255) NOT NULL,
+  rack_info VARCHAR(255) NOT NULL,
+  total_mem BIGINT NOT NULL,
+  CONSTRAINT PK_hosts PRIMARY KEY CLUSTERED (host_id),
+  CONSTRAINT UQ_hosts_host_name UNIQUE (host_name))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('serviceconfighosts') AND type = 'U')
+BEGIN
+CREATE TABLE serviceconfighosts (
+  service_config_id BIGINT NOT NULL,
+  host_id BIGINT NOT NULL,
+  CONSTRAINT PK_serviceconfighosts PRIMARY KEY CLUSTERED (service_config_id, host_id),
+  CONSTRAINT FK_scvhosts_host_id FOREIGN KEY (host_id) REFERENCES hosts(host_id),
+  CONSTRAINT FK_scvhosts_scv FOREIGN KEY (service_config_id) REFERENCES serviceconfig(service_config_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('serviceconfigmapping') AND type = 'U')
+BEGIN
+CREATE TABLE serviceconfigmapping (
+  service_config_id BIGINT NOT NULL,
+  config_id BIGINT NOT NULL,
+  CONSTRAINT PK_serviceconfigmapping PRIMARY KEY CLUSTERED (service_config_id, config_id),
+  CONSTRAINT FK_scvm_config FOREIGN KEY (config_id) REFERENCES clusterconfig(config_id),
+  CONSTRAINT FK_scvm_scv FOREIGN KEY (service_config_id) REFERENCES serviceconfig(service_config_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('clusterservices') AND type = 'U')
+BEGIN
+CREATE TABLE clusterservices (
+  service_name VARCHAR(255) NOT NULL,
+  cluster_id BIGINT NOT NULL,
+  service_enabled INT NOT NULL,
+  CONSTRAINT PK_clusterservices PRIMARY KEY CLUSTERED (service_name, cluster_id),
+  CONSTRAINT FK_clusterservices_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('clusterstate') AND type = 'U')
+BEGIN
+CREATE TABLE clusterstate (
+  cluster_id BIGINT NOT NULL,
+  current_cluster_state VARCHAR(255) NOT NULL,
+  current_stack_id BIGINT NOT NULL,
+  CONSTRAINT PK_clusterstate PRIMARY KEY CLUSTERED (cluster_id),
+  CONSTRAINT FK_clusterstate_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id),
+  CONSTRAINT FK_cs_current_stack_id FOREIGN KEY (current_stack_id) REFERENCES stack(stack_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('repo_version') AND type = 'U')
+BEGIN
+CREATE TABLE repo_version (
+  repo_version_id BIGINT NOT NULL,
+  stack_id BIGINT NOT NULL,
+  version VARCHAR(255) NOT NULL,
+  display_name VARCHAR(128) NOT NULL,
+  repositories VARCHAR(MAX) NOT NULL,
+  repo_type VARCHAR(255) DEFAULT 'STANDARD' NOT NULL,
+  hidden SMALLINT NOT NULL DEFAULT 0,
+  resolved BIT NOT NULL DEFAULT 0,
+  version_url VARCHAR(1024),
+  version_xml VARCHAR(MAX),
+  version_xsd VARCHAR(512),
+  parent_id BIGINT,
+  CONSTRAINT PK_repo_version PRIMARY KEY CLUSTERED (repo_version_id),
+  CONSTRAINT FK_repoversion_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id),
+  CONSTRAINT UQ_repo_version_display_name UNIQUE (display_name),
+  CONSTRAINT UQ_repo_version_stack_id UNIQUE (stack_id, version))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('servicecomponentdesiredstate') AND type = 'U')
+BEGIN
+CREATE TABLE servicecomponentdesiredstate (
+  id BIGINT NOT NULL,
+  component_name VARCHAR(255) NOT NULL,
+  cluster_id BIGINT NOT NULL,
+  desired_repo_version_id BIGINT NOT NULL,
+  desired_state VARCHAR(255) NOT NULL,
+  service_name VARCHAR(255) NOT NULL,
+  recovery_enabled SMALLINT NOT NULL DEFAULT 0,
+  repo_state VARCHAR(255) NOT NULL DEFAULT 'NOT_REQUIRED',
+  CONSTRAINT pk_sc_desiredstate PRIMARY KEY (id),
+  CONSTRAINT UQ_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id),
+  CONSTRAINT FK_scds_desired_repo_id FOREIGN KEY (desired_repo_version_id) REFERENCES repo_version (repo_version_id),
+  CONSTRAINT srvccmponentdesiredstatesrvcnm FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('hostcomponentdesiredstate') AND type = 'U')
+BEGIN
+CREATE TABLE hostcomponentdesiredstate (
+  id BIGINT NOT NULL,
+  cluster_id BIGINT NOT NULL,
+  component_name VARCHAR(255) NOT NULL,
+  desired_state VARCHAR(255) NOT NULL,
+  host_id BIGINT NOT NULL,
+  service_name VARCHAR(255) NOT NULL,
+  admin_state VARCHAR(32),
+  maintenance_state VARCHAR(32) NOT NULL,
+  security_state VARCHAR(32) NOT NULL DEFAULT 'UNSECURED',
+  restart_required BIT NOT NULL DEFAULT 0,
+  CONSTRAINT PK_hostcomponentdesiredstate PRIMARY KEY CLUSTERED (id),
+  CONSTRAINT UQ_hcdesiredstate_name UNIQUE (component_name, service_name, host_id, cluster_id),
+  CONSTRAINT FK_hcdesiredstate_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
+  CONSTRAINT hstcmpnntdesiredstatecmpnntnme FOREIGN KEY (component_name, service_name, cluster_id) REFERENCES servicecomponentdesiredstate (component_name, service_name, cluster_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('hostcomponentstate') AND type = 'U')
+BEGIN
+CREATE TABLE hostcomponentstate (
+  id BIGINT NOT NULL,
+  cluster_id BIGINT NOT NULL,
+  component_name VARCHAR(255) NOT NULL,
+  version VARCHAR(32) NOT NULL DEFAULT 'UNKNOWN',
+  current_state VARCHAR(255) NOT NULL,
+  host_id BIGINT NOT NULL,
+  service_name VARCHAR(255) NOT NULL,
+  upgrade_state VARCHAR(32) NOT NULL DEFAULT 'NONE',
+  security_state VARCHAR(32) NOT NULL DEFAULT 'UNSECURED',
+  CONSTRAINT PK_hostcomponentstate PRIMARY KEY CLUSTERED (id),
+  CONSTRAINT FK_hostcomponentstate_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
+  CONSTRAINT hstcomponentstatecomponentname FOREIGN KEY (component_name, service_name, cluster_id) REFERENCES servicecomponentdesiredstate (component_name, service_name, cluster_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_host_component_state')
+BEGIN
+CREATE NONCLUSTERED INDEX idx_host_component_state on hostcomponentstate(host_id, component_name, service_name, cluster_id)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('hoststate') AND type = 'U')
+BEGIN
+CREATE TABLE hoststate (
+  agent_version VARCHAR(255) NOT NULL,
+  available_mem BIGINT NOT NULL,
+  current_state VARCHAR(255) NOT NULL,
+  health_status VARCHAR(255),
+  host_id BIGINT NOT NULL,
+  time_in_state BIGINT NOT NULL,
+  maintenance_state VARCHAR(512),
+  CONSTRAINT PK_hoststate PRIMARY KEY CLUSTERED (host_id),
+  CONSTRAINT FK_hoststate_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('servicedesiredstate') AND type = 'U')
+BEGIN
+CREATE TABLE servicedesiredstate (
+  cluster_id BIGINT NOT NULL,
+  desired_host_role_mapping INTEGER NOT NULL,
+  desired_repo_version_id BIGINT NOT NULL,
+  desired_state VARCHAR(255) NOT NULL,
+  service_name VARCHAR(255) NOT NULL,
+  maintenance_state VARCHAR(32) NOT NULL,
+  security_state VARCHAR(32) NOT NULL DEFAULT 'UNSECURED',
+  credential_store_enabled SMALLINT NOT NULL DEFAULT 0,
+  CONSTRAINT PK_servicedesiredstate PRIMARY KEY CLUSTERED (cluster_id,service_name),
+  CONSTRAINT FK_repo_version_id FOREIGN KEY (desired_repo_version_id) REFERENCES repo_version (repo_version_id),
+  CONSTRAINT servicedesiredstateservicename FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('adminprincipaltype') AND type = 'U')
+BEGIN
+CREATE TABLE adminprincipaltype (
+  principal_type_id INTEGER NOT NULL,
+  principal_type_name VARCHAR(255) NOT NULL,
+  CONSTRAINT PK_adminprincipaltype PRIMARY KEY CLUSTERED (principal_type_id)
+  )
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('adminprincipal') AND type = 'U')
+BEGIN
+CREATE TABLE adminprincipal (
+  principal_id BIGINT NOT NULL,
+  principal_type_id INTEGER NOT NULL,
+  CONSTRAINT PK_adminprincipal PRIMARY KEY CLUSTERED (principal_id),
+  CONSTRAINT FK_principal_principal_type_id FOREIGN KEY (principal_type_id) REFERENCES adminprincipaltype(principal_type_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('users') AND type = 'U')
+BEGIN
+CREATE TABLE users (
+  user_id INTEGER,
+  principal_id BIGINT NOT NULL,
+  ldap_user INTEGER NOT NULL DEFAULT 0,
+  user_name VARCHAR(255) NOT NULL,
+  user_type VARCHAR(255) NOT NULL DEFAULT 'LOCAL',
+  create_time DATETIME DEFAULT GETDATE(),
+  user_password VARCHAR(255),
+  active INTEGER NOT NULL DEFAULT 1,
+  active_widget_layouts VARCHAR(1024) DEFAULT NULL,
+  CONSTRAINT PK_users PRIMARY KEY CLUSTERED (user_id),
+  CONSTRAINT FK_users_principal_id FOREIGN KEY (principal_id) REFERENCES adminprincipal(principal_id),
+  CONSTRAINT UNQ_users_0 UNIQUE (user_name, user_type))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('groups') AND type = 'U')
+BEGIN
+CREATE TABLE groups (
+  group_id INTEGER,
+  principal_id BIGINT NOT NULL,
+  group_name VARCHAR(255) NOT NULL,
+  ldap_group INTEGER NOT NULL DEFAULT 0,
+  group_type VARCHAR(255) NOT NULL DEFAULT 'LOCAL',
+  CONSTRAINT PK_groups PRIMARY KEY CLUSTERED (group_id),
+  CONSTRAINT FK_groups_principal_id FOREIGN KEY (principal_id) REFERENCES adminprincipal(principal_id),
+  CONSTRAINT UNQ_groups_0 UNIQUE (group_name, ldap_group))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('members') AND type = 'U')
+BEGIN
+CREATE TABLE members (
+  member_id INTEGER,
+  group_id INTEGER NOT NULL,
+  user_id INTEGER NOT NULL,
+  CONSTRAINT PK_members PRIMARY KEY CLUSTERED (member_id),
+  CONSTRAINT FK_members_group_id FOREIGN KEY (group_id) REFERENCES groups (group_id),
+  CONSTRAINT FK_members_user_id FOREIGN KEY (user_id) REFERENCES users (user_id),
+  CONSTRAINT UNQ_members_0 UNIQUE (group_id, user_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('requestschedule') AND type = 'U')
+BEGIN
+CREATE TABLE requestschedule (
+  schedule_id BIGINT,
+  cluster_id BIGINT NOT NULL,
+  description VARCHAR(255),
+  STATUS VARCHAR(255),
+  batch_separation_seconds SMALLINT,
+  batch_toleration_limit SMALLINT,
+  authenticated_user_id INTEGER,
+  create_user VARCHAR(255),
+  create_timestamp BIGINT,
+  update_user VARCHAR(255),
+  update_timestamp BIGINT,
+  minutes VARCHAR(10),
+  hours VARCHAR(10),
+  days_of_month VARCHAR(10),
+  month VARCHAR(10),
+  day_of_week VARCHAR(10),
+  yearToSchedule VARCHAR(10),
+  startTime VARCHAR(50),
+  endTime VARCHAR(50),
+  last_execution_status VARCHAR(255),
+  CONSTRAINT PK_requestschedule PRIMARY KEY CLUSTERED (schedule_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('request') AND type = 'U')
+BEGIN
+CREATE TABLE request (
+  request_id BIGINT NOT NULL,
+  cluster_id BIGINT,
+  command_name VARCHAR(255),
+  create_time BIGINT NOT NULL,
+  end_time BIGINT NOT NULL,
+  exclusive_execution BIT NOT NULL DEFAULT 0,
+  inputs VARBINARY(MAX),
+  request_context VARCHAR(255),
+  request_type VARCHAR(255),
+  request_schedule_id BIGINT,
+  start_time BIGINT NOT NULL,
+  status VARCHAR(255),
+  cluster_host_info VARBINARY(MAX) NOT NULL,
+  CONSTRAINT PK_request PRIMARY KEY CLUSTERED (request_id),
+  CONSTRAINT FK_request_schedule_id FOREIGN KEY (request_schedule_id) REFERENCES requestschedule (schedule_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('stage') AND type = 'U')
+BEGIN
+CREATE TABLE stage (
+  stage_id BIGINT NOT NULL,
+  request_id BIGINT NOT NULL,
+  cluster_id BIGINT NOT NULL,
+  skippable SMALLINT DEFAULT 0 NOT NULL,
+  supports_auto_skip_failure SMALLINT DEFAULT 0 NOT NULL,
+  log_info VARCHAR(255) NOT NULL,
+  request_context VARCHAR(255),
+  command_params VARBINARY(MAX),
+  host_params VARBINARY(MAX),
+  command_execution_type VARCHAR(32) NOT NULL DEFAULT 'STAGE',
+  CONSTRAINT PK_stage PRIMARY KEY CLUSTERED (stage_id, request_id),
+  CONSTRAINT FK_stage_request_id FOREIGN KEY (request_id) REFERENCES request (request_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('host_role_command') AND type = 'U')
+BEGIN
+CREATE TABLE host_role_command (
+  task_id BIGINT NOT NULL,
+  attempt_count SMALLINT NOT NULL,
+  retry_allowed SMALLINT DEFAULT 0 NOT NULL,
+  event VARCHAR(MAX) NOT NULL,
+  exitcode INTEGER NOT NULL,
+  host_id BIGINT,
+  last_attempt_time BIGINT NOT NULL,
+  request_id BIGINT NOT NULL,
+  role VARCHAR(255),
+  stage_id BIGINT NOT NULL,
+  start_time BIGINT NOT NULL,
+  original_start_time BIGINT NOT NULL,
+  end_time BIGINT,
+  status VARCHAR(255),
+  auto_skip_on_failure SMALLINT DEFAULT 0 NOT NULL,
+  std_error VARBINARY(max),
+  std_out VARBINARY(max),
+  output_log VARCHAR(255) NULL,
+  error_log VARCHAR(255) NULL,
+  structured_out VARBINARY(max),
+  role_command VARCHAR(255),
+  command_detail VARCHAR(255),
+  custom_command_name VARCHAR(255),
+  is_background SMALLINT DEFAULT 0 NOT NULL,
+  CONSTRAINT PK_host_role_command PRIMARY KEY CLUSTERED (task_id),
+  CONSTRAINT FK_host_role_command_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
+  CONSTRAINT FK_host_role_command_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES stage (stage_id, request_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('execution_command') AND type = 'U')
+BEGIN
+CREATE TABLE execution_command (
+  command VARBINARY(MAX),
+  task_id BIGINT NOT NULL,
+  CONSTRAINT PK_execution_command PRIMARY KEY CLUSTERED (task_id),
+  CONSTRAINT FK_execution_command_task_id FOREIGN KEY (task_id) REFERENCES host_role_command (task_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('role_success_criteria') AND type = 'U')
+BEGIN
+CREATE TABLE role_success_criteria (
+  ROLE VARCHAR(255) NOT NULL,
+  request_id BIGINT NOT NULL,
+  stage_id BIGINT NOT NULL,
+  success_factor FLOAT NOT NULL,
+  CONSTRAINT PK_role_success_criteria PRIMARY KEY CLUSTERED (ROLE, request_id, stage_id),
+  CONSTRAINT role_success_criteria_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES stage (stage_id, request_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('requestresourcefilter') AND type = 'U')
+BEGIN
+CREATE TABLE requestresourcefilter (
+  filter_id BIGINT NOT NULL,
+  request_id BIGINT NOT NULL,
+  service_name VARCHAR(255),
+  component_name VARCHAR(255),
+  hosts VARBINARY(MAX),
+  CONSTRAINT PK_requestresourcefilter PRIMARY KEY CLUSTERED (filter_id),
+  CONSTRAINT FK_reqresfilter_req_id FOREIGN KEY (request_id) REFERENCES request (request_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('requestoperationlevel') AND type = 'U')
+BEGIN
+CREATE TABLE requestoperationlevel (
+  operation_level_id BIGINT NOT NULL,
+  request_id BIGINT NOT NULL,
+  level_name VARCHAR(255),
+  cluster_name VARCHAR(255),
+  service_name VARCHAR(255),
+  host_component_name VARCHAR(255),
+  host_id BIGINT NULL,      -- unlike most host_id columns, this one allows NULLs because the request can be at the service level
+  CONSTRAINT PK_requestoperationlevel PRIMARY KEY CLUSTERED (operation_level_id),
+  CONSTRAINT FK_req_op_level_req_id FOREIGN KEY (request_id) REFERENCES request (request_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('ClusterHostMapping') AND type = 'U')
+BEGIN
+CREATE TABLE ClusterHostMapping (
+  cluster_id BIGINT NOT NULL,
+  host_id BIGINT NOT NULL,
+  CONSTRAINT PK_ClusterHostMapping PRIMARY KEY CLUSTERED (cluster_id, host_id),
+  CONSTRAINT FK_clhostmapping_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id),
+  CONSTRAINT FK_clusterhostmapping_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('key_value_store') AND type = 'U')
+BEGIN
+CREATE TABLE key_value_store (
+  [key] VARCHAR(255),
+  [value] VARCHAR(MAX),
+  CONSTRAINT PK_key_value_store PRIMARY KEY CLUSTERED ([key])
+  )
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('hostconfigmapping') AND type = 'U')
+BEGIN
+CREATE TABLE hostconfigmapping (
+  cluster_id BIGINT NOT NULL,
+  host_id BIGINT NOT NULL,
+  type_name VARCHAR(255) NOT NULL,
+  version_tag VARCHAR(255) NOT NULL,
+  service_name VARCHAR(255),
+  create_timestamp BIGINT NOT NULL,
+  selected INTEGER NOT NULL DEFAULT 0,
+  user_name VARCHAR(255) NOT NULL DEFAULT '_db',
+  CONSTRAINT PK_hostconfigmapping PRIMARY KEY CLUSTERED (cluster_id, host_id, type_name, create_timestamp),
+  CONSTRAINT FK_hostconfmapping_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id),
+  CONSTRAINT FK_hostconfmapping_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('metainfo') AND type = 'U')
+BEGIN
+CREATE TABLE metainfo (
+  [metainfo_key] VARCHAR(255),
+  [metainfo_value] VARCHAR(255),
+  CONSTRAINT PK_metainfo PRIMARY KEY CLUSTERED ([metainfo_key])
+  )
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('ambari_sequences') AND type = 'U')
+BEGIN
+CREATE TABLE ambari_sequences (
+  sequence_name VARCHAR(255),
+  [sequence_value] BIGINT NOT NULL,
+  CONSTRAINT PK_ambari_sequences PRIMARY KEY (sequence_name))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('configgroup') AND type = 'U')
+BEGIN
+CREATE TABLE configgroup (
+  group_id BIGINT,
+  cluster_id BIGINT NOT NULL,
+  group_name VARCHAR(255) NOT NULL,
+  tag VARCHAR(1024) NOT NULL,
+  description VARCHAR(1024),
+  create_timestamp BIGINT NOT NULL,
+  service_name VARCHAR(255),
+  CONSTRAINT PK_configgroup PRIMARY KEY CLUSTERED (group_id),
+  CONSTRAINT FK_configgroup_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('confgroupclusterconfigmapping') AND type = 'U')
+BEGIN
+CREATE TABLE confgroupclusterconfigmapping (
+  config_group_id BIGINT NOT NULL,
+  cluster_id BIGINT NOT NULL,
+  config_type VARCHAR(255) NOT NULL,
+  version_tag VARCHAR(255) NOT NULL,
+  user_name VARCHAR(255) DEFAULT '_db',
+  create_timestamp BIGINT NOT NULL,
+  CONSTRAINT PK_confgroupclustercfgmapping PRIMARY KEY CLUSTERED (config_group_id, cluster_id, config_type),
+  CONSTRAINT FK_cgccm_gid FOREIGN KEY (config_group_id) REFERENCES configgroup (group_id),
+  CONSTRAINT FK_confg FOREIGN KEY (cluster_id, config_type, version_tag) REFERENCES clusterconfig (cluster_id, type_name, version_tag))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('configgrouphostmapping') AND type = 'U')
+BEGIN
+CREATE TABLE configgrouphostmapping (
+  config_group_id BIGINT NOT NULL,
+  host_id BIGINT NOT NULL,
+  CONSTRAINT PK_configgrouphostmapping PRIMARY KEY CLUSTERED (config_group_id, host_id),
+  CONSTRAINT FK_cghm_cgid FOREIGN KEY (config_group_id) REFERENCES configgroup (group_id),
+  CONSTRAINT FK_cghm_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('requestschedulebatchrequest') AND type = 'U')
+BEGIN
+CREATE TABLE requestschedulebatchrequest (
+  schedule_id BIGINT,
+  batch_id BIGINT,
+  request_id BIGINT,
+  request_type VARCHAR(255),
+  request_uri VARCHAR(1024),
+  request_body VARBINARY(MAX),
+  request_status VARCHAR(255),
+  return_code SMALLINT,
+  return_message TEXT,
+  CONSTRAINT PK_requestschedulebatchrequest PRIMARY KEY CLUSTERED (schedule_id, batch_id),
+  CONSTRAINT FK_rsbatchrequest_schedule_id FOREIGN KEY (schedule_id) REFERENCES requestschedule (schedule_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('blueprint') AND type = 'U')
+BEGIN
+CREATE TABLE blueprint (
+  blueprint_name VARCHAR(255) NOT NULL,
+  stack_id BIGINT NOT NULL,
+  security_type VARCHAR(32) NOT NULL DEFAULT 'NONE',
+  security_descriptor_reference VARCHAR(255),
+  CONSTRAINT PK_blueprint PRIMARY KEY CLUSTERED (blueprint_name),
+  CONSTRAINT FK_blueprint_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('hostgroup') AND type = 'U')
+BEGIN
+CREATE TABLE hostgroup (
+  blueprint_name VARCHAR(255) NOT NULL,
+  NAME VARCHAR(255) NOT NULL,
+  cardinality VARCHAR(255) NOT NULL,
+  CONSTRAINT PK_hostgroup PRIMARY KEY CLUSTERED (blueprint_name, NAME),
+  CONSTRAINT FK_hg_blueprint_name FOREIGN KEY (blueprint_name) REFERENCES blueprint(blueprint_name))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('hostgroup_component') AND type = 'U')
+BEGIN
+CREATE TABLE hostgroup_component (
+  blueprint_name VARCHAR(255) NOT NULL,
+  hostgroup_name VARCHAR(255) NOT NULL,
+  NAME VARCHAR(255) NOT NULL,
+  provision_action VARCHAR(255),
+  CONSTRAINT PK_hostgroup_component PRIMARY KEY CLUSTERED (blueprint_name, hostgroup_name, NAME),
+  CONSTRAINT FK_hgc_blueprint_name FOREIGN KEY (blueprint_name, hostgroup_name) REFERENCES hostgroup (blueprint_name, name))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('blueprint_configuration') AND type = 'U')
+BEGIN
+CREATE TABLE blueprint_configuration (
+  blueprint_name VARCHAR(255) NOT NULL,
+  type_name VARCHAR(255) NOT NULL,
+  config_data VARCHAR(MAX) NOT NULL,
+  config_attributes VARCHAR(MAX),
+  CONSTRAINT PK_blueprint_configuration PRIMARY KEY CLUSTERED (blueprint_name, type_name),
+  CONSTRAINT FK_cfg_blueprint_name FOREIGN KEY (blueprint_name) REFERENCES blueprint(blueprint_name))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('blueprint_setting') AND type = 'U')
+BEGIN
+CREATE TABLE blueprint_setting (
+  id BIGINT NOT NULL,
+  blueprint_name VARCHAR(255) NOT NULL,
+  setting_name VARCHAR(255) NOT NULL,
+  setting_data TEXT NOT NULL,
+  CONSTRAINT PK_blueprint_setting PRIMARY KEY (id),
+  CONSTRAINT UQ_blueprint_setting_name UNIQUE(blueprint_name,setting_name),
+  CONSTRAINT FK_blueprint_setting_name FOREIGN KEY (blueprint_name) REFERENCES blueprint(blueprint_name)
+  )
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('hostgroup_configuration') AND type = 'U')
+BEGIN
+CREATE TABLE hostgroup_configuration (
+  blueprint_name VARCHAR(255) NOT NULL,
+  hostgroup_name VARCHAR(255) NOT NULL,
+  type_name VARCHAR(255) NOT NULL,
+  config_data VARCHAR(MAX) NOT NULL,
+  config_attributes VARCHAR(MAX),
+  CONSTRAINT PK_hostgroup_configuration PRIMARY KEY CLUSTERED (blueprint_name, hostgroup_name, type_name),
+  CONSTRAINT FK_hg_cfg_bp_hg_name FOREIGN KEY (blueprint_name, hostgroup_name) REFERENCES hostgroup (blueprint_name, name))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('viewmain') AND type = 'U')
+BEGIN
+CREATE TABLE viewmain (
+  view_name VARCHAR(255) NOT NULL,
+  label VARCHAR(255),
+  description VARCHAR(2048),
+  version VARCHAR(255),
+  build VARCHAR(128),
+  resource_type_id INTEGER NOT NULL,
+  icon VARCHAR(255),
+  icon64 VARCHAR(255),
+  archive VARCHAR(255),
+  mask VARCHAR(255),
+  system_view BIT NOT NULL DEFAULT 0,
+  CONSTRAINT PK_viewmain PRIMARY KEY CLUSTERED (view_name),
+  CONSTRAINT FK_view_resource_type_id FOREIGN KEY (resource_type_id) REFERENCES adminresourcetype(resource_type_id))
+END
+
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('viewurl') AND type = 'U')
+BEGIN
+CREATE table viewurl(
+  url_id BIGINT ,
+  url_name VARCHAR(255) NOT NULL ,
+  url_suffix VARCHAR(255) NOT NULL,
+  CONSTRAINT PK_viewurl PRIMARY KEY CLUSTERED (url_id)
+)
+END
+
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('viewinstance') AND type = 'U')
+BEGIN
+CREATE TABLE viewinstance (
+  view_instance_id BIGINT,
+  resource_id BIGINT NOT NULL,
+  view_name VARCHAR(255) NOT NULL,
+  NAME VARCHAR(255) NOT NULL,
+  label VARCHAR(255),
+  description VARCHAR(2048),
+  visible CHAR(1),
+  icon VARCHAR(255),
+  icon64 VARCHAR(255),
+  xml_driven CHAR(1),
+  alter_names BIT NOT NULL DEFAULT 1,
+  cluster_handle BIGINT,
+  cluster_type VARCHAR(100) NOT NULL DEFAULT 'LOCAL_AMBARI',
+  short_url BIGINT,
+  CONSTRAINT PK_viewinstance PRIMARY KEY CLUSTERED (view_instance_id),
+  CONSTRAINT FK_instance_url_id FOREIGN KEY (short_url) REFERENCES viewurl(url_id),
+  CONSTRAINT FK_viewinst_view_name FOREIGN KEY (view_name) REFERENCES viewmain(view_name),
+  CONSTRAINT FK_viewinstance_resource_id FOREIGN KEY (resource_id) REFERENCES adminresource(resource_id),
+  CONSTRAINT UQ_viewinstance_name UNIQUE (view_name, name),
+  CONSTRAINT UQ_viewinstance_name_id UNIQUE (view_instance_id, view_name, name))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('viewinstancedata') AND type = 'U')
+BEGIN
+CREATE TABLE viewinstancedata (
+  view_instance_id BIGINT,
+  view_name VARCHAR(255) NOT NULL,
+  view_instance_name VARCHAR(255) NOT NULL,
+  NAME VARCHAR(255) NOT NULL,
+  user_name VARCHAR(255) NOT NULL,
+  value VARCHAR(2000) NOT NULL,
+  CONSTRAINT PK_viewinstancedata PRIMARY KEY CLUSTERED (view_instance_id, NAME, user_name),
+  CONSTRAINT FK_viewinstdata_view_name FOREIGN KEY (view_instance_id, view_name, view_instance_name) REFERENCES viewinstance(view_instance_id, view_name, name))
+END
+
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('viewinstanceproperty') AND type = 'U')
+BEGIN
+CREATE TABLE viewinstanceproperty (
+  view_name VARCHAR(255) NOT NULL,
+  view_instance_name VARCHAR(255) NOT NULL,
+  NAME VARCHAR(255) NOT NULL,
+  value VARCHAR(2000),
+  CONSTRAINT PK_viewinstanceproperty PRIMARY KEY CLUSTERED (view_name, view_instance_name, NAME),
+  CONSTRAINT FK_viewinstprop_view_name FOREIGN KEY (view_name, view_instance_name) REFERENCES viewinstance(view_name, name))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('viewparameter') AND type = 'U')
+BEGIN
+CREATE TABLE viewparameter (
+  view_name VARCHAR(255) NOT NULL,
+  NAME VARCHAR(255) NOT NULL,
+  description VARCHAR(2048),
+  label VARCHAR(255),
+  placeholder VARCHAR(255),
+  default_value VARCHAR(2000),
+  cluster_config VARCHAR(255),
+  required CHAR(1),
+  masked CHAR(1),
+  CONSTRAINT PK_viewparameter PRIMARY KEY CLUSTERED (view_name, NAME),
+  CONSTRAINT FK_viewparam_view_name FOREIGN KEY (view_name) REFERENCES viewmain(view_name))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('viewresource') AND type = 'U')
+BEGIN
+CREATE TABLE viewresource (
+  view_name VARCHAR(255) NOT NULL,
+  NAME VARCHAR(255) NOT NULL,
+  plural_name VARCHAR(255),
+  id_property VARCHAR(255),
+  subResource_names VARCHAR(255),
+  provider VARCHAR(255),
+  service VARCHAR(255),
+  resource VARCHAR(255),
+  CONSTRAINT PK_viewresource PRIMARY KEY CLUSTERED (view_name, NAME),
+  CONSTRAINT FK_viewres_view_name FOREIGN KEY (view_name) REFERENCES viewmain(view_name))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('viewentity') AND type = 'U')
+BEGIN
+CREATE TABLE viewentity (
+  id BIGINT NOT NULL,
+  view_name VARCHAR(255) NOT NULL,
+  view_instance_name VARCHAR(255) NOT NULL,
+  class_name VARCHAR(255) NOT NULL,
+  id_property VARCHAR(255),
+  CONSTRAINT PK_viewentity PRIMARY KEY CLUSTERED (id),
+  CONSTRAINT FK_viewentity_view_name FOREIGN KEY (view_name, view_instance_name) REFERENCES viewinstance(view_name, name))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('adminpermission') AND type = 'U')
+BEGIN
+CREATE TABLE adminpermission (
+  permission_id BIGINT NOT NULL,
+  permission_name VARCHAR(255) NOT NULL,
+  resource_type_id INTEGER NOT NULL,
+  permission_label VARCHAR(255),
+  principal_id BIGINT NOT NULL,
+  sort_order SMALLINT NOT NULL DEFAULT 1,
+  CONSTRAINT PK_adminpermission PRIMARY KEY CLUSTERED (permission_id),
+  CONSTRAINT FK_permission_resource_type_id FOREIGN KEY (resource_type_id) REFERENCES adminresourcetype(resource_type_id),
+  CONSTRAINT FK_permission_principal_id FOREIGN KEY (principal_id) REFERENCES adminprincipal(principal_id),
+  CONSTRAINT UQ_perm_name_resource_type_id UNIQUE (permission_name, resource_type_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('roleauthorization') AND type = 'U')
+BEGIN
+CREATE TABLE roleauthorization (
+  authorization_id VARCHAR(100) NOT NULL,
+  authorization_name VARCHAR(255) NOT NULL,
+  CONSTRAINT PK_roleauthorization PRIMARY KEY (authorization_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('permission_roleauthorization') AND type = 'U')
+BEGIN
+CREATE TABLE permission_roleauthorization (
+  permission_id BIGINT NOT NULL,
+  authorization_id VARCHAR(100) NOT NULL,
+  CONSTRAINT PK_permsn_roleauthorization PRIMARY KEY (permission_id, authorization_id),
+  CONSTRAINT FK_permission_roleauth_aid FOREIGN KEY (authorization_id) REFERENCES roleauthorization(authorization_id),
+  CONSTRAINT FK_permission_roleauth_pid FOREIGN KEY (permission_id) REFERENCES adminpermission(permission_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('adminprivilege') AND type = 'U')
+BEGIN
+CREATE TABLE adminprivilege (
+  privilege_id BIGINT,
+  permission_id BIGINT NOT NULL,
+  resource_id BIGINT NOT NULL,
+  principal_id BIGINT NOT NULL,
+  CONSTRAINT PK_adminprivilege PRIMARY KEY CLUSTERED (privilege_id),
+  CONSTRAINT FK_privilege_permission_id FOREIGN KEY (permission_id) REFERENCES adminpermission(permission_id),
+  CONSTRAINT FK_privilege_principal_id FOREIGN KEY (principal_id) REFERENCES adminprincipal(principal_id),
+  CONSTRAINT FK_privilege_resource_id FOREIGN KEY (resource_id) REFERENCES adminresource(resource_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('host_version') AND type = 'U')
+BEGIN
+CREATE TABLE host_version (
+  id BIGINT NOT NULL,
+  repo_version_id BIGINT NOT NULL,
+  host_id BIGINT NOT NULL,
+  STATE VARCHAR(32) NOT NULL,
+  CONSTRAINT PK_host_version PRIMARY KEY CLUSTERED (id),
+  CONSTRAINT FK_host_version_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
+  CONSTRAINT FK_host_version_repovers_id FOREIGN KEY (repo_version_id) REFERENCES repo_version (repo_version_id),
+  CONSTRAINT UQ_host_repo UNIQUE(host_id, repo_version_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('artifact') AND type = 'U')
+BEGIN
+CREATE TABLE artifact (
+  artifact_name VARCHAR(255) NOT NULL,
+  artifact_data TEXT NOT NULL,
+  foreign_keys VARCHAR(255) NOT NULL,
+  CONSTRAINT PK_artifact PRIMARY KEY CLUSTERED (artifact_name, foreign_keys)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('widget') AND type = 'U')
+BEGIN
+CREATE TABLE widget (
+  id BIGINT NOT NULL,
+  widget_name VARCHAR(255) NOT NULL,
+  widget_type VARCHAR(255) NOT NULL,
+  metrics TEXT,
+  time_created BIGINT NOT NULL,
+  author VARCHAR(255),
+  description VARCHAR(2048),
+  default_section_name VARCHAR(255),
+  scope VARCHAR(255),
+  widget_values VARCHAR(4000),
+  properties VARCHAR(4000),
+  cluster_id BIGINT NOT NULL,
+  CONSTRAINT PK_widget PRIMARY KEY CLUSTERED (id)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('widget_layout') AND type = 'U')
+BEGIN
+CREATE TABLE widget_layout (
+  id BIGINT NOT NULL,
+  layout_name VARCHAR(255) NOT NULL,
+  section_name VARCHAR(255) NOT NULL,
+  scope VARCHAR(255) NOT NULL,
+  user_name VARCHAR(255) NOT NULL,
+  display_name VARCHAR(255),
+  cluster_id BIGINT NOT NULL,
+  CONSTRAINT PK_widget_layout PRIMARY KEY CLUSTERED (id)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('widget_layout_user_widget') AND type = 'U')
+BEGIN
+CREATE TABLE widget_layout_user_widget (
+  widget_layout_id BIGINT NOT NULL,
+  widget_id BIGINT NOT NULL,
+  widget_order smallint,
+  CONSTRAINT PK_widget_layout_user_widget PRIMARY KEY CLUSTERED (widget_layout_id, widget_id),
+  CONSTRAINT FK_widget_id FOREIGN KEY (widget_id) REFERENCES widget(id),
+  CONSTRAINT FK_widget_layout_id FOREIGN KEY (widget_layout_id) REFERENCES widget_layout(id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('topology_request') AND type = 'U')
+BEGIN
+CREATE TABLE topology_request (
+  id BIGINT NOT NULL,
+  action VARCHAR(255) NOT NULL,
+  cluster_id BIGINT NOT NULL,
+  bp_name VARCHAR(100) NOT NULL,
+  cluster_properties TEXT,
+  cluster_attributes TEXT,
+  description VARCHAR(1024),
+  provision_action VARCHAR(255),
+  CONSTRAINT PK_topology_request PRIMARY KEY CLUSTERED (id),
+  CONSTRAINT FK_topology_request_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('topology_hostgroup') AND type = 'U')
+BEGIN
+CREATE TABLE topology_hostgroup (
+  id BIGINT NOT NULL,
+  name VARCHAR(255) NOT NULL,
+  group_properties TEXT,
+  group_attributes TEXT,
+  request_id BIGINT NOT NULL,
+  CONSTRAINT PK_topology_hostgroup PRIMARY KEY CLUSTERED (id),
+  CONSTRAINT FK_hostgroup_req_id FOREIGN KEY (request_id) REFERENCES topology_request(id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('topology_host_info') AND type = 'U')
+BEGIN
+CREATE TABLE topology_host_info (
+  id BIGINT NOT NULL,
+  group_id BIGINT NOT NULL,
+  fqdn VARCHAR(255),
+  host_id BIGINT,
+  host_count INTEGER,
+  predicate VARCHAR(2048),
+  rack_info VARCHAR(255),
+  CONSTRAINT PK_topology_host_info PRIMARY KEY CLUSTERED (id),
+  CONSTRAINT FK_hostinfo_group_id FOREIGN KEY (group_id) REFERENCES topology_hostgroup(id),
+  CONSTRAINT FK_hostinfo_host_id FOREIGN KEY (host_id) REFERENCES hosts(host_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('topology_logical_request') AND type = 'U')
+BEGIN
+CREATE TABLE topology_logical_request (
+  id BIGINT NOT NULL,
+  request_id BIGINT NOT NULL,
+  description VARCHAR(1024),
+  CONSTRAINT PK_topology_logical_request PRIMARY KEY CLUSTERED (id),
+  CONSTRAINT FK_logicalreq_req_id FOREIGN KEY (request_id) REFERENCES topology_request(id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('topology_host_request') AND type = 'U')
+BEGIN
+CREATE TABLE topology_host_request (
+  id BIGINT NOT NULL,
+  logical_request_id BIGINT NOT NULL,
+  group_id BIGINT NOT NULL,
+  stage_id BIGINT NOT NULL,
+  host_name VARCHAR(255),
+  CONSTRAINT PK_topology_host_request PRIMARY KEY CLUSTERED (id),
+  CONSTRAINT FK_hostreq_group_id FOREIGN KEY (group_id) REFERENCES topology_hostgroup(id),
+  CONSTRAINT FK_hostreq_logicalreq_id FOREIGN KEY (logical_request_id) REFERENCES topology_logical_request(id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('topology_host_task') AND type = 'U')
+BEGIN
+CREATE TABLE topology_host_task (
+  id BIGINT NOT NULL,
+  host_request_id BIGINT NOT NULL,
+  type VARCHAR(255) NOT NULL,
+  CONSTRAINT PK_topology_host_task PRIMARY KEY CLUSTERED (id),
+  CONSTRAINT FK_hosttask_req_id FOREIGN KEY (host_request_id) REFERENCES topology_host_request (id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('topology_logical_task') AND type = 'U')
+BEGIN
+CREATE TABLE topology_logical_task (
+  id BIGINT NOT NULL,
+  host_task_id BIGINT NOT NULL,
+  physical_task_id BIGINT,
+  component VARCHAR(255) NOT NULL,
+  CONSTRAINT PK_topology_logical_task PRIMARY KEY CLUSTERED (id),
+  CONSTRAINT FK_ltask_hosttask_id FOREIGN KEY (host_task_id) REFERENCES topology_host_task (id),
+  CONSTRAINT FK_ltask_hrc_id FOREIGN KEY (physical_task_id) REFERENCES host_role_command (task_id))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('setting') AND type = 'U')
+BEGIN
+CREATE TABLE setting (
+  id BIGINT NOT NULL,
+  name VARCHAR(255) NOT NULL UNIQUE,
+  setting_type VARCHAR(255) NOT NULL,
+  content TEXT NOT NULL,
+  updated_by VARCHAR(255) NOT NULL DEFAULT '_db',
+  update_timestamp BIGINT NOT NULL,
+  CONSTRAINT PK_setting PRIMARY KEY (id)
+)
+END
+
+
+-- Remote Cluster table
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('remoteambaricluster') AND type = 'U')
+BEGIN
+CREATE TABLE remoteambaricluster(
+  cluster_id BIGINT NOT NULL,
+  name VARCHAR(255) NOT NULL,
+  username VARCHAR(255) NOT NULL,
+  url VARCHAR(255) NOT NULL,
+  password VARCHAR(255) NOT NULL,
+  CONSTRAINT PK_remote_ambari_cluster PRIMARY KEY (cluster_id),
+  CONSTRAINT UQ_remote_ambari_cluster UNIQUE (name))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('remoteambariclusterservice') AND type = 'U')
+BEGIN
+CREATE TABLE remoteambariclusterservice(
+  id BIGINT NOT NULL,
+  cluster_id BIGINT NOT NULL,
+  service_name VARCHAR(255) NOT NULL,
+  CONSTRAINT PK_remote_ambari_service PRIMARY KEY (id),
+  CONSTRAINT FK_remote_ambari_cluster_id FOREIGN KEY (cluster_id) REFERENCES remoteambaricluster(cluster_id)
+)
+END
+
+
+-- Remote Cluster table ends
+
+-- upgrade tables
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('upgrade') AND type = 'U')
+BEGIN
+CREATE TABLE upgrade (
+  upgrade_id BIGINT NOT NULL,
+  cluster_id BIGINT NOT NULL,
+  request_id BIGINT NOT NULL,
+  direction VARCHAR(255) DEFAULT 'UPGRADE' NOT NULL,
+  orchestration VARCHAR(255) DEFAULT 'STANDARD' NOT NULL,
+  upgrade_package VARCHAR(255) NOT NULL,
+  upgrade_type VARCHAR(32) NOT NULL,
+  repo_version_id BIGINT NOT NULL,
+  skip_failures BIT NOT NULL DEFAULT 0,
+  skip_sc_failures BIT NOT NULL DEFAULT 0,
+  downgrade_allowed BIT NOT NULL DEFAULT 1,
+  revert_allowed BIT NOT NULL DEFAULT 0,
+  suspended BIT DEFAULT 0 NOT NULL,
+  CONSTRAINT PK_upgrade PRIMARY KEY CLUSTERED (upgrade_id),
+  FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id),
+  FOREIGN KEY (request_id) REFERENCES request(request_id),
+  FOREIGN KEY (repo_version_id) REFERENCES repo_version(repo_version_id)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('upgrade_group') AND type = 'U')
+BEGIN
+CREATE TABLE upgrade_group (
+  upgrade_group_id BIGINT NOT NULL,
+  upgrade_id BIGINT NOT NULL,
+  group_name VARCHAR(255) DEFAULT '' NOT NULL,
+  group_title VARCHAR(1024) DEFAULT '' NOT NULL,
+  CONSTRAINT PK_upgrade_group PRIMARY KEY CLUSTERED (upgrade_group_id),
+  FOREIGN KEY (upgrade_id) REFERENCES upgrade(upgrade_id)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('upgrade_item') AND type = 'U')
+BEGIN
+CREATE TABLE upgrade_item (
+  upgrade_item_id BIGINT NOT NULL,
+  upgrade_group_id BIGINT NOT NULL,
+  stage_id BIGINT NOT NULL,
+  state VARCHAR(255) DEFAULT 'NONE' NOT NULL,
+  hosts TEXT,
+  tasks TEXT,
+  item_text TEXT,
+  CONSTRAINT PK_upgrade_item PRIMARY KEY CLUSTERED (upgrade_item_id),
+  FOREIGN KEY (upgrade_group_id) REFERENCES upgrade_group(upgrade_group_id)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('upgrade_history') AND type = 'U')
+BEGIN
+CREATE TABLE upgrade_history(
+  id BIGINT NOT NULL,
+  upgrade_id BIGINT NOT NULL,
+  service_name VARCHAR(255) NOT NULL,
+  component_name VARCHAR(255) NOT NULL,
+  from_repo_version_id BIGINT NOT NULL,
+  target_repo_version_id BIGINT NOT NULL,
+  CONSTRAINT PK_upgrade_hist PRIMARY KEY (id),
+  CONSTRAINT FK_upgrade_hist_upgrade_id FOREIGN KEY (upgrade_id) REFERENCES upgrade (upgrade_id),
+  CONSTRAINT FK_upgrade_hist_from_repo FOREIGN KEY (from_repo_version_id) REFERENCES repo_version (repo_version_id),
+  CONSTRAINT FK_upgrade_hist_target_repo FOREIGN KEY (target_repo_version_id) REFERENCES repo_version (repo_version_id),
+  CONSTRAINT UQ_upgrade_hist UNIQUE (upgrade_id, component_name, service_name)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('servicecomponent_version') AND type = 'U')
+BEGIN
+CREATE TABLE servicecomponent_version(
+  id BIGINT NOT NULL,
+  component_id BIGINT NOT NULL,
+  repo_version_id BIGINT NOT NULL,
+  state VARCHAR(32) NOT NULL,
+  user_name VARCHAR(255) NOT NULL,
+  CONSTRAINT PK_sc_version PRIMARY KEY (id),
+  CONSTRAINT FK_scv_component_id FOREIGN KEY (component_id) REFERENCES servicecomponentdesiredstate (id),
+  CONSTRAINT FK_scv_repo_version_id FOREIGN KEY (repo_version_id) REFERENCES repo_version (repo_version_id)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('ambari_operation_history') AND type = 'U')
+BEGIN
+CREATE TABLE ambari_operation_history(
+  id BIGINT NOT NULL,
+  from_version VARCHAR(255) NOT NULL,
+  to_version VARCHAR(255) NOT NULL,
+  start_time BIGINT NOT NULL,
+  end_time BIGINT,
+  operation_type VARCHAR(255) NOT NULL,
+  comments TEXT,
+  CONSTRAINT PK_ambari_operation_history PRIMARY KEY (id)
+)
+END
+
+
+
+-- tasks indices --
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_stage_request_id')
+BEGIN
+CREATE INDEX idx_stage_request_id ON stage (request_id)
+END
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_hrc_request_id')
+BEGIN
+CREATE INDEX idx_hrc_request_id ON host_role_command (request_id)
+END
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_hrc_status_role')
+BEGIN
+CREATE INDEX idx_hrc_status_role ON host_role_command (status, role)
+END
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_rsc_request_id')
+BEGIN
+CREATE INDEX idx_rsc_request_id ON role_success_criteria (request_id)
+END
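+
+-- Note (sketch): index names in sys.indexes are unique per table, not per
+-- database, so the name-only guards above could be satisfied by an
+-- identically named index on another table. A stricter per-table guard,
+-- shown for the stage index (a harmless no-op after the block above):
+IF NOT EXISTS (SELECT 1 FROM sys.indexes
+               WHERE name = 'idx_stage_request_id'
+                 AND object_id = OBJECT_ID('stage'))
+BEGIN
+CREATE INDEX idx_stage_request_id ON stage (request_id)
+END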
+
+
+
+-- altering tables to add constraints ----------
+
+-- altering tables by creating foreign keys----------
+-- Note: Oracle limits FK names to 32 characters, and the same FK name should be used across all DB types (a length check sketch follows below).
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('FK_clusters_upgrade_id') AND type = 'F')
+BEGIN
+ALTER TABLE clusters ADD CONSTRAINT FK_clusters_upgrade_id FOREIGN KEY (upgrade_id) REFERENCES upgrade (upgrade_id)
+END
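+
+-- A quick sanity check (sketch) for the 32-char rule above; it should
+-- return no rows when the naming convention is followed:
+SELECT name FROM sys.foreign_keys WHERE LEN(name) > 32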
+
+
+-- Kerberos
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('kerberos_principal') AND type = 'U')
+BEGIN
+CREATE TABLE kerberos_principal (
+  principal_name VARCHAR(255) NOT NULL,
+  is_service SMALLINT NOT NULL DEFAULT 1,
+  cached_keytab_path VARCHAR(255),
+  CONSTRAINT PK_kerberos_principal PRIMARY KEY CLUSTERED (principal_name)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('kerberos_principal_host') AND type = 'U')
+BEGIN
+CREATE TABLE kerberos_principal_host (
+  principal_name VARCHAR(255) NOT NULL,
+  host_id BIGINT NOT NULL,
+  CONSTRAINT PK_kerberos_principal_host PRIMARY KEY CLUSTERED (principal_name, host_id),
+  CONSTRAINT FK_krb_pr_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
+  CONSTRAINT FK_krb_pr_host_principalname FOREIGN KEY (principal_name) REFERENCES kerberos_principal (principal_name))
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('kerberos_descriptor') AND type = 'U')
+BEGIN
+CREATE TABLE kerberos_descriptor
+(
+   kerberos_descriptor_name   VARCHAR(255) NOT NULL,
+   kerberos_descriptor        VARCHAR(MAX) NOT NULL,
+   CONSTRAINT PK_kerberos_descriptor PRIMARY KEY (kerberos_descriptor_name)
+)
+END
+
+
+-- Kerberos (end)
+
+-- Alerting Framework
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_definition') AND type = 'U')
+BEGIN
+CREATE TABLE alert_definition (
+  definition_id BIGINT NOT NULL,
+  cluster_id BIGINT NOT NULL,
+  definition_name VARCHAR(255) NOT NULL,
+  service_name VARCHAR(255) NOT NULL,
+  component_name VARCHAR(255),
+  scope VARCHAR(255) DEFAULT 'ANY' NOT NULL,
+  label VARCHAR(255),
+  help_url VARCHAR(512),
+  description TEXT,
+  enabled SMALLINT DEFAULT 1 NOT NULL,
+  schedule_interval INTEGER NOT NULL,
+  source_type VARCHAR(255) NOT NULL,
+  alert_source TEXT NOT NULL,
+  hash VARCHAR(64) NOT NULL,
+  ignore_host SMALLINT DEFAULT 0 NOT NULL,
+  repeat_tolerance INTEGER DEFAULT 1 NOT NULL,
+  repeat_tolerance_enabled SMALLINT DEFAULT 0 NOT NULL,
+  CONSTRAINT PK_alert_definition PRIMARY KEY CLUSTERED (definition_id),
+  FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id),
+  CONSTRAINT uni_alert_def_name UNIQUE(cluster_id,definition_name)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_history') AND type = 'U')
+BEGIN
+CREATE TABLE alert_history (
+  alert_id BIGINT NOT NULL,
+  cluster_id BIGINT NOT NULL,
+  alert_definition_id BIGINT NOT NULL,
+  service_name VARCHAR(255) NOT NULL,
+  component_name VARCHAR(255),
+  host_name VARCHAR(255),
+  alert_instance VARCHAR(255),
+  alert_timestamp BIGINT NOT NULL,
+  alert_label VARCHAR(1024),
+  alert_state VARCHAR(255) NOT NULL,
+  alert_text TEXT,
+  CONSTRAINT PK_alert_history PRIMARY KEY CLUSTERED (alert_id),
+  FOREIGN KEY (alert_definition_id) REFERENCES alert_definition(definition_id),
+  FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_current') AND type = 'U')
+BEGIN
+CREATE TABLE alert_current (
+  alert_id BIGINT NOT NULL,
+  definition_id BIGINT NOT NULL,
+  history_id BIGINT NOT NULL UNIQUE,
+  maintenance_state VARCHAR(255) NOT NULL,
+  original_timestamp BIGINT NOT NULL,
+  latest_timestamp BIGINT NOT NULL,
+  latest_text TEXT,
+  occurrences BIGINT NOT NULL DEFAULT 1,
+  firmness VARCHAR(255) NOT NULL DEFAULT 'HARD',
+  CONSTRAINT PK_alert_current PRIMARY KEY CLUSTERED (alert_id),
+  FOREIGN KEY (definition_id) REFERENCES alert_definition(definition_id),
+  FOREIGN KEY (history_id) REFERENCES alert_history(alert_id)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_group') AND type = 'U')
+BEGIN
+CREATE TABLE alert_group (
+  group_id BIGINT NOT NULL,
+  cluster_id BIGINT NOT NULL,
+  group_name VARCHAR(255) NOT NULL,
+  is_default SMALLINT NOT NULL DEFAULT 0,
+  service_name VARCHAR(255),
+  CONSTRAINT PK_alert_group PRIMARY KEY CLUSTERED (group_id),
+  CONSTRAINT uni_alert_group_name UNIQUE(cluster_id,group_name)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_target') AND type = 'U')
+BEGIN
+CREATE TABLE alert_target (
+  target_id BIGINT NOT NULL,
+  target_name VARCHAR(255) NOT NULL UNIQUE,
+  notification_type VARCHAR(64) NOT NULL,
+  properties TEXT,
+  description VARCHAR(1024),
+  is_global SMALLINT NOT NULL DEFAULT 0,
+  is_enabled SMALLINT NOT NULL DEFAULT 1,
+  CONSTRAINT PK_alert_target PRIMARY KEY CLUSTERED (target_id)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_target_states') AND type = 'U')
+BEGIN
+CREATE TABLE alert_target_states (
+  target_id BIGINT NOT NULL,
+  alert_state VARCHAR(255) NOT NULL,
+  FOREIGN KEY (target_id) REFERENCES alert_target(target_id)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_group_target') AND type = 'U')
+BEGIN
+CREATE TABLE alert_group_target (
+  group_id BIGINT NOT NULL,
+  target_id BIGINT NOT NULL,
+  CONSTRAINT PK_alert_group_target PRIMARY KEY CLUSTERED (group_id, target_id),
+  FOREIGN KEY (group_id) REFERENCES alert_group(group_id),
+  FOREIGN KEY (target_id) REFERENCES alert_target(target_id)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_grouping') AND type = 'U')
+BEGIN
+CREATE TABLE alert_grouping (
+  definition_id BIGINT NOT NULL,
+  group_id BIGINT NOT NULL,
+  CONSTRAINT PK_alert_grouping PRIMARY KEY CLUSTERED (group_id, definition_id),
+  FOREIGN KEY (definition_id) REFERENCES alert_definition(definition_id),
+  FOREIGN KEY (group_id) REFERENCES alert_group(group_id)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('alert_notice') AND type = 'U')
+BEGIN
+CREATE TABLE alert_notice (
+  notification_id BIGINT NOT NULL,
+  target_id BIGINT NOT NULL,
+  history_id BIGINT NOT NULL,
+  notify_state VARCHAR(255) NOT NULL,
+  uuid VARCHAR(64) NOT NULL UNIQUE,
+  CONSTRAINT PK_alert_notice PRIMARY KEY CLUSTERED (notification_id),
+  FOREIGN KEY (target_id) REFERENCES alert_target(target_id),
+  FOREIGN KEY (history_id) REFERENCES alert_history(alert_id)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_alert_history_def_id')
+BEGIN
+CREATE INDEX idx_alert_history_def_id on alert_history(alert_definition_id)
+END
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_alert_history_service')
+BEGIN
+CREATE INDEX idx_alert_history_service on alert_history(service_name)
+END
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_alert_history_host')
+BEGIN
+CREATE INDEX idx_alert_history_host on alert_history(host_name)
+END
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_alert_history_time')
+BEGIN
+CREATE INDEX idx_alert_history_time on alert_history(alert_timestamp)
+END
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_alert_history_state')
+BEGIN
+CREATE INDEX idx_alert_history_state on alert_history(alert_state)
+END
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_alert_group_name')
+BEGIN
+CREATE INDEX idx_alert_group_name on alert_group(group_name)
+END
+
+IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = 'idx_alert_notice_state')
+BEGIN
+CREATE INDEX idx_alert_notice_state on alert_notice(notify_state)
+END
+
+
+---------inserting some data-----------
+BEGIN TRANSACTION
+  DELETE metainfo;
+  DELETE adminprivilege;
+  DELETE permission_roleauthorization;
+  DELETE roleauthorization;
+  DELETE adminpermission;
+  DELETE users;
+  DELETE adminprincipal;
+  DELETE adminprincipaltype;
+  DELETE adminresource;
+  DELETE adminresourcetype;
+  DELETE ambari_sequences;
+  INSERT INTO ambari_sequences (sequence_name, [sequence_value])
+  VALUES
+    ('cluster_id_seq', 1),
+    ('host_id_seq', 0),
+    ('user_id_seq', 2),
+    ('group_id_seq', 1),
+    ('member_id_seq', 1),
+    ('host_role_command_id_seq', 1),
+    ('configgroup_id_seq', 1),
+    ('requestschedule_id_seq', 1),
+    ('resourcefilter_id_seq', 1),
+    ('viewentity_id_seq', 0),
+    ('operation_level_id_seq', 1),
+    ('view_instance_id_seq', 1),
+    ('resource_type_id_seq', 4),
+    ('resource_id_seq', 2),
+    ('principal_type_id_seq', 8),
+    ('principal_id_seq', 13),
+    ('permission_id_seq', 7),
+    ('privilege_id_seq', 1),
+    ('alert_definition_id_seq', 0),
+    ('alert_group_id_seq', 0),
+    ('alert_target_id_seq', 0),
+    ('alert_history_id_seq', 0),
+    ('alert_notice_id_seq', 0),
+    ('alert_current_id_seq', 0),
+    ('config_id_seq', 11),
+    ('repo_version_id_seq', 0),
+    ('host_version_id_seq', 0),
+    ('service_config_id_seq', 1),
+    ('upgrade_id_seq', 0),
+    ('upgrade_group_id_seq', 0),
+    ('widget_id_seq', 0),
+    ('widget_layout_id_seq', 0),
+    ('upgrade_item_id_seq', 0),
+    ('stack_id_seq', 0),
+    ('extension_id_seq', 0),
+    ('link_id_seq', 0),
+    ('topology_host_info_id_seq', 0),
+    ('topology_host_request_id_seq', 0),
+    ('topology_host_task_id_seq', 0),
+    ('topology_logical_request_id_seq', 0),
+    ('topology_logical_task_id_seq', 0),
+    ('topology_request_id_seq', 0),
+    ('topology_host_group_id_seq', 0),
+    ('setting_id_seq', 0),
+    ('hostcomponentstate_id_seq', 0),
+    ('servicecomponentdesiredstate_id_seq', 0),
+    ('upgrade_history_id_seq', 0),
+    ('blueprint_setting_id_seq', 0),
+    ('ambari_operation_history_id_seq', 0),
+    ('remote_cluster_id_seq', 0),
+    ('remote_cluster_service_id_seq', 0),
+    ('servicecomponent_version_id_seq', 0),
+    ('hostcomponentdesiredstate_id_seq', 0)
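+
+  -- Sketch only (hypothetical usage, not part of the seed data): a caller
+  -- could claim the next id from ambari_sequences atomically, e.g.
+  --   UPDATE ambari_sequences
+  --   SET [sequence_value] = [sequence_value] + 1
+  --   OUTPUT inserted.[sequence_value]
+  --   WHERE sequence_name = 'cluster_id_seq'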
+
+  insert into adminresourcetype (resource_type_id, resource_type_name)
+  values
+    (1, 'AMBARI'),
+    (2, 'CLUSTER'),
+    (3, 'VIEW')
+
+  insert into adminresource (resource_id, resource_type_id)
+    select 1, 1
+
+  insert into adminprincipaltype (principal_type_id, principal_type_name)
+  values
+    (1, 'USER'),
+    (2, 'GROUP'),
+    (8, 'ROLE')
+
+  insert into adminprincipal (principal_id, principal_type_id)
+  values
+    (1, 1),
+    (7, 8),
+    (8, 8),
+    (9, 8),
+    (10, 8),
+    (11, 8),
+    (12, 8),
+    (13, 8)
+
+  insert into users(user_id, principal_id, user_name, user_password)
+    select 1, 1, 'admin','538916f8943ec225d97a9a86a2c6ec0818c1cd400e09e03b660fdaaec4af29ddbb6f2b1033b81b00'
+
+  insert into adminpermission(permission_id, permission_name, resource_type_id, permission_label, principal_id, sort_order)
+  values
+    (1, 'AMBARI.ADMINISTRATOR', 1, 'Ambari Administrator', 7, 1),
+    (2, 'CLUSTER.USER', 2, 'Cluster User', 8, 6),
+    (3, 'CLUSTER.ADMINISTRATOR', 2, 'Cluster Administrator', 9, 2),
+    (4, 'VIEW.USER', 3, 'View User', 10, 7),
+    (5, 'CLUSTER.OPERATOR', 2, 'Cluster Operator', 11, 3),
+    (6, 'SERVICE.ADMINISTRATOR', 2, 'Service Administrator', 12, 4),
+    (7, 'SERVICE.OPERATOR', 2, 'Service Operator', 13, 5)
+
+  INSERT INTO roleauthorization(authorization_id, authorization_name)
+    SELECT 'VIEW.USE', 'Use View' UNION ALL
+    SELECT 'SERVICE.VIEW_METRICS', 'View metrics' UNION ALL
+    SELECT 'SERVICE.VIEW_STATUS_INFO', 'View status information' UNION ALL
+    SELECT 'SERVICE.VIEW_CONFIGS', 'View configurations' UNION ALL
+    SELECT 'SERVICE.COMPARE_CONFIGS', 'Compare configurations' UNION ALL
+    SELECT 'SERVICE.VIEW_ALERTS', 'View service-level alerts' UNION ALL
+    SELECT 'SERVICE.START_STOP', 'Start/Stop/Restart Service' UNION ALL
+    SELECT 'SERVICE.DECOMMISSION_RECOMMISSION', 'Decommission/recommission' UNION ALL
+    SELECT 'SERVICE.RUN_SERVICE_CHECK', 'Run service checks' UNION ALL
+    SELECT 'SERVICE.TOGGLE_MAINTENANCE', 'Turn on/off maintenance mode' UNION ALL
+    SELECT 'SERVICE.RUN_CUSTOM_COMMAND', 'Perform service-specific tasks' UNION ALL
+    SELECT 'SERVICE.MODIFY_CONFIGS', 'Modify configurations' UNION ALL
+    SELECT 'SERVICE.MANAGE_ALERTS', 'Manage service-level alerts' UNION ALL
+    SELECT 'SERVICE.MANAGE_CONFIG_GROUPS', 'Manage configuration groups' UNION ALL
+    SELECT 'SERVICE.MOVE', 'Move service to another host' UNION ALL
+    SELECT 'SERVICE.ENABLE_HA', 'Enable HA' UNION ALL
+    SELECT 'SERVICE.TOGGLE_ALERTS', 'Enable/disable service-level alerts' UNION ALL
+    SELECT 'SERVICE.ADD_DELETE_SERVICES', 'Add/delete services' UNION ALL
+    SELECT 'SERVICE.VIEW_OPERATIONAL_LOGS', 'View service operational logs' UNION ALL
+    SELECT 'SERVICE.SET_SERVICE_USERS_GROUPS', 'Set service users and groups' UNION ALL
+    SELECT 'SERVICE.MANAGE_AUTO_START', 'Manage service auto-start' UNION ALL
+    SELECT 'HOST.VIEW_METRICS', 'View metrics' UNION ALL
+    SELECT 'HOST.VIEW_STATUS_INFO', 'View status information' UNION ALL
+    SELECT 'HOST.VIEW_CONFIGS', 'View configuration' UNION ALL
+    SELECT 'HOST.TOGGLE_MAINTENANCE', 'Turn on/off maintenance mode' UNION ALL
+    SELECT 'HOST.ADD_DELETE_COMPONENTS', 'Install components' UNION ALL
+    SELECT 'HOST.ADD_DELETE_HOSTS', 'Add/Delete hosts' UNION ALL
+    SELECT 'CLUSTER.VIEW_METRICS', 'View metrics' UNION ALL
+    SELECT 'CLUSTER.VIEW_STATUS_INFO', 'View status information' UNION ALL
+    SELECT 'CLUSTER.VIEW_CONFIGS', 'View configuration' UNION ALL
+    SELECT 'CLUSTER.VIEW_STACK_DETAILS', 'View stack version details' UNION ALL
+    SELECT 'CLUSTER.VIEW_ALERTS', 'View cluster-level alerts' UNION ALL
+    SELECT 'CLUSTER.MANAGE_CREDENTIALS', 'Manage external credentials' UNION ALL
+    SELECT 'CLUSTER.MODIFY_CONFIGS', 'Modify cluster configurations' UNION ALL
+    SELECT 'CLUSTER.MANAGE_ALERTS', 'Manage cluster-level alerts' UNION ALL
+    SELECT 'CLUSTER.MANAGE_USER_PERSISTED_DATA', 'Manage cluster-level user persisted data' UNION ALL
+    SELECT 'CLUSTER.TOGGLE_ALERTS', 'Enable/disable cluster-level alerts' UNION ALL
+    SELECT 'CLUSTER.MANAGE_CONFIG_GROUPS', 'Manage cluster config groups' UNION ALL
+    SELECT 'CLUSTER.TOGGLE_KERBEROS', 'Enable/disable Kerberos' UNION ALL
+    SELECT 'CLUSTER.UPGRADE_DOWNGRADE_STACK', 'Upgrade/downgrade stack' UNION ALL
+    SELECT 'CLUSTER.RUN_CUSTOM_COMMAND', 'Perform custom cluster-level actions' UNION ALL
+    SELECT 'CLUSTER.MANAGE_AUTO_START', 'Manage service auto-start configuration' UNION ALL
+    SELECT 'CLUSTER.MANAGE_ALERT_NOTIFICATIONS', 'Manage alert notifications configuration' UNION ALL
+    SELECT 'AMBARI.ADD_DELETE_CLUSTERS', 'Create new clusters' UNION ALL
+    SELECT 'AMBARI.RENAME_CLUSTER', 'Rename clusters' UNION ALL
+    SELECT 'AMBARI.MANAGE_SETTINGS', 'Manage settings' UNION ALL
+    SELECT 'AMBARI.MANAGE_USERS', 'Manage users' UNION ALL
+    SELECT 'AMBARI.MANAGE_GROUPS', 'Manage groups' UNION ALL
+    SELECT 'AMBARI.MANAGE_VIEWS', 'Manage Ambari Views' UNION ALL
+    SELECT 'AMBARI.ASSIGN_ROLES', 'Assign roles' UNION ALL
+    SELECT 'AMBARI.MANAGE_STACK_VERSIONS', 'Manage stack versions' UNION ALL
+    SELECT 'AMBARI.EDIT_STACK_REPOS', 'Edit stack repository URLs' UNION ALL
+    SELECT 'AMBARI.RUN_CUSTOM_COMMAND', 'Perform custom administrative actions'
+
+  -- Set authorizations for View User role
+  INSERT INTO permission_roleauthorization(permission_id, authorization_id)
+    SELECT permission_id, 'VIEW.USE' FROM adminpermission WHERE permission_name='VIEW.USER'
+
+  -- Set authorizations for Cluster User role
+  INSERT INTO permission_roleauthorization(permission_id, authorization_id)
+    SELECT permission_id, 'SERVICE.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
+    SELECT permission_id, 'SERVICE.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
+    SELECT permission_id, 'SERVICE.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
+    SELECT permission_id, 'SERVICE.COMPARE_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
+    SELECT permission_id, 'SERVICE.VIEW_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
+    SELECT permission_id, 'HOST.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
+    SELECT permission_id, 'HOST.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
+    SELECT permission_id, 'HOST.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
+    SELECT permission_id, 'CLUSTER.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
+    SELECT permission_id, 'CLUSTER.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
+    SELECT permission_id, 'CLUSTER.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
+    SELECT permission_id, 'CLUSTER.VIEW_STACK_DETAILS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
+    SELECT permission_id, 'CLUSTER.VIEW_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.USER' UNION ALL
+    SELECT permission_id, 'CLUSTER.MANAGE_USER_PERSISTED_DATA' FROM adminpermission WHERE permission_name='CLUSTER.USER'
+
+  -- Set authorizations for Service Operator role
+  INSERT INTO permission_roleauthorization(permission_id, authorization_id)
+    SELECT permission_id, 'SERVICE.VIEW_METRICS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.COMPARE_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.VIEW_ALERTS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.START_STOP' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.DECOMMISSION_RECOMMISSION' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.RUN_SERVICE_CHECK' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.TOGGLE_MAINTENANCE' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.RUN_CUSTOM_COMMAND' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+    SELECT permission_id, 'HOST.VIEW_METRICS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+    SELECT permission_id, 'HOST.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+    SELECT permission_id, 'HOST.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.VIEW_METRICS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.VIEW_STACK_DETAILS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.VIEW_ALERTS' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.MANAGE_USER_PERSISTED_DATA' FROM adminpermission WHERE permission_name='SERVICE.OPERATOR'
+
+  -- Set authorizations for Service Administrator role
+  INSERT INTO permission_roleauthorization(permission_id, authorization_id)
+    SELECT permission_id, 'SERVICE.VIEW_METRICS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.COMPARE_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.VIEW_ALERTS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.START_STOP' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.DECOMMISSION_RECOMMISSION' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.RUN_SERVICE_CHECK' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.TOGGLE_MAINTENANCE' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.RUN_CUSTOM_COMMAND' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.MODIFY_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.MANAGE_CONFIG_GROUPS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.VIEW_OPERATIONAL_LOGS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.MANAGE_AUTO_START' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'HOST.VIEW_METRICS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'HOST.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'HOST.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.VIEW_METRICS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.VIEW_STACK_DETAILS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.MANAGE_CONFIG_GROUPS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.VIEW_ALERTS' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.MANAGE_USER_PERSISTED_DATA' FROM adminpermission WHERE permission_name='SERVICE.ADMINISTRATOR'
+
+  -- Set authorizations for Cluster Operator role
+  INSERT INTO permission_roleauthorization(permission_id, authorization_id)
+    SELECT permission_id, 'SERVICE.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.COMPARE_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.VIEW_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.START_STOP' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.DECOMMISSION_RECOMMISSION' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.RUN_SERVICE_CHECK' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.TOGGLE_MAINTENANCE' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.RUN_CUSTOM_COMMAND' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.MODIFY_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.MANAGE_CONFIG_GROUPS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.MOVE' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.ENABLE_HA' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.VIEW_OPERATIONAL_LOGS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.MANAGE_AUTO_START' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+    SELECT permission_id, 'HOST.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+    SELECT permission_id, 'HOST.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+    SELECT permission_id, 'HOST.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+    SELECT permission_id, 'HOST.TOGGLE_MAINTENANCE' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+    SELECT permission_id, 'HOST.ADD_DELETE_COMPONENTS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+    SELECT permission_id, 'HOST.ADD_DELETE_HOSTS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.VIEW_STACK_DETAILS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.MANAGE_CONFIG_GROUPS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.VIEW_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.MANAGE_CREDENTIALS' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.MANAGE_AUTO_START' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.MANAGE_USER_PERSISTED_DATA' FROM adminpermission WHERE permission_name='CLUSTER.OPERATOR'
+
+  -- Set authorizations for Cluster Administrator role
+  INSERT INTO permission_roleauthorization(permission_id, authorization_id)
+    SELECT permission_id, 'SERVICE.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.COMPARE_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.VIEW_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.START_STOP' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.DECOMMISSION_RECOMMISSION' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.RUN_SERVICE_CHECK' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.TOGGLE_MAINTENANCE' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.RUN_CUSTOM_COMMAND' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.MODIFY_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.MANAGE_CONFIG_GROUPS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.MANAGE_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.MOVE' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.ENABLE_HA' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.TOGGLE_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.ADD_DELETE_SERVICES' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.VIEW_OPERATIONAL_LOGS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.SET_SERVICE_USERS_GROUPS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.MANAGE_AUTO_START' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'HOST.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'HOST.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'HOST.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'HOST.TOGGLE_MAINTENANCE' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'HOST.ADD_DELETE_COMPONENTS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'HOST.ADD_DELETE_HOSTS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.VIEW_METRICS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.VIEW_STACK_DETAILS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.VIEW_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.MANAGE_CREDENTIALS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.MODIFY_CONFIGS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.MANAGE_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.TOGGLE_ALERTS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.TOGGLE_KERBEROS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.MANAGE_CONFIG_GROUPS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.UPGRADE_DOWNGRADE_STACK' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.MANAGE_USER_PERSISTED_DATA' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.MANAGE_AUTO_START' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.MANAGE_ALERT_NOTIFICATIONS' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.RUN_CUSTOM_COMMAND' FROM adminpermission WHERE permission_name='CLUSTER.ADMINISTRATOR'
+
+  -- Set authorizations for Administrator role
+  INSERT INTO permission_roleauthorization(permission_id, authorization_id)
+    SELECT permission_id, 'VIEW.USE' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.VIEW_METRICS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.COMPARE_CONFIGS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.VIEW_ALERTS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.START_STOP' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.DECOMMISSION_RECOMMISSION' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.RUN_SERVICE_CHECK' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.TOGGLE_MAINTENANCE' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.RUN_CUSTOM_COMMAND' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.MODIFY_CONFIGS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.MANAGE_CONFIG_GROUPS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.MANAGE_ALERTS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.MOVE' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.ENABLE_HA' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.TOGGLE_ALERTS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.ADD_DELETE_SERVICES' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.VIEW_OPERATIONAL_LOGS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.SET_SERVICE_USERS_GROUPS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'SERVICE.MANAGE_AUTO_START' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'HOST.VIEW_METRICS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'HOST.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'HOST.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'HOST.TOGGLE_MAINTENANCE' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'HOST.ADD_DELETE_COMPONENTS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'HOST.ADD_DELETE_HOSTS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.VIEW_METRICS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.VIEW_STATUS_INFO' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.VIEW_CONFIGS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.VIEW_STACK_DETAILS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.VIEW_ALERTS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.MANAGE_CREDENTIALS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.MODIFY_CONFIGS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.MANAGE_ALERTS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.MANAGE_CONFIG_GROUPS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.TOGGLE_ALERTS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.TOGGLE_KERBEROS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.UPGRADE_DOWNGRADE_STACK' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.MANAGE_USER_PERSISTED_DATA' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.MANAGE_AUTO_START' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.MANAGE_ALERT_NOTIFICATIONS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'CLUSTER.RUN_CUSTOM_COMMAND' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'AMBARI.ADD_DELETE_CLUSTERS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'AMBARI.RENAME_CLUSTER' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'AMBARI.MANAGE_SETTINGS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'AMBARI.MANAGE_USERS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'AMBARI.MANAGE_GROUPS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'AMBARI.MANAGE_VIEWS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'AMBARI.ASSIGN_ROLES' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'AMBARI.MANAGE_STACK_VERSIONS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'AMBARI.EDIT_STACK_REPOS' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR' UNION ALL
+    SELECT permission_id, 'AMBARI.RUN_CUSTOM_COMMAND' FROM adminpermission WHERE permission_name='AMBARI.ADMINISTRATOR'
+
+  insert into adminprivilege (privilege_id, permission_id, resource_id, principal_id)
+    select 1, 1, 1, 1
+
+  insert into metainfo(metainfo_key, metainfo_value)
+    select 'version','${ambariSchemaVersion}'
+COMMIT TRANSACTION
+
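+-- Example query (sketch): list the authorizations seeded above for one
+-- role, e.g. the Cluster User role:
+SELECT ra.authorization_name
+FROM adminpermission p
+JOIN permission_roleauthorization pra ON pra.permission_id = p.permission_id
+JOIN roleauthorization ra ON ra.authorization_id = pra.authorization_id
+WHERE p.permission_name = 'CLUSTER.USER'
+
+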
+-- Quartz tables
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('qrtz_job_details') AND type = 'U')
+BEGIN
+CREATE TABLE qrtz_job_details
+  (
+    SCHED_NAME VARCHAR(120) NOT NULL,
+    JOB_NAME  VARCHAR(200) NOT NULL,
+    JOB_GROUP VARCHAR(200) NOT NULL,
+    DESCRIPTION VARCHAR(250) NULL,
+    JOB_CLASS_NAME   VARCHAR(250) NOT NULL,
+    IS_DURABLE BIT NOT NULL,
+    IS_NONCONCURRENT BIT NOT NULL,
+    IS_UPDATE_DATA BIT NOT NULL,
+    REQUESTS_RECOVERY BIT NOT NULL,
+    JOB_DATA VARBINARY(MAX) NULL,
+    PRIMARY KEY CLUSTERED (SCHED_NAME,JOB_NAME,JOB_GROUP)
+)
+END
+
+
+IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('qrtz_triggers') AND type = 'U')
+BEGIN
+CREATE TABLE qrtz_triggers
+  (
+    SCHED_NAME VARCHAR(120) NOT NULL,
+    TRIGGER_NAME VARCHAR(200) NOT NULL,
+    TRIGGER_GROUP VARCHAR(200) NOT NULL,
+    JOB_NAME  VARCHAR(200) NOT NULL,
+    JOB_GROUP VARCHAR(200) NOT NULL,
+    DESCRIPTION VARCHAR(250)

<TRUNCATED>

[17/50] [abbrv] ambari git commit: Merge remote-tracking branch 'origin/trunk' into branch-feature-AMBARI-14714

Posted by ja...@apache.org.
Merge remote-tracking branch 'origin/trunk' into branch-feature-AMBARI-14714


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c36afcdd
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c36afcdd
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c36afcdd

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: c36afcdd051fa28cff2d299f1b97ed92e32fbbe5
Parents: ba1ec6d e61556c
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Sun Oct 8 23:00:20 2017 -0700
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Sun Oct 8 23:00:20 2017 -0700

----------------------------------------------------------------------
 ambari-agent/pom.xml                            |   4 +-
 .../main/python/ambari_agent/AmbariConfig.py    |  74 +-
 .../ambari_agent/CustomServiceOrchestrator.py   |   6 +-
 .../src/main/python/ambari_agent/FileCache.py   |   5 +-
 .../main/python/ambari_agent/ProcessHelper.py   |  71 --
 .../src/main/python/ambari_agent/StatusCheck.py | 142 ----
 .../src/main/python/ambari_agent/main.py        |  12 +-
 .../test/python/ambari_agent/TestFileCache.py   |   4 +-
 .../test/python/ambari_agent/TestLiveStatus.py  |   5 +-
 .../src/test/python/ambari_agent/TestMain.py    |  13 +-
 .../python/ambari_agent/TestProcessHelper.py    |  70 --
 .../test/python/ambari_agent/TestSecurity.py    |   1 -
 .../test/python/ambari_agent/TestStatusCheck.py | 180 -----
 .../resource_management/TestPackageResource.py  |   6 +-
 .../python/resource_management/TestScript.py    |  26 +-
 .../core/providers/package/__init__.py          |   4 +-
 .../core/providers/package/apt.py               |  35 +-
 .../core/providers/package/choco.py             |   8 +-
 .../core/providers/package/yumrpm.py            |  10 +-
 .../core/providers/package/zypper.py            |   7 +-
 .../core/resources/packaging.py                 |  14 +-
 .../libraries/functions/component_version.py    |  26 +-
 .../libraries/functions/conf_select.py          | 362 +++------
 .../libraries/functions/repository_util.py      |   8 +-
 .../libraries/functions/stack_select.py         |  69 +-
 .../libraries/script/script.py                  |  22 +-
 ambari-infra/ambari-infra-assembly/pom.xml      |   8 +
 .../src/main/package/deb/manager/postinst       |   5 +
 .../src/main/package/deb/solr-client/postinst   |  13 +
 .../src/main/package/rpm/manager/postinstall.sh |  20 +
 .../main/package/rpm/solr-client/postinstall.sh |  28 +
 .../org/apache/ambari/infra/InfraManager.java   |   4 +-
 .../conf/batch/InfraManagerBatchConfig.java     |  86 --
 .../infra/job/dummy/DummyJobConfiguration.java  | 108 +++
 .../src/main/resources/infraManager.sh          |  10 +-
 ambari-infra/ambari-infra-solr-client/build.xml |   3 +
 .../src/main/python/solrDataManager.py          | 150 ++--
 .../src/main/resources/solrIndexHelper.sh       |   5 +-
 .../logsearch/steps/LogSearchUISteps.java       |   4 +-
 .../resources/stories/selenium/login.ui.story   |   4 +-
 .../ambari-logsearch-web/package.json           |  17 +-
 ambari-logsearch/ambari-logsearch-web/pom.xml   |   6 +-
 .../ambari-logsearch-web/src/app/app.module.ts  |   6 +
 .../src/app/classes/active-service-log-entry.ts |  23 +
 .../src/app/classes/histogram-options.ts        |  36 +
 .../src/app/classes/list-item.class.ts          |  25 -
 .../src/app/classes/list-item.ts                |  26 +
 .../src/app/classes/models/app-settings.ts      |  27 +
 .../src/app/classes/models/app-state.ts         |  43 +
 .../src/app/classes/models/audit-log-field.ts   | 225 ++++++
 .../src/app/classes/models/audit-log.ts         |  46 ++
 .../src/app/classes/models/bar-graph.ts         |  24 +
 .../src/app/classes/models/common-entry.ts      |  22 +
 .../src/app/classes/models/count.ts             |  22 +
 .../src/app/classes/models/filter.ts            |  25 +
 .../src/app/classes/models/graph.ts             |  23 +
 .../src/app/classes/models/log-field.ts         |  27 +
 .../src/app/classes/models/log.ts               |  38 +
 .../src/app/classes/models/node.ts              |  30 +
 .../src/app/classes/models/service-log-field.ts | 107 +++
 .../src/app/classes/models/service-log.ts       |  27 +
 .../app/classes/models/solr-collection-state.ts |  23 +
 .../src/app/classes/models/store.ts             | 180 +++++
 .../src/app/classes/models/user-config.ts       |  26 +
 .../queries/audit-logs-query-params.class.ts    |  46 --
 .../classes/queries/audit-logs-query-params.ts  |  46 ++
 .../app/classes/queries/query-params.class.ts   |  23 -
 .../src/app/classes/queries/query-params.ts     |  23 +
 ...ce-logs-histogram-query-params.class.spec.ts | 203 -----
 ...service-logs-histogram-query-params.class.ts |  70 --
 .../service-logs-histogram-query-params.spec.ts | 203 +++++
 .../service-logs-histogram-query-params.ts      |  70 ++
 .../queries/service-logs-query-params.class.ts  |  30 -
 .../queries/service-logs-query-params.ts        |  30 +
 .../service-logs-truncated-query-params.ts      |  36 +
 .../app/classes/service-log-context-entry.ts    |  26 +
 .../dropdown-button.component.html              |   3 +-
 .../dropdown-button.component.spec.ts           |   5 +-
 .../dropdown-button.component.ts                |   2 +-
 .../dropdown-list/dropdown-list.component.html  |   2 +-
 .../dropdown-list.component.spec.ts             |  12 +-
 .../dropdown-list/dropdown-list.component.ts    |  11 +-
 .../filter-button.component.spec.ts             |   5 +-
 .../filter-button/filter-button.component.ts    |   2 +-
 .../filter-dropdown.component.spec.ts           |   5 +-
 .../filters-panel/filters-panel.component.html  |   3 +-
 .../filters-panel/filters-panel.component.less  |   2 +-
 .../filters-panel.component.spec.ts             |   5 +-
 .../filters-panel/filters-panel.component.ts    |   4 +-
 .../log-context/log-context.component.html      |  33 +
 .../log-context/log-context.component.less      |  23 +
 .../log-context/log-context.component.spec.ts   | 108 +++
 .../log-context/log-context.component.ts        |  91 +++
 .../log-file-entry.component.html               |  20 +
 .../log-file-entry.component.less               |  31 +
 .../log-file-entry.component.spec.ts            |  56 ++
 .../log-file-entry/log-file-entry.component.ts  |  51 ++
 .../logs-container.component.html               |  12 +-
 .../logs-container.component.spec.ts            |   5 +-
 .../logs-container/logs-container.component.ts  |  44 +-
 .../logs-list/logs-list.component.html          |  30 +-
 .../logs-list/logs-list.component.less          |  36 +-
 .../logs-list/logs-list.component.spec.ts       |   3 +
 .../components/logs-list/logs-list.component.ts |  34 +-
 .../main-container.component.html               |   7 +
 .../main-container.component.less               |   4 +
 .../main-container.component.spec.ts            |  13 +-
 .../main-container/main-container.component.ts  |  32 +-
 .../menu-button/menu-button.component.spec.ts   |   5 +-
 .../menu-button/menu-button.component.ts        |   2 +-
 .../search-box/search-box.component.ts          |   2 +-
 .../time-histogram.component.less               |  22 +-
 .../time-histogram/time-histogram.component.ts  |  94 ++-
 .../timezone-picker.component.spec.ts           |   5 +-
 .../src/app/components/variables.less           |  30 +
 .../src/app/models/app-settings.model.ts        |  27 -
 .../src/app/models/app-state.model.ts           |  35 -
 .../src/app/models/audit-log-field.model.ts     | 225 ------
 .../src/app/models/audit-log.model.ts           |  46 --
 .../src/app/models/bar-graph.model.ts           |  24 -
 .../src/app/models/common-entry.model.ts        |  22 -
 .../src/app/models/count.model.ts               |  22 -
 .../src/app/models/filter.model.ts              |  25 -
 .../src/app/models/graph.model.ts               |  23 -
 .../src/app/models/log-field.model.ts           |  27 -
 .../src/app/models/log.model.ts                 |  37 -
 .../src/app/models/node.model.ts                |  29 -
 .../src/app/models/service-log-field.model.ts   | 107 ---
 .../src/app/models/service-log.model.ts         |  27 -
 .../app/models/solr-collection-state.model.ts   |  23 -
 .../src/app/models/store.model.ts               | 169 ----
 .../src/app/models/user-config.model.ts         |  26 -
 .../services/component-actions.service.spec.ts  |   8 +-
 .../app/services/component-actions.service.ts   |  52 +-
 .../component-generator.service.spec.ts         |  10 +-
 .../app/services/component-generator.service.ts |  23 +-
 .../src/app/services/filtering.service.spec.ts  |  27 +
 .../src/app/services/filtering.service.ts       |  97 ++-
 .../src/app/services/http-client.service.ts     |  15 +-
 .../app/services/logs-container.service.spec.ts |   8 +-
 .../src/app/services/logs-container.service.ts  |  70 +-
 .../services/storage/app-settings.service.ts    |   4 +-
 .../app/services/storage/app-state.service.ts   |   4 +-
 .../storage/audit-logs-fields.service.ts        |   2 +-
 .../app/services/storage/audit-logs.service.ts  |   2 +-
 .../app/services/storage/clusters.service.ts    |   2 +-
 .../app/services/storage/components.service.ts  |   2 +-
 .../src/app/services/storage/filters.service.ts |   2 +-
 .../src/app/services/storage/graphs.service.ts  |   2 +-
 .../src/app/services/storage/hosts.service.ts   |   2 +-
 .../app/services/storage/reducers.service.ts    |   2 +
 .../storage/service-logs-fields.service.ts      |   2 +-
 .../service-logs-histogram-data.service.ts      |   2 +-
 .../storage/service-logs-truncated.service.ts   |  32 +
 .../services/storage/service-logs.service.ts    |   2 +-
 .../services/storage/user-configs.service.ts    |   2 +-
 .../src/assets/i18n/en.json                     |   7 +-
 .../ambari-logsearch-web/webpack.config.js      |  21 +-
 ambari-logsearch/ambari-logsearch-web/yarn.lock | 288 +++++--
 .../timeline/HBaseTimelineMetricStore.java      |   8 +-
 .../timeline/HBaseTimelineMetricStoreTest.java  |  36 +-
 ambari-server/pom.xml                           |   2 +
 ambari-server/src/main/assemblies/server.xml    |  20 +-
 .../actionmanager/ExecutionCommandWrapper.java  |   3 +-
 .../ambari/server/actionmanager/Stage.java      |   3 +-
 .../ambari/server/agent/HeartbeatMonitor.java   |   3 +-
 .../ambari/server/agent/StatusCommand.java      |  16 +
 .../server/api/services/AmbariMetaInfo.java     |  61 +-
 .../AmbariManagementControllerImpl.java         |  22 +-
 .../server/controller/ControllerModule.java     |   4 +
 .../server/controller/KerberosHelperImpl.java   |  16 +-
 .../controller/ResourceProviderFactory.java     |   8 +
 .../server/controller/StackVersionResponse.java |  29 -
 .../AbstractControllerResourceProvider.java     |   4 +
 .../internal/AlertTargetResourceProvider.java   |   3 +-
 .../internal/ClientConfigResourceProvider.java  |   2 -
 .../internal/DefaultProviderModule.java         |   4 -
 .../internal/StackArtifactResourceProvider.java |  35 +-
 .../internal/UpgradeResourceProvider.java       |  77 +-
 .../internal/ViewInstanceResourceProvider.java  | 147 ++--
 .../upgrade/HostVersionOutOfSyncListener.java   |   4 +
 .../ambari/server/mpack/MpackGenerator.java     |  17 -
 .../orm/entities/RepositoryVersionEntity.java   |  34 +-
 .../server/orm/entities/WidgetLayoutEntity.java |   6 +-
 .../upgrades/AbstractUpgradeServerAction.java   |   6 +-
 .../upgrades/AutoSkipFailedSummaryAction.java   |  15 +-
 .../upgrades/ComponentVersionCheckAction.java   |   2 +-
 .../serveraction/upgrades/ConfigureAction.java  |  11 +-
 .../upgrades/FinalizeUpgradeAction.java         |   7 +-
 .../FixCapacitySchedulerOrderingPolicy.java     |  12 +-
 .../serveraction/upgrades/FixLzoCodecPath.java  |  10 +-
 .../upgrades/FixOozieAdminUsers.java            |  10 +-
 .../upgrades/FixYarnWebServiceUrl.java          |  11 +-
 .../upgrades/HBaseConfigCalculation.java        |  10 +-
 .../HBaseEnvMaxDirectMemorySizeAction.java      |  11 +-
 .../upgrades/HiveEnvClasspathAction.java        |  11 +-
 .../upgrades/HiveZKQuorumConfigAction.java      |  13 +-
 .../upgrades/KerberosKeytabsAction.java         |  13 +-
 .../upgrades/ManualStageAction.java             |   3 +-
 .../upgrades/OozieConfigCalculation.java        |  11 +-
 .../upgrades/PreconfigureKerberosAction.java    |   2 +-
 .../upgrades/RangerConfigCalculation.java       |  11 +-
 .../RangerKerberosConfigCalculation.java        |  11 +-
 .../upgrades/RangerKmsProxyConfig.java          |  11 +-
 .../RangerUsersyncConfigCalculation.java        |  11 +-
 .../upgrades/RangerWebAlertConfigAction.java    |   9 +-
 .../upgrades/SparkShufflePropertyConfig.java    |  11 +-
 .../upgrades/UpdateDesiredRepositoryAction.java |  10 +-
 .../upgrades/UpgradeUserKerberosDescriptor.java |   6 +-
 .../upgrades/YarnConfigCalculation.java         |  11 +-
 .../ambari/server/stack/ServiceDirectory.java   |   3 +-
 .../ambari/server/stack/StackContext.java       |  15 +-
 .../ambari/server/stack/StackDirectory.java     |  74 +-
 .../ambari/server/stack/StackManager.java       |  11 +-
 .../apache/ambari/server/stack/StackModule.java |  16 -
 .../org/apache/ambari/server/state/Host.java    |  17 +
 .../apache/ambari/server/state/ServiceInfo.java |   2 +-
 .../apache/ambari/server/state/StackInfo.java   |  39 -
 .../server/state/cluster/ClusterImpl.java       |   2 +-
 .../ambari/server/state/host/HostImpl.java      |  47 ++
 .../AbstractKerberosDescriptorContainer.java    |  26 +-
 .../state/kerberos/KerberosDescriptor.java      |   3 +-
 .../ambari/server/state/stack/UpgradePack.java  |   6 +
 .../stack/upgrade/ServerSideActionTask.java     |  15 +
 .../state/stack/upgrade/TaskParameter.java      |  41 +
 .../svccomphost/ServiceComponentHostImpl.java   |   9 +-
 .../server/topology/AsyncCallableService.java   | 110 ++-
 .../ambari/server/topology/TopologyManager.java |  26 +-
 .../topology/tasks/ConfigureClusterTask.java    | 124 +--
 .../server/upgrade/UpgradeCatalog260.java       |  31 +
 .../ambari_server/dbConfiguration_linux.py      |   9 +-
 .../python/ambari_server/resourceFilesKeeper.py |   7 +-
 .../python/ambari_server/serverConfiguration.py |   2 +-
 .../main/python/ambari_server/setupMpacks.py    |   2 +-
 .../1.6.1.2.2.0/configuration/accumulo-env.xml  |   6 +
 .../0.1.0/configuration/infra-solr-env.xml      |   6 +
 .../0.1.0/configuration/ams-env.xml             |   6 +
 .../AMBARI_METRICS/0.1.0/service_advisor.py     | 787 +++++++++++++++++++
 .../ATLAS/0.1.0.2.3/configuration/atlas-env.xml |   6 +
 .../ATLAS/0.7.0.3.0/configuration/atlas-env.xml |   6 +
 .../ATLAS/0.7.0.3.0/service_advisor.py          |   5 +-
 .../DRUID/0.10.1/configuration/druid-broker.xml | 106 +++
 .../DRUID/0.10.1/configuration/druid-common.xml | 270 +++++++
 .../0.10.1/configuration/druid-coordinator.xml  |  43 +
 .../DRUID/0.10.1/configuration/druid-env.xml    | 248 ++++++
 .../0.10.1/configuration/druid-historical.xml   |  94 +++
 .../DRUID/0.10.1/configuration/druid-log4j.xml  |  84 ++
 .../0.10.1/configuration/druid-logrotate.xml    |  68 ++
 .../configuration/druid-middlemanager.xml       | 122 +++
 .../0.10.1/configuration/druid-overlord.xml     |  52 ++
 .../DRUID/0.10.1/configuration/druid-router.xml |  59 ++
 .../common-services/DRUID/0.10.1/metainfo.xml   | 223 ++++++
 .../DRUID/0.10.1/package/scripts/broker.py      |  28 +
 .../DRUID/0.10.1/package/scripts/coordinator.py |  28 +
 .../DRUID/0.10.1/package/scripts/druid.py       | 307 ++++++++
 .../DRUID/0.10.1/package/scripts/druid_node.py  | 114 +++
 .../DRUID/0.10.1/package/scripts/historical.py  |  28 +
 .../0.10.1/package/scripts/middlemanager.py     |  28 +
 .../DRUID/0.10.1/package/scripts/overlord.py    |  28 +
 .../DRUID/0.10.1/package/scripts/params.py      | 200 +++++
 .../DRUID/0.10.1/package/scripts/router.py      |  28 +
 .../0.10.1/package/scripts/service_check.py     |  44 ++
 .../0.10.1/package/scripts/status_params.py     |  24 +
 .../DRUID/0.10.1/quicklinks/quicklinks.json     |  37 +
 .../DRUID/0.10.1/role_command_order.json        |  17 +
 .../DRUID/0.10.1/themes/theme.json              | 120 +++
 .../DRUID/0.9.2/configuration/druid-broker.xml  | 100 ---
 .../DRUID/0.9.2/configuration/druid-common.xml  | 270 -------
 .../0.9.2/configuration/druid-coordinator.xml   |  43 -
 .../DRUID/0.9.2/configuration/druid-env.xml     | 242 ------
 .../0.9.2/configuration/druid-historical.xml    |  88 ---
 .../DRUID/0.9.2/configuration/druid-log4j.xml   |  84 --
 .../0.9.2/configuration/druid-logrotate.xml     |  68 --
 .../0.9.2/configuration/druid-middlemanager.xml | 122 ---
 .../0.9.2/configuration/druid-overlord.xml      |  52 --
 .../DRUID/0.9.2/configuration/druid-router.xml  |  59 --
 .../common-services/DRUID/0.9.2/metainfo.xml    | 223 ------
 .../DRUID/0.9.2/package/scripts/broker.py       |  28 -
 .../DRUID/0.9.2/package/scripts/coordinator.py  |  28 -
 .../DRUID/0.9.2/package/scripts/druid.py        | 307 --------
 .../DRUID/0.9.2/package/scripts/druid_node.py   | 114 ---
 .../DRUID/0.9.2/package/scripts/historical.py   |  28 -
 .../0.9.2/package/scripts/middlemanager.py      |  28 -
 .../DRUID/0.9.2/package/scripts/overlord.py     |  28 -
 .../DRUID/0.9.2/package/scripts/params.py       | 200 -----
 .../DRUID/0.9.2/package/scripts/router.py       |  28 -
 .../0.9.2/package/scripts/service_check.py      |  44 --
 .../0.9.2/package/scripts/status_params.py      |  24 -
 .../DRUID/0.9.2/quicklinks/quicklinks.json      |  37 -
 .../DRUID/0.9.2/role_command_order.json         |  17 -
 .../DRUID/0.9.2/themes/theme.json               | 120 ---
 .../0.5.0.2.1/configuration/falcon-env.xml      |   6 +
 .../FLUME/1.4.0.2.0/configuration/flume-env.xml |   6 +
 .../0.96.0.2.0/configuration/hbase-env.xml      |   6 +
 .../HBASE/2.0.0.3.0/configuration/hbase-env.xml |   6 +
 .../HDFS/2.1.0.2.0/configuration/hadoop-env.xml |  10 +
 .../HDFS/2.1.0.2.0/configuration/hdfs-site.xml  |   1 +
 .../common-services/HDFS/2.1.0.2.0/widgets.json |   4 +-
 .../HDFS/3.0.0.3.0/configuration/hadoop-env.xml |  10 +
 .../HDFS/3.0.0.3.0/configuration/hdfs-site.xml  |   1 +
 .../HIVE/0.12.0.2.0/configuration/hive-env.xml  |  12 +
 .../package/scripts/hive_server_upgrade.py      |   5 -
 .../0.12.0.2.0/package/scripts/params_linux.py  |   5 +-
 .../0.12.0.2.0/package/scripts/status_params.py |   8 +-
 .../HIVE/0.12.0.2.0/package/scripts/webhcat.py  |   2 +-
 .../HIVE/2.1.0.3.0/configuration/hive-env.xml   |  12 +
 .../0.10.0.3.0/configuration/kafka-env.xml      |   6 +
 .../KAFKA/0.8.1/configuration/kafka-env.xml     |   6 +
 .../KNOX/0.5.0.2.2/configuration/knox-env.xml   |  10 +
 .../KNOX/0.5.0.3.0/configuration/knox-env.xml   |  10 +
 .../0.5.0/configuration/logsearch-env.xml       |   6 +
 .../1.0.0.2.3/configuration/mahout-env.xml      |   6 +
 .../OOZIE/4.0.0.2.0/configuration/oozie-env.xml |  10 +
 .../OOZIE/4.2.0.3.0/configuration/oozie-env.xml |  10 +
 .../RANGER/0.4.0/configuration/ranger-env.xml   |  10 +
 .../0.4.0/package/scripts/ranger_admin.py       |  13 +-
 .../1.0.0.3.0/configuration/ranger-env.xml      |  10 +
 .../0.5.0.2.3/configuration/kms-env.xml         |  10 +
 .../1.0.0.3.0/configuration/kms-env.xml         |  10 +
 .../RANGER_KMS/1.0.0.3.0/service_advisor.py     |   3 +-
 .../SPARK/1.2.1/configuration/spark-env.xml     |  10 +
 .../SPARK/2.2.0/configuration/livy-env.xml      |  10 +
 .../SPARK/2.2.0/configuration/spark-env.xml     |  10 +
 .../SPARK2/2.0.0/configuration/spark2-env.xml   |  10 +
 .../SQOOP/1.4.4.2.0/configuration/sqoop-env.xml |   6 +
 .../SQOOP/1.4.4.3.0/configuration/sqoop-env.xml |   6 +
 .../STORM/0.9.1/configuration/storm-env.xml     |   6 +
 .../STORM/1.0.1.3.0/configuration/storm-env.xml |   6 +
 .../0.15.0/configuration/superset-env.xml       |   6 +
 .../TEZ/0.4.0.2.1/configuration/tez-env.xml     |  10 +
 .../TEZ/0.9.0.3.0/configuration/tez-env.xml     |  10 +
 .../configuration-mapred/mapred-env.xml         |   6 +
 .../YARN/2.1.0.2.0/configuration/yarn-env.xml   |   6 +
 .../configuration-mapred/mapred-env.xml         |   6 +
 .../YARN/3.0.0.3.0/configuration/yarn-env.xml   |   6 +
 .../0.6.0/configuration/zeppelin-env.xml        |  10 +
 .../0.7.0/configuration/zeppelin-env.xml        |  10 +
 .../ZEPPELIN/0.7.0/package/scripts/master.py    |  25 +-
 .../3.4.5/configuration/zookeeper-env.xml       |   6 +
 .../src/main/resources/configuration-schema.xsd |   2 +-
 .../custom_actions/scripts/install_packages.py  |  35 +-
 ambari-server/src/main/resources/kerberos.json  |  79 ++
 .../main/resources/scripts/Ambaripreupload.py   |   4 +-
 .../scripts/post-user-creation-hook.sh          |   2 +-
 .../stack-hooks/after-INSTALL/scripts/hook.py   |  37 +
 .../stack-hooks/after-INSTALL/scripts/params.py | 108 +++
 .../scripts/shared_initialization.py            | 132 ++++
 .../before-ANY/files/changeToSecureUid.sh       |  64 ++
 .../stack-hooks/before-ANY/scripts/hook.py      |  36 +
 .../stack-hooks/before-ANY/scripts/params.py    | 254 ++++++
 .../before-ANY/scripts/shared_initialization.py | 273 +++++++
 .../stack-hooks/before-INSTALL/scripts/hook.py  |  37 +
 .../before-INSTALL/scripts/params.py            | 115 +++
 .../scripts/repo_initialization.py              |  75 ++
 .../scripts/shared_initialization.py            |  37 +
 .../stack-hooks/before-RESTART/scripts/hook.py  |  29 +
 .../before-START/files/checkForFormat.sh        |  65 ++
 .../before-START/files/fast-hdfs-resource.jar   | Bin 0 -> 28296600 bytes
 .../before-START/files/task-log4j.properties    | 134 ++++
 .../before-START/files/topology_script.py       |  66 ++
 .../before-START/scripts/custom_extensions.py   | 173 ++++
 .../stack-hooks/before-START/scripts/hook.py    |  43 +
 .../stack-hooks/before-START/scripts/params.py  | 380 +++++++++
 .../before-START/scripts/rack_awareness.py      |  48 ++
 .../scripts/shared_initialization.py            | 256 ++++++
 .../templates/commons-logging.properties.j2     |  43 +
 .../templates/exclude_hosts_list.j2             |  21 +
 .../templates/hadoop-metrics2.properties.j2     | 107 +++
 .../before-START/templates/health_check.j2      |  81 ++
 .../templates/include_hosts_list.j2             |  21 +
 .../templates/topology_mappings.data.j2         |  24 +
 .../HDP/2.0.6/configuration/cluster-env.xml     |  10 +
 .../2.0.6/hooks/after-INSTALL/scripts/hook.py   |  37 -
 .../2.0.6/hooks/after-INSTALL/scripts/params.py | 115 ---
 .../scripts/shared_initialization.py            | 132 ----
 .../hooks/before-ANY/files/changeToSecureUid.sh |  64 --
 .../HDP/2.0.6/hooks/before-ANY/scripts/hook.py  |  36 -
 .../2.0.6/hooks/before-ANY/scripts/params.py    | 277 -------
 .../before-ANY/scripts/shared_initialization.py | 281 -------
 .../2.0.6/hooks/before-INSTALL/scripts/hook.py  |  37 -
 .../hooks/before-INSTALL/scripts/params.py      | 115 ---
 .../scripts/repo_initialization.py              |  75 --
 .../scripts/shared_initialization.py            |  37 -
 .../2.0.6/hooks/before-RESTART/scripts/hook.py  |  29 -
 .../hooks/before-START/files/checkForFormat.sh  |  65 --
 .../before-START/files/fast-hdfs-resource.jar   | Bin 28296600 -> 0 bytes
 .../before-START/files/task-log4j.properties    | 134 ----
 .../hooks/before-START/files/topology_script.py |  66 --
 .../before-START/scripts/custom_extensions.py   | 173 ----
 .../2.0.6/hooks/before-START/scripts/hook.py    |  43 -
 .../2.0.6/hooks/before-START/scripts/params.py  | 387 ---------
 .../before-START/scripts/rack_awareness.py      |  48 --
 .../scripts/shared_initialization.py            | 256 ------
 .../templates/commons-logging.properties.j2     |  43 -
 .../templates/exclude_hosts_list.j2             |  21 -
 .../templates/hadoop-metrics2.properties.j2     | 107 ---
 .../before-START/templates/health_check.j2      |  81 --
 .../templates/include_hosts_list.j2             |  21 -
 .../templates/topology_mappings.data.j2         |  24 -
 .../resources/stacks/HDP/2.0.6/kerberos.json    |  79 --
 .../stacks/HDP/2.0.6/services/stack_advisor.py  | 562 +------------
 .../resources/stacks/HDP/2.0.6/widgets.json     |  95 ---
 .../services/HBASE/configuration/hbase-env.xml  |   6 +
 .../stacks/HDP/2.2/services/stack_advisor.py    |   1 -
 .../services/ECS/package/scripts/ecs_client.py  |   2 +-
 .../services/ECS/package/scripts/params.py      |   2 +-
 .../services/ATLAS/configuration/atlas-env.xml  |   6 +
 .../HIVE/configuration/llap-daemon-log4j.xml    |   2 +-
 .../services/SPARK/configuration/livy-env.xml   |  10 +
 .../stacks/HDP/2.5/services/stack_advisor.py    |   3 +-
 .../stacks/HDP/2.6/kerberos_preconfigure.json   |   9 +
 .../stacks/HDP/2.6/services/DRUID/metainfo.xml  |   5 +-
 .../HIVE/configuration/tez-interactive-site.xml |   6 +
 .../services/SPARK/configuration/livy-env.xml   |  10 +
 .../services/YARN/configuration/yarn-site.xml   |  18 +
 .../ZEPPELIN/configuration/zeppelin-env.xml     |  10 +
 .../stacks/HDP/2.6/services/stack_advisor.py    |  28 +-
 .../HDP/3.0/configuration/cluster-env.xml       |  10 +
 .../HDP/3.0/hooks/after-INSTALL/scripts/hook.py |  37 -
 .../3.0/hooks/after-INSTALL/scripts/params.py   | 109 ---
 .../scripts/shared_initialization.py            | 140 ----
 .../hooks/before-ANY/files/changeToSecureUid.sh |  53 --
 .../HDP/3.0/hooks/before-ANY/scripts/hook.py    |  36 -
 .../HDP/3.0/hooks/before-ANY/scripts/params.py  | 259 ------
 .../before-ANY/scripts/shared_initialization.py | 239 ------
 .../3.0/hooks/before-INSTALL/scripts/hook.py    |  37 -
 .../3.0/hooks/before-INSTALL/scripts/params.py  | 115 ---
 .../scripts/repo_initialization.py              |  76 --
 .../scripts/shared_initialization.py            |  37 -
 .../3.0/hooks/before-RESTART/scripts/hook.py    |  29 -
 .../hooks/before-START/files/checkForFormat.sh  |  65 --
 .../before-START/files/fast-hdfs-resource.jar   | Bin 28296600 -> 0 bytes
 .../before-START/files/task-log4j.properties    | 134 ----
 .../hooks/before-START/files/topology_script.py |  66 --
 .../HDP/3.0/hooks/before-START/scripts/hook.py  |  40 -
 .../3.0/hooks/before-START/scripts/params.py    | 364 ---------
 .../before-START/scripts/rack_awareness.py      |  47 --
 .../scripts/shared_initialization.py            | 249 ------
 .../templates/commons-logging.properties.j2     |  43 -
 .../templates/exclude_hosts_list.j2             |  21 -
 .../templates/hadoop-metrics2.properties.j2     | 107 ---
 .../before-START/templates/health_check.j2      |  81 --
 .../templates/include_hosts_list.j2             |  21 -
 .../templates/topology_mappings.data.j2         |  24 -
 .../main/resources/stacks/HDP/3.0/kerberos.json |  79 --
 .../main/resources/stacks/HDP/3.0/widgets.json  |  95 ---
 .../src/main/resources/upgrade-pack.xsd         |   9 +
 ambari-server/src/main/resources/widgets.json   |  95 +++
 .../server/api/services/AmbariMetaInfoTest.java |  49 +-
 .../AmbariManagementControllerImplTest.java     |   6 +-
 .../AmbariManagementControllerTest.java         |  17 +
 .../internal/UpgradeResourceProviderTest.java   |  33 +-
 .../upgrade/StackVersionListenerTest.java       |   1 -
 .../apache/ambari/server/orm/OrmTestHelper.java |   2 +-
 .../FixCapacitySchedulerOrderingPolicyTest.java |   2 +-
 .../upgrades/FixOozieAdminUsersTest.java        |   2 +-
 .../upgrades/FixYarnWebServiceUrlTest.java      |   2 +-
 .../HBaseEnvMaxDirectMemorySizeActionTest.java  |   2 +-
 .../upgrades/HiveEnvClasspathActionTest.java    |   2 +-
 .../upgrades/HiveZKQuorumConfigActionTest.java  |   2 +-
 .../upgrades/KerberosKeytabsActionTest.java     |  17 +
 .../upgrades/RangerConfigCalculationTest.java   |   2 +-
 .../RangerKerberosConfigCalculationTest.java    |   2 +-
 .../upgrades/RangerKmsProxyConfigTest.java      |   2 +-
 .../RangerUsersyncConfigCalculationTest.java    |   2 +-
 .../RangerWebAlertConfigActionTest.java         |   6 +-
 .../SparkShufflePropertyConfigTest.java         |   2 +-
 .../UpgradeUserKerberosDescriptorTest.java      |   4 +-
 .../ambari/server/stack/StackManagerTest.java   |   8 -
 .../state/kerberos/KerberosDescriptorTest.java  |  56 ++
 .../KerberosDescriptorUpdateHelperTest.java     |   1 +
 .../kerberos/VariableReplacementHelperTest.java |  35 +-
 .../svccomphost/ServiceComponentHostTest.java   | 102 ++-
 .../topology/AsyncCallableServiceTest.java      |  89 +--
 .../ClusterDeployWithStartOnlyTest.java         |   6 +
 ...InstallWithoutStartOnComponentLevelTest.java |   6 +
 .../ClusterInstallWithoutStartTest.java         |   6 +
 .../topology/ConfigureClusterTaskTest.java      |  64 +-
 .../server/topology/TopologyManagerTest.java    |  14 +-
 .../server/upgrade/UpgradeCatalog260Test.java   |  14 +
 .../src/test/python/TestAmbariServer.py         |   4 +-
 ambari-server/src/test/python/TestMpacks.py     |  12 +-
 .../src/test/python/TestResourceFilesKeeper.py  |   1 +
 .../src/test/python/TestUpgradeSummary.py       |   6 +-
 .../AMBARI_METRICS/test_service_advisor.py      | 596 ++++++++++++++
 .../HIVE/test_jdbc_driver_config.py             |  18 +-
 .../RANGER/test_db_flavor_config.py             |  17 +-
 .../RANGER_KMS/test_db_flavor_config.py         |  17 +-
 .../SQOOP/test_jdbc_driver_config.py            |  16 +-
 .../configs/ranger_admin_default.json           |  55 ++
 .../custom_actions/TestInstallPackages.py       |  50 +-
 .../stacks/2.0.6/HBASE/test_hbase_client.py     |   1 -
 .../stacks/2.0.6/HBASE/test_hbase_master.py     |   6 +-
 .../2.0.6/HBASE/test_phoenix_queryserver.py     |   7 +
 .../python/stacks/2.0.6/HDFS/test_datanode.py   |  38 +-
 .../python/stacks/2.0.6/HDFS/test_namenode.py   |   5 +-
 .../stacks/2.0.6/HIVE/test_hive_metastore.py    |  42 +-
 .../stacks/2.0.6/HIVE/test_hive_server.py       | 158 ++--
 .../2.0.6/HIVE/test_hive_service_check.py       |   4 +-
 .../stacks/2.0.6/HIVE/test_webhcat_server.py    |  41 +-
 .../stacks/2.0.6/OOZIE/test_oozie_server.py     |   9 +
 .../2.0.6/OOZIE/test_oozie_service_check.py     |   5 +-
 .../stacks/2.0.6/YARN/test_historyserver.py     |   5 +-
 .../stacks/2.0.6/YARN/test_mapreduce2_client.py |   1 +
 .../stacks/2.0.6/common/test_stack_advisor.py   | 576 --------------
 .../python/stacks/2.0.6/configs/default.json    |   1 +
 .../hooks/after-INSTALL/test_after_install.py   |  88 +--
 .../2.0.6/hooks/before-ANY/test_before_any.py   |  24 +-
 .../hooks/before-INSTALL/test_before_install.py |  14 +-
 .../hooks/before-START/test_before_start.py     |  21 +-
 .../stacks/2.1/FALCON/test_falcon_server.py     |  26 +-
 .../stacks/2.1/HIVE/test_hive_metastore.py      |  54 +-
 .../stacks/2.2/PIG/test_pig_service_check.py    |  13 +
 .../stacks/2.2/SPARK/test_job_history_server.py |  18 +-
 .../stacks/2.2/common/test_conf_select.py       |  13 +-
 .../stacks/2.2/common/test_stack_advisor.py     | 511 ------------
 .../2.2/common/test_stack_advisor_perf.py       |  66 +-
 .../stacks/2.3/MAHOUT/test_mahout_client.py     |   2 +-
 .../2.3/MAHOUT/test_mahout_service_check.py     |  28 +-
 .../2.3/SPARK/test_spark_thrift_server.py       |   8 +-
 .../stacks/2.3/common/test_stack_advisor.py     |   2 +-
 .../stacks/2.5/RANGER_KMS/test_kms_server.py    |  24 +-
 .../python/stacks/2.5/SPARK/test_spark_livy.py  |  16 +-
 .../stacks/2.5/ZEPPELIN/test_zeppelin_060.py    |  20 +-
 .../stacks/2.5/configs/ranger-kms-secured.json  |   6 +-
 .../test/python/stacks/2.6/DRUID/test_druid.py  |  22 +-
 .../stacks/2.6/SPARK2/test_spark_livy2.py       |  16 +-
 .../stacks/2.6/ZEPPELIN/test_zeppelin_070.py    | 236 +++---
 .../stacks/2.6/common/test_stack_advisor.py     | 190 ++++-
 .../src/test/python/stacks/utils/RMFTestCase.py |  43 +-
 ambari-server/src/test/resources/kerberos.json  |  42 +
 .../resources/stacks/HDP/2.0.8/kerberos.json    |  42 -
 ambari-server/src/test/resources/widgets.json   |  95 +++
 ambari-web/app/controllers/installer.js         |   8 +-
 .../journalNode/progress_controller.js          |   4 +-
 .../journalNode/step4_controller.js             |   6 +-
 .../nameNode/step5_controller.js                |   6 +-
 .../highAvailability/progress_controller.js     |   4 +-
 .../progress_popup_controller.js                |   2 +-
 .../main/admin/kerberos/step2_controller.js     |   7 +-
 .../main/admin/stack_and_upgrade_controller.js  |   4 +-
 ambari-web/app/controllers/main/host/details.js |   2 -
 .../controllers/main/service/info/summary.js    |   1 +
 .../main/service/reassign/step4_controller.js   |   2 -
 .../app/controllers/wizard/step6_controller.js  |  15 +-
 .../app/controllers/wizard/step8_controller.js  |  15 +-
 .../app/mappers/repository_version_mapper.js    |   2 +-
 ambari-web/app/messages.js                      |   8 +-
 .../app/mixins/common/configs/configs_saver.js  |  26 +-
 .../main/service/configs/config_overridable.js  |   1 -
 .../models/stack_version/repository_version.js  |   2 +-
 ambari-web/app/routes/add_kerberos_routes.js    |  22 +-
 ambari-web/app/styles/alerts.less               |  14 +-
 ambari-web/app/styles/application.less          |   7 +-
 ambari-web/app/styles/bootstrap_overrides.less  |   4 +
 ambari-web/app/styles/dashboard.less            |  22 +-
 ambari-web/app/styles/modal_popups.less         |   2 +-
 ambari-web/app/styles/stack_versions.less       |  13 +-
 .../app/styles/theme/bootstrap-ambari.css       |  17 +-
 ambari-web/app/styles/top-nav.less              |   5 +-
 ambari-web/app/styles/wizard.less               |   9 +
 ambari-web/app/templates/common/breadcrumbs.hbs |   4 +-
 .../stack_upgrade/stack_upgrade_wizard.hbs      |   2 +-
 .../main/service/info/service_alert_popup.hbs   |   6 +-
 .../app/templates/main/service/info/summary.hbs |  21 +-
 .../service/info/summary/master_components.hbs  |   4 -
 .../templates/main/service/services/hdfs.hbs    |  38 +-
 ambari-web/app/templates/wizard/step6.hbs       |   2 +-
 ambari-web/app/templates/wizard/step9.hbs       |  12 +-
 ambari-web/app/utils/ajax/ajax.js               |   2 -
 .../stack_upgrade/upgrade_version_box_view.js   |  70 +-
 .../upgrade_version_column_view.js              |   2 +-
 .../admin/stack_upgrade/upgrade_wizard_view.js  |  26 +-
 .../app/views/main/service/info/summary.js      |   8 +
 .../app/views/main/service/services/hdfs.js     |   2 -
 ambari-web/test/controllers/installer_test.js   |  37 +-
 .../journalNode/progress_controller_test.js     |   2 -
 .../progress_controller_test.js                 |   3 -
 .../progress_popup_controller_test.js           |   4 +-
 .../admin/kerberos/step2_controller_test.js     |   9 +-
 .../mixins/common/configs/configs_saver_test.js |  28 +-
 .../stack_version/repository_version_test.js    |  12 +-
 .../upgrade_version_box_view_test.js            | 149 ++--
 .../upgrade_version_column_view_test.js         |   6 +
 .../stack_upgrade/upgrade_wizard_view_test.js   |  38 +
 .../src/main/assemblies/hdf-ambari-mpack.xml    |   1 +
 .../HIVE/package/scripts/hive_client.py         |   2 -
 .../HIVE/package/scripts/hive_metastore.py      |   1 -
 .../HIVE/package/scripts/hive_server.py         |   2 +-
 .../package/scripts/hive_server_interactive.py  |   1 -
 .../HIVE/package/scripts/webhcat_server.py      |   2 -
 .../scripts/application_timeline_server.py      |   1 -
 .../YARN/package/scripts/historyserver.py       |   1 -
 .../YARN/package/scripts/mapreduce2_client.py   |   2 -
 .../YARN/package/scripts/nodemanager.py         |   1 -
 .../YARN/package/scripts/resourcemanager.py     |   1 -
 .../YARN/package/scripts/yarn_client.py         |   1 -
 .../ui/hive-web/app/adapters/application.js     |  18 +
 .../app/controllers/visualization-ui.js         |  10 +-
 .../ui/hive-web/app/utils/constants.js          |   1 +
 .../savedQueries/SavedQueryResourceManager.java |  17 +-
 .../resources/ui/app/components/job-item.js     |  49 +-
 .../resources/ui/app/components/jobs-browser.js |   3 +
 .../ui/app/components/query-result-table.js     |   2 +-
 .../src/main/resources/ui/app/routes/jobs.js    |  11 +
 .../main/resources/ui/app/routes/queries/new.js |   2 +
 .../resources/ui/app/routes/queries/query.js    | 240 ++++--
 .../src/main/resources/ui/app/services/jobs.js  |  31 +-
 .../src/main/resources/ui/app/styles/app.scss   |   8 +
 .../ui/app/templates/components/job-item.hbs    |   2 +-
 .../app/templates/components/jobs-browser.hbs   |   2 +-
 .../main/resources/ui/app/templates/jobs.hbs    |   1 +
 .../hive20/src/main/resources/ui/yarn.lock      |   2 +-
 .../apache/ambari/view/utils/hdfs/HdfsApi.java  |   2 +-
 614 files changed, 13386 insertions(+), 14066 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/assemblies/server.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/agent/StatusCommand.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/agent/StatusCommand.java
index a5b3622,9f8a095..5cc5dd5
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/StatusCommand.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/StatusCommand.java
@@@ -39,9 -39,9 +39,12 @@@ public class StatusCommand extends Agen
    @SerializedName("serviceName")
    private String serviceName;
  
 +  @SerializedName("serviceType")
 +  private String serviceType;
 +
+   @SerializedName("role")
+   private String role;
+ 
    @SerializedName("componentName")
    private String componentName;
  

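The new serviceType and role fields follow the existing Gson convention in this class:
@SerializedName binds each field to the given JSON key when the command is serialized for
the agent. A minimal sketch of that mapping (the class name and sample values below are
illustrative, not taken from this commit):

    import com.google.gson.Gson;
    import com.google.gson.annotations.SerializedName;

    public class StatusCommandSketch {
      @SerializedName("serviceName")
      private String serviceName = "HDFS";

      @SerializedName("serviceType")
      private String serviceType = "HDFS";

      @SerializedName("role")
      private String role = "NAMENODE";

      public static void main(String[] args) {
        // Prints {"serviceName":"HDFS","serviceType":"HDFS","role":"NAMENODE"}
        System.out.println(new Gson().toJson(new StatusCommandSketch()));
      }
    }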
http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
index 8883ced,46ee65a..c07df43
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
@@@ -134,8 -136,9 +145,10 @@@ public class AmbariMetaInfo 
    private File commonServicesRoot;
    private File extensionsRoot;
    private File serverVersionFile;
+   private File commonWidgetsDescriptorFile;
    private File customActionRoot;
 +  private File mpacksV2Staging;
+   private String commonKerberosDescriptorFileLocation;
    private Map<String, VersionDefinitionXml> versionDefinitions = null;
  
  
@@@ -243,9 -228,8 +256,11 @@@
  
      customActionRoot = new File(conf.getCustomActionDefinitionPath());
  
 +    String mpacksV2StagingPath = conf.getMpacksV2StagingPath();
 +    mpacksV2Staging = new File(mpacksV2StagingPath);
 +
+     commonKerberosDescriptorFileLocation = new File(conf.getResourceDirPath(), KERBEROS_DESCRIPTOR_FILE_NAME).getAbsolutePath();
+     commonWidgetsDescriptorFile = new File(conf.getResourceDirPath(), WIDGETS_DESCRIPTOR_FILE_NAME);
    }
  
    /**
@@@ -1414,10 -1360,9 +1425,10 @@@
    /**
     * Ensures that the map of version definition files is populated
     */
-   private void ensureVersionDefinitions() {
+   private synchronized void ensureVersionDefinitions() {
      if (null != versionDefinitions) {
 -      return;
 +      if(versionDefinitions.size() > 0)
 +        return;
      }
  
      versionDefinitions = new HashMap<>();
@@@ -1519,6 -1446,9 +1530,10 @@@
      }
  
      return null;
 +
    }
+ 
+   public File getCommonWidgetsDescriptorFile() {
+     return commonWidgetsDescriptorFile;
+   }
  }
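Two things change in AmbariMetaInfo here: ensureVersionDefinitions() becomes synchronized
and only short-circuits when a previous call actually populated the map, and the new
commonWidgetsDescriptorFile field gains a getter. A minimal sketch of the synchronized
check-then-populate pattern the method now uses (names and contents are illustrative):

    import java.util.HashMap;
    import java.util.Map;

    public class LazyDefinitionsSketch {
      private Map<String, String> versionDefinitions = null;

      private synchronized void ensureVersionDefinitions() {
        // Reload only when no earlier call managed to populate the map.
        if (versionDefinitions != null && versionDefinitions.size() > 0) {
          return;
        }
        versionDefinitions = new HashMap<>();
        versionDefinitions.put("HDP-2.6", "<parsed VDF placeholder>");
      }
    }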

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
index c56483d,dc97871..a4339ab
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
@@@ -71,9 -72,9 +72,10 @@@ import org.apache.ambari.server.control
  import org.apache.ambari.server.controller.internal.KerberosDescriptorResourceProvider;
  import org.apache.ambari.server.controller.internal.MemberResourceProvider;
  import org.apache.ambari.server.controller.internal.RepositoryVersionResourceProvider;
 +import org.apache.ambari.server.controller.internal.ServiceGroupResourceProvider;
  import org.apache.ambari.server.controller.internal.ServiceResourceProvider;
  import org.apache.ambari.server.controller.internal.UpgradeResourceProvider;
+ import org.apache.ambari.server.controller.internal.ViewInstanceResourceProvider;
  import org.apache.ambari.server.controller.logging.LoggingRequestHelperFactory;
  import org.apache.ambari.server.controller.logging.LoggingRequestHelperFactoryImpl;
  import org.apache.ambari.server.controller.metrics.MetricPropertyProviderFactory;

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/controller/ResourceProviderFactory.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractControllerResourceProvider.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/DefaultProviderModule.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListener.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/mpack/MpackGenerator.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/mpack/MpackGenerator.java
index 5d4b832,0000000..130ff62
mode 100644,000000..100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/mpack/MpackGenerator.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/mpack/MpackGenerator.java
@@@ -1,561 -1,0 +1,544 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + * <p>
 + * http://www.apache.org/licenses/LICENSE-2.0
 + * <p>
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.ambari.server.mpack;
 +
 +import java.io.BufferedOutputStream;
 +import java.io.BufferedWriter;
 +import java.io.File;
 +import java.io.FileInputStream;
 +import java.io.FileNotFoundException;
 +import java.io.FileOutputStream;
 +import java.io.FileWriter;
 +import java.io.IOException;
 +import java.net.URL;
 +import java.nio.file.Files;
 +import java.nio.file.Path;
 +import java.nio.file.Paths;
 +import java.nio.file.StandardCopyOption;
 +import java.util.ArrayList;
 +import java.util.Collections;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +
 +import javax.xml.bind.JAXBContext;
 +import javax.xml.bind.Marshaller;
 +
 +import org.apache.ambari.server.stack.ServiceDirectory;
 +import org.apache.ambari.server.stack.StackDirectory;
 +import org.apache.ambari.server.stack.StackManager;
 +import org.apache.ambari.server.state.Mpack;
 +import org.apache.ambari.server.state.Packlet;
 +import org.apache.ambari.server.state.PropertyInfo;
 +import org.apache.ambari.server.state.QuickLinksConfigurationInfo;
 +import org.apache.ambari.server.state.ServiceInfo;
 +import org.apache.ambari.server.state.StackId;
 +import org.apache.ambari.server.state.StackInfo;
 +import org.apache.ambari.server.state.ThemeInfo;
 +import org.apache.ambari.server.state.quicklinks.QuickLinks;
 +import org.apache.ambari.server.state.repository.ManifestServiceInfo;
 +import org.apache.ambari.server.state.repository.VersionDefinitionXml;
 +import org.apache.ambari.server.state.stack.ConfigurationXml;
 +import org.apache.ambari.server.state.stack.RepositoryXml;
 +import org.apache.ambari.server.state.stack.ServiceMetainfoXml;
 +import org.apache.ambari.server.state.stack.StackMetainfoXml;
 +import org.apache.ambari.server.state.stack.StackRoleCommandOrder;
 +import org.apache.ambari.server.state.theme.Theme;
 +import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
 +import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
 +import org.apache.commons.compress.compressors.gzip.GzipCompressorOutputStream;
 +import org.apache.commons.compress.utils.IOUtils;
 +import org.apache.commons.io.FileUtils;
 +import org.apache.commons.lang.StringUtils;
 +import org.codehaus.jackson.map.ObjectMapper;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +import com.google.gson.Gson;
 +import com.google.gson.GsonBuilder;
 +
 +/**
 + * Generate management pack from stack definition
 + */
 +public class MpackGenerator {
 +  private static final Logger LOG = LoggerFactory.getLogger(MpackGenerator.class);
 +
 +  private static final ObjectMapper mapper = new ObjectMapper();
 +  private File commonServicesRoot;
 +  private File stackRoot;
 +  private File mpacksRoot;
 +  private StackId srcStackId;
 +  private StackId dstStackId;
 +  private StackManager stackManager;
 +  private VersionDefinitionXml vdf;
 +  private Gson gson = new GsonBuilder().setPrettyPrinting().create();
 +
 +  /**
 +   * {@link MpackGenerator} constructor
 +   * @param stackRoot           Stack root directory
 +   * @param commonServicesRoot  Common services root directory
 +   * @param srcStackId          Source stack id
 +   * @param vdfUrl              Version Definition File
 +   * @param mpacksRoot          Management pack root directory
 +   * @throws Exception
 +   */
 +  public MpackGenerator(File stackRoot, File commonServicesRoot, StackId srcStackId, URL vdfUrl, File mpacksRoot)
 +    throws Exception {
 +    this.commonServicesRoot = commonServicesRoot;
 +    this.stackRoot = stackRoot;
 +    this.mpacksRoot = mpacksRoot;
 +    this.srcStackId = srcStackId;
 +    this.stackManager = new StackManager(stackRoot, commonServicesRoot, false);
 +    this.vdf = VersionDefinitionXml.load(vdfUrl);
 +    this.dstStackId = new StackId(vdf.release.stackId);
 +  }
 +
 +  /**
 +   * Generate management pack
 +   * @throws Exception
 +   */
 +  public void generateMpack() throws Exception {
 +    String dstStackName = dstStackId.getStackName();
 +    String version = vdf.release.version;
 +    String build = vdf.release.build;
 +
 +    System.out.println("===========================================================");
 +    System.out.println("Source Stack Id: " + srcStackId);
 +    System.out.println("Destination Stack Id: " + dstStackId);
 +    System.out.println("===========================================================");
 +
 +    String mpackName = dstStackName.toLowerCase() + "-ambari-mpack" + "-" + version + "-" + build;
 +    File mpackRootDir = new File(mpacksRoot.getAbsolutePath() + File.separator + mpackName);
 +    if (!mpacksRoot.exists()) {
 +      mpacksRoot.mkdirs();
 +    }
 +    if (mpackRootDir.exists()) {
 +      FileUtils.deleteDirectory(mpackRootDir);
 +    }
 +    mpackRootDir.mkdir();
 +
 +    File mpackPackletsDir = new File(mpackRootDir.getAbsolutePath() + File.separator + "packlets");
 +    if (mpackPackletsDir.exists()) {
 +      mpackPackletsDir.delete();
 +    }
 +    mpackPackletsDir.mkdir();
 +
 +    StackInfo srcStackInfo = stackManager.getStack(srcStackId.getStackName(), srcStackId.getStackVersion());
 +    StackRoleCommandOrder stackRoleCommandOrder = srcStackInfo.getRoleCommandOrder();
 +    FileWriter stackRCOFile = new FileWriter(
 +      mpackRootDir.getAbsolutePath() + File.separator + StackDirectory.RCO_FILE_NAME);
 +    mapper.writerWithDefaultPrettyPrinter().writeValue(stackRCOFile, stackRoleCommandOrder.getContent());
 +
 +    // Export stack configs
 +    File stackConfigDir = new File(
 +      mpackRootDir.getAbsolutePath() + File.separator + StackDirectory.SERVICE_CONFIG_FOLDER_NAME);
 +    exportConfigs(srcStackInfo.getProperties(), stackConfigDir);
 +
-     // Export widgets.json
-     exportFile(new File(srcStackInfo.getWidgetsDescriptorFileLocation()), mpackRootDir);
- 
-     // Export kerberos.json
-     exportFile(new File(srcStackInfo.getKerberosDescriptorFileLocation()), mpackRootDir);
- 
 +    // Export repoinfo.xml
 +    RepositoryXml repositoryXml =  srcStackInfo.getRepositoryXml();
 +    JAXBContext ctx = JAXBContext.newInstance(RepositoryXml.class);
 +    Marshaller marshaller = ctx.createMarshaller();
 +    marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
 +    File reposDir = new File(mpackRootDir.getAbsolutePath() + File.separator + "repos");
 +    if (!reposDir.exists()) {
 +      reposDir.mkdir();
 +    }
 +    FileOutputStream repoXmlFileStream = new FileOutputStream(
 +      reposDir.getAbsolutePath() + File.separator + "repoinfo.xml");
 +    marshaller.marshal(repositoryXml, repoXmlFileStream);
 +    repoXmlFileStream.flush();
 +    repoXmlFileStream.close();
 +
 +    // Copy JSON cluster property files (example: stacks/HDP/2.0.6/properties/stack_tools.json)
 +    File destPropertiesDir = new File(mpackRootDir.getAbsoluteFile() + File.separator + "properties");
 +    if(!destPropertiesDir.exists()) {
 +      destPropertiesDir.mkdir();
 +    }
 +    String srcStackName = srcStackId.getStackName();
 +    String currentStackVersion = srcStackId.getStackVersion();
 +
 +    while (!StringUtils.isEmpty(currentStackVersion)) {
 +      StackInfo currentStackInfo = stackManager.getStack(srcStackName, currentStackVersion);
 +      File srcPropertiesDir = new File(stackRoot.getAbsolutePath() + File.separator + srcStackName + File.separator + currentStackVersion + File.separator + "properties");
 +      if (srcPropertiesDir.exists() && srcPropertiesDir.isDirectory()) {
 +        for (File srcPropertiesFile : srcPropertiesDir.listFiles()) {
 +          File destPropertiesFile = new File(destPropertiesDir.getAbsolutePath() + File.separator + srcPropertiesFile.getName());
 +          if (!destPropertiesFile.exists()) {
 +            FileUtils.copyFile(srcPropertiesFile, destPropertiesFile);
 +          }
 +        }
 +      }
 +      currentStackVersion = currentStackInfo.getParentStackVersion();
 +    }
 +
-     // Copy stack hooks folder
-     String srcStackHooksFolder = srcStackInfo.getStackHooksFolder();
-     srcStackHooksFolder = stackRoot.getAbsolutePath() + File.separator + srcStackHooksFolder;
-     File srcStackHooksFile = new File(srcStackHooksFolder);
-     if (srcStackHooksFile != null && srcStackHooksFile.exists()) {
-       File destStackHooksFile = new File(
-         mpackRootDir.getAbsolutePath() + File.separator
-           + srcStackHooksFile.getName());
-       FileUtils.copyDirectory(srcStackHooksFile, destStackHooksFile);
-     }
- 
 +    // Export stack metainfo.xml
 +    String parentStackVersion = srcStackInfo.getParentStackVersion();
 +    StackMetainfoXml stackMetainfoXml = new StackMetainfoXml();
 +    stackMetainfoXml.setMinJdk(srcStackInfo.getMinJdk());
 +    stackMetainfoXml.setMaxJdk(srcStackInfo.getMaxJdk());
 +    StackMetainfoXml.Version ver = new StackMetainfoXml.Version();
 +    ver.setActive(srcStackInfo.isActive());
 +    stackMetainfoXml.setVersion(ver);
 +    ctx = JAXBContext.newInstance(StackMetainfoXml.class);
 +    marshaller = ctx.createMarshaller();
 +    marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
 +    FileOutputStream stackMetainfoFileStream = new FileOutputStream(
 +      mpackRootDir.getAbsolutePath() + File.separator + "metainfo.xml");
 +    marshaller.marshal(stackMetainfoXml, stackMetainfoFileStream);
 +    stackMetainfoFileStream.flush();
 +    stackMetainfoFileStream.close();
 +
 +    // Export stack advisors
 +    File stackAdvisorsDir = new File(mpackRootDir.getAbsolutePath() + File.separator + "stack-advisor");
 +    if(!stackAdvisorsDir.exists()) {
 +      stackAdvisorsDir.mkdir();
 +    }
 +
 +    currentStackVersion = srcStackId.getStackVersion();
 +    String baseStackAdvisor = null;
 +    String baseStackAdvisorModule = null;
 +    while (!StringUtils.isEmpty(currentStackVersion)) {
 +      // Copy all inherited stack advisors from source stack to "stack-advisor" folder
 +      StackInfo currentStackInfo = stackManager.getStack(srcStackName, currentStackVersion);
 +      File srcStackAdvisor = new File(stackRoot.getAbsolutePath() + File.separator + srcStackName + File.separator + currentStackVersion + File.separator + "services" + File.separator + "stack_advisor.py");
 +      if(srcStackAdvisor.exists()) {
 +        if(baseStackAdvisor == null) {
 +          baseStackAdvisor = srcStackName.toUpperCase() + currentStackVersion.replace(".", "") + "StackAdvisor";
 +          baseStackAdvisorModule = "stack_advisor_" + srcStackName.toLowerCase() + currentStackVersion.replace(".", "");
 +        }
 +        File dstStackAdvisor = new File(
 +          stackAdvisorsDir.getAbsolutePath() + File.separator + "stack_advisor_" + srcStackName.toLowerCase()
 +            + currentStackVersion.replace(".", "") + ".py");
 +        FileUtils.copyFile(srcStackAdvisor, dstStackAdvisor);
 +      }
 +      currentStackVersion = currentStackInfo.getParentStackVersion();
 +    }
 +    if(baseStackAdvisor != null) {
 +      File mpackServicesDir = new File(mpackRootDir.getAbsolutePath() + File.separator + "services");
 +      if (!mpackServicesDir.exists()) {
 +        mpackServicesDir.mkdir();
 +      }
 +      String mpackStackAdvisorName = dstStackId.getStackName().toUpperCase() + dstStackId.getStackVersion().replace(".", "") + "StackAdvisor";
 +      if(baseStackAdvisor.equalsIgnoreCase(mpackStackAdvisorName)) {
 +        // Use top level stack advisor from source stack as mpack stack advisor
 +        String srcPath = stackAdvisorsDir.getAbsolutePath() + File.separator + baseStackAdvisorModule + ".py";
 +        String dstPath = mpackServicesDir.getAbsolutePath() + File.separator + "stack_advisor.py";
 +        Files.move(Paths.get(srcPath), Paths.get(dstPath));
 +      } else {
 +        // Create mpack stack advisor that inherits from top level stack advisor from source stack
 +        FileWriter fileWriter = new FileWriter(
 +          mpackServicesDir.getAbsolutePath() + File.separator + "stack_advisor.py");
 +        BufferedWriter bw = new BufferedWriter(fileWriter);
 +        bw.write("from " + baseStackAdvisorModule + " import *");
 +        bw.newLine();
 +        bw.write("class " + mpackStackAdvisorName + "(" + baseStackAdvisor + "):");
 +        bw.newLine();
 +        bw.write("  pass");
 +        bw.newLine();
 +        bw.flush();
 +        fileWriter.flush();
 +        bw.close();
 +        fileWriter.close();
 +      }
 +    }
 +
 +    Mpack mpack = new Mpack();
 +    mpack.setName(dstStackName);
 +    mpack.setVersion(vdf.release.version);
 +    mpack.setBuildNumber(vdf.release.getFullVersion());
 +    mpack.setStackId(dstStackId.getStackId());
 +    mpack.setDescription(dstStackName + " Ambari Management Pack");
 +    Map<String, String> prereqs = new HashMap<>();
 +    prereqs.put("min-ambari-version", "3.0.0.0");
 +    mpack.setPrerequisites(prereqs);
 +    List<Packlet> packlets = new ArrayList<>();
 +    mpack.setPacklets(packlets);
 +
 +    for (ManifestServiceInfo manifestServiceInfo : vdf.getStackServices(srcStackInfo, true /* skipMissingServices = true */)) {
 +      ServiceInfo serviceInfo = srcStackInfo.getService(manifestServiceInfo.getName());
 +      String serviceName = manifestServiceInfo.getName();
 +      String serviceVersion = (String) manifestServiceInfo.getVersions().toArray()[0];
 +      ServiceInfo clonedServiceInfo =  (ServiceInfo) serviceInfo.clone();
 +      clonedServiceInfo.setVersion(serviceVersion);
 +      clonedServiceInfo.setParent(null);
 +      if (serviceInfo.getMetricsFile() != null) {
 +        clonedServiceInfo.setMetricsFileName(serviceInfo.getMetricsFile().getName());
 +      }
 +      if( serviceInfo.getWidgetsDescriptorFile() != null) {
 +        clonedServiceInfo.setWidgetsFileName(serviceInfo.getWidgetsDescriptorFile().getName());
 +      }
 +
 +      System.out.println("Processing service=" + serviceInfo.getName() + ", version=" + serviceVersion);
 +      System.out.println("Service Parent : " + serviceInfo.getParent());
 +      String packletDirName = serviceName + "-packlet-" + serviceVersion;
 +      String packletTarName = packletDirName + ".tar.gz";
 +      File packletDir = new File(
 +        mpackPackletsDir.getAbsolutePath() + File.separator + packletDirName);
 +      if (!packletDir.exists()) {
 +        packletDir.mkdir();
 +      }
 +      Packlet packlet = new Packlet();
 +      packlet.setType(Packlet.PackletType.SERVICE_PACKLET);
 +      packlet.setName(serviceName);
 +      packlet.setVersion(serviceVersion);
 +      packlet.setServiceId(serviceVersion);
 +      packlet.setSourceLocation("packlets" + File.separator + packletTarName);
 +      packlets.add(packlet);
 +
 +      // Export service metainfo.xml
 +      ServiceMetainfoXml serviceMetainfoXml = new ServiceMetainfoXml();
 +      serviceMetainfoXml.setSchemaVersion(clonedServiceInfo.getSchemaVersion());
 +      List<ServiceInfo> serviceInfos = Collections.singletonList(clonedServiceInfo);
 +      serviceMetainfoXml.setServices(serviceInfos);
 +      ctx = JAXBContext.newInstance(ServiceMetainfoXml.class);
 +      marshaller = ctx.createMarshaller();
 +      marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
 +      FileOutputStream serviceMetainfoFileStream = new FileOutputStream(
 +        packletDir.getAbsolutePath() + File.separator + "metainfo.xml");
 +      marshaller.marshal(serviceMetainfoXml, serviceMetainfoFileStream);
 +      serviceMetainfoFileStream.flush();
 +      serviceMetainfoFileStream.close();
 +
 +      // Export metrics.json
 +      File srcMetricsFile = serviceInfo.getMetricsFile();
 +      exportFile(srcMetricsFile, packletDir);
 +
 +      // Export widgets.json
 +      File srcWidgetsFile = serviceInfo.getWidgetsDescriptorFile();
 +      exportFile(srcWidgetsFile, packletDir);
 +
 +      // Export alerts.json
 +      File srcAlertsFile = serviceInfo.getAlertsFile();
 +      exportFile(srcAlertsFile, packletDir);
 +
 +      // Export kerberos.json
 +      File srcKerberosFile = serviceInfo.getKerberosDescriptorFile();
 +      exportFile(srcKerberosFile, packletDir);
 +
 +      // Export quicklinks
 +      for (Map.Entry<String, QuickLinksConfigurationInfo> entry : serviceInfo.getQuickLinksConfigurationsMap()
 +        .entrySet()) {
 +        QuickLinksConfigurationInfo quickLinksConfigurationInfo = entry.getValue();
 +        String quickLinksFileName = quickLinksConfigurationInfo.getFileName();
 +        for (Map.Entry<String, QuickLinks> quickLinksEntry : quickLinksConfigurationInfo
 +          .getQuickLinksConfigurationMap().entrySet()) {
 +          File quickLinksDir = new File(
 +            packletDir.getAbsolutePath() + File.separator + serviceInfo
 +              .getQuickLinksConfigurationsDir());
 +          if (!quickLinksDir.exists()) {
 +            quickLinksDir.mkdir();
 +          }
 +          FileWriter quickLinksFileWriter = new FileWriter(
 +            quickLinksDir.getAbsolutePath() + File.separator + quickLinksFileName, true);
 +          mapper.writerWithDefaultPrettyPrinter()
 +            .writeValue(quickLinksFileWriter, quickLinksEntry.getValue());
 +        }
 +      }
 +
 +      // Export themes
 +      for (Map.Entry<String, ThemeInfo> entry : serviceInfo.getThemesMap().entrySet()) {
 +        ThemeInfo themeInfo = entry.getValue();
 +        String themeFileName = themeInfo.getFileName();
 +        for (Map.Entry<String, Theme> themeEntry : themeInfo.getThemeMap().entrySet()) {
 +          File themesDir = new File(
 +            packletDir.getAbsolutePath() + File.separator + serviceInfo.getThemesDir());
 +          if (!themesDir.exists()) {
 +            themesDir.mkdir();
 +          }
 +          FileWriter themesFileWriter = new FileWriter(
 +            themesDir.getAbsolutePath() + File.separator + themeFileName, true);
 +          mapper.writerWithDefaultPrettyPrinter().writeValue(themesFileWriter, themeEntry.getValue());
 +        }
 +      }
 +
 +      // Export package folder
 +      String srcPackageFolder = serviceInfo.getServicePackageFolder();
 +      if (srcPackageFolder.startsWith("common-services")) {
 +        srcPackageFolder = srcPackageFolder
 +          .replace("common-services", commonServicesRoot.getAbsolutePath());
 +      } else {
 +        srcPackageFolder = srcPackageFolder.replace("stacks", stackRoot.getAbsolutePath());
 +      }
 +      File srcPackageFile = new File(srcPackageFolder);
 +      if (srcPackageFile != null && srcPackageFile.exists()) {
 +        File destPackageFile = new File(
 +          packletDir.getAbsolutePath() + File.separator
 +            + ServiceDirectory.PACKAGE_FOLDER_NAME);
 +        FileUtils.copyDirectory(srcPackageFile, destPackageFile);
 +      }
 +
 +      // Export merged configs
 +      File configDir = new File(
 +        packletDir.getAbsolutePath() + File.separator + serviceInfo.getConfigDir());
 +      exportConfigs(serviceInfo.getProperties(), configDir);
 +
 +      // Copy service advisor
 +      File srcServiceAdvisor = serviceInfo.getAdvisorFile();
 +      File destServiceAdvisor = new File(packletDir.getAbsolutePath() + File.separator + "service_advisor.py");
 +      if(srcServiceAdvisor != null && srcServiceAdvisor.exists()) {
 +        FileUtils.copyFile(srcServiceAdvisor, destServiceAdvisor);
 +      }
 +
 +      // TODO: Export upgrade packs
 +
 +      // Create packlet tarball
 +      createTarGzip(packletDir.getAbsolutePath());
 +      if(packletDir.exists()) {
 +        FileUtils.deleteDirectory(packletDir);
 +      }
 +    }
 +
 +    // Create mpack.json
 +    String mpackFilePath = mpackRootDir.getAbsolutePath() + File.separator + "mpack.json";
 +    FileWriter mpackFileWriter = new FileWriter(mpackFilePath);
 +    gson.toJson(mpack, Mpack.class, mpackFileWriter);
 +    mpackFileWriter.flush();
 +    mpackFileWriter.close();
 +
 +    // Create mpack tarball
 +    createTarGzip(mpackRootDir.getAbsolutePath());
 +    if(mpackRootDir.exists()) {
 +      FileUtils.deleteDirectory(mpackRootDir);
 +    }
 +  }
 +
 +  public static void exportFile(File srcFile, File destRootDir) throws Exception {
 +    if (srcFile != null && srcFile.exists()) {
 +      Path srcPath = Paths.get(srcFile.getAbsolutePath());
 +      Path destPath = Paths.get(
 +        destRootDir.getAbsolutePath() + File.separator + srcFile.getName());
 +      Files.copy(srcPath, destPath, StandardCopyOption.COPY_ATTRIBUTES,
 +        StandardCopyOption.REPLACE_EXISTING);
 +    }
 +  }
 +
 +  public void createTarGzip(String path) throws FileNotFoundException, IOException {
 +    File file = new File(path);
 +    if(!file.exists()) {
 +      throw new FileNotFoundException(path + " does not exist");
 +    }
 +    String parentDirName = file.getParent();
 +    String tarGzPath = parentDirName + File.separator + file.getName() + ".tar.gz";
 +    // Delete existing tarball
 +    File f = new File(tarGzPath);
 +    if(f.exists()) {
 +      f.delete();
 +    }
 +    FileOutputStream fOut = null;
 +    BufferedOutputStream bOut = null;
 +    GzipCompressorOutputStream gzOut = null;
 +    TarArchiveOutputStream tOut = null;
 +    try {
 +      fOut = new FileOutputStream(new File(tarGzPath));
 +      bOut = new BufferedOutputStream(fOut);
 +      gzOut = new GzipCompressorOutputStream(bOut);
 +      tOut = new TarArchiveOutputStream(gzOut);
 +      addFileToTarGz(tOut, path, "");
 +      System.out.println("Compressed " + path + " -> " + tarGzPath);
 +    } finally {
 +      if(tOut != null) {
 +        tOut.finish();
 +        tOut.close();
 +      }
 +      if(gzOut != null) {
 +        gzOut.close();
 +      }
 +      if(bOut != null) {
 +        bOut.close();
 +      }
 +      if(fOut != null) {
 +        fOut.close();
 +      }
 +    }
 +
 +  }
 +
 +  private void addFileToTarGz(TarArchiveOutputStream tOut, String path, String base)
 +    throws IOException
 +  {
 +    File f = new File(path);
 +    String entryName = base + f.getName();
 +    TarArchiveEntry tarEntry = new TarArchiveEntry(f, entryName);
 +    tOut.putArchiveEntry(tarEntry);
 +
 +    if (f.isFile()) {
 +      try (FileInputStream in = new FileInputStream(f)) {
 +        IOUtils.copy(in, tOut);
 +      }
 +      tOut.closeArchiveEntry();
 +    } else {
 +      tOut.closeArchiveEntry();
 +      File[] children = f.listFiles();
 +      if (children != null) {
 +        for (File child : children) {
 +          addFileToTarGz(tOut, child.getAbsolutePath(), entryName + "/");
 +        }
 +      }
 +    }
 +  }
 +
 +  public static void exportConfigs(List<PropertyInfo> properties, File configDir) throws Exception {
 +    if (!configDir.exists()) {
 +      configDir.mkdir();
 +    }
 +
 +    Map<String, List<PropertyInfo>> configFilesMap = new HashMap<>();
 +    for (PropertyInfo propertyInfo : properties) {
 +      String fileName = propertyInfo.getFilename();
 +      if (!configFilesMap.containsKey(fileName)) {
 +        configFilesMap.put(fileName, new ArrayList<PropertyInfo>());
 +      }
 +      configFilesMap.get(fileName).add(propertyInfo);
 +    }
 +
 +    for (Map.Entry<String, List<PropertyInfo>> entry : configFilesMap.entrySet()) {
 +      String fileName = entry.getKey();
 +      ConfigurationXml configXml = new ConfigurationXml();
 +      configXml.setProperties(entry.getValue());
 +      JAXBContext ctx = JAXBContext.newInstance(ConfigurationXml.class);
 +      Marshaller marshaller = ctx.createMarshaller();
 +      marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
 +      FileOutputStream configFileStream = new FileOutputStream(
 +        configDir.getAbsolutePath() + File.separator + fileName);
 +      marshaller.marshal(configXml, configFileStream);
 +      configFileStream.flush();
 +      configFileStream.close();
 +    }
 +  }
 +
 +  /**
 +   * Main entry point for generating an mpack.
 +   * @param args stack directory, common services directory, source stack id, VDF URL, mpacks output directory
 +   * @throws Exception on any failure while reading stack definitions or writing the mpack
 +   */
 +  public static void main(String[] args) throws Exception {
 +    System.out.println("Mpack Generator Started");
 +    String stackDir = args[0];
 +    String commonServicesDir = args[1];
 +    String srcStack = args[2];
 +    String vdfPath = args[3];
 +    String mpacksDir = args[4];
 +    MpackGenerator mpackGenerator = new MpackGenerator(
 +      new File(stackDir),
 +      new File(commonServicesDir),
 +      new StackId(srcStack),
 +      new URL(vdfPath),
 +      new File(mpacksDir));
 +    mpackGenerator.generateMpack();
 +    System.out.println("Mpack Generator Finished");
 +  }
 +}
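
A side note on the stream handling above: addFileToTarGz() opens a FileInputStream per file but never closes it, so archiving a large stack leaks one file handle per entry. A minimal try-with-resources sketch of the same logic (a hypothetical cleanup using the same commons-compress API, not part of the commit):

    private void addFileToTarGz(TarArchiveOutputStream tOut, String path, String base)
        throws IOException {
      File f = new File(path);
      String entryName = base + f.getName();
      tOut.putArchiveEntry(new TarArchiveEntry(f, entryName));
      if (f.isFile()) {
        // try-with-resources closes the per-file stream even if copy() throws
        try (FileInputStream in = new FileInputStream(f)) {
          IOUtils.copy(in, tOut);
        }
        tOut.closeArchiveEntry();
      } else {
        tOut.closeArchiveEntry();
        File[] children = f.listFiles();
        if (children != null) {
          for (File child : children) {
            addFileToTarGz(tOut, child.getAbsolutePath(), entryName + "/");
          }
        }
      }
    }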

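For reference, the five positional arguments consumed by main() above, shown as a hypothetical programmatic invocation (the paths and stack id are placeholders, not values from the commit):

    // Argument order mirrors main(): stack dir, common services dir,
    // source stack id, VDF URL, output mpacks dir.
    MpackGenerator generator = new MpackGenerator(
        new File("/var/lib/ambari-server/resources/stacks"),
        new File("/var/lib/ambari-server/resources/common-services"),
        new StackId("HDP-3.0"),
        new URL("file:///tmp/hdp.vdf.xml"),
        new File("/tmp/mpacks-v2"));
    generator.generateMpack();
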
http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/stack/ServiceDirectory.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java
index da23ebc,eb6737a..9b740d0
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java
@@@ -205,91 -193,6 +210,85 @@@ public class StackManager 
      populateDB(stackDao, extensionDao);
    }
  
 +  /**
 +   * Constructor. Initializes a StackManager for merging service definitions and creating management packs.
 +   * @param stackRoot root directory of the stack definitions
 +   * @param commonServicesRoot root directory of the common service definitions
 +   */
 +  public StackManager(File stackRoot, File commonServicesRoot, boolean validate) throws AmbariException{
 +    LOG.info("Initializing the stack manager...");
 +
 +    if (validate) {
 +      validateStackDirectory(stackRoot);
 +      validateCommonServicesDirectory(commonServicesRoot);
 +    }
 +
 +    stackMap = new HashMap<>();
 +
 +    parseDirectories(stackRoot, commonServicesRoot, null);
 +
 +    fullyResolveCommonServices(stackModules, commonServiceModules, extensionModules);
 +    fullyResolveExtensions(stackModules, commonServiceModules, extensionModules);
 +    fullyResolveStacks(stackModules, commonServiceModules, extensionModules);
 +  }
 +
 +  protected void updateArchives(
 +    File resourcesRoot, File stackRoot, Map<String, StackModule> stackModules, Map<String, ServiceModule> commonServiceModules,
 +    Map<String, ExtensionModule> extensionModules ) throws AmbariException {
 +
 +    LOG.info("Refreshing archives ...");
 +
 +    LOG.debug("Refreshing archives for stacks");
 +    for (StackModule stackModule : stackModules.values()) {
 +      LOG.debug("Refreshing archives for stack : " + stackModule.getId());
-       String hooksDir = stackModule.getStackDirectory().getHooksDir();
-       if(hooksDir != null) {
-         LOG.debug("Refreshing archive for stack hooks directory : " + hooksDir);
-         String hooksAbsolutePath = stackRoot.getAbsolutePath() + File.separator + hooksDir;
-         ResourceFilesKeeperHelper.updateDirectoryArchive(hooksAbsolutePath, false);
-       }
 +      for(ServiceModule serviceModule : stackModule.getServiceModules().values()) {
 +        String packageDir = serviceModule.getServiceDirectory().getPackageDir();
 +        if(packageDir != null) {
 +          LOG.debug("Refreshing archive for stack service package directory : " + packageDir);
 +          String packageAbsoluteDir = resourcesRoot.getAbsolutePath() + File.separator + packageDir;
 +          ResourceFilesKeeperHelper.updateDirectoryArchive(packageAbsoluteDir, false);
 +        }
 +      }
 +    }
 +
 +    LOG.debug("Refreshing archives for common services");
 +    for(ServiceModule serviceModule : commonServiceModules.values()) {
 +      String packageDir = serviceModule.getServiceDirectory().getPackageDir();
 +      if(packageDir != null) {
 +        LOG.debug("Refreshing archive for common service package directory : " + packageDir);
 +        String packageAbsoluteDir = resourcesRoot.getAbsolutePath() + File.separator + packageDir;
 +        ResourceFilesKeeperHelper.updateDirectoryArchive(packageAbsoluteDir, false);
 +      }
 +    }
 +
 +    LOG.debug("Refreshing archives for extensions");
 +    for(ExtensionModule extensionModule : extensionModules.values()) {
 +      LOG.debug("Refreshing archives for extension module" + extensionModule.getId());
 +      for(ServiceModule serviceModule : extensionModule.getServiceModules().values()) {
 +        String packageDir = serviceModule.getServiceDirectory().getPackageDir();
 +        if(packageDir != null) {
 +          LOG.debug("Refreshing archive for extension service package directory : " + packageDir);
 +          String packageAbsoluteDir = resourcesRoot.getAbsolutePath() + File.separator + packageDir;
 +          ResourceFilesKeeperHelper.updateDirectoryArchive(packageAbsoluteDir, false);
 +        }
 +      }
 +    }
 +
 +    List<String> miscDirs = new ArrayList<String>() {{
 +      add(CUSTOM_ACTIONS_DIR);
 +      add(HOST_SCRIPTS_DIR);
 +      add(DASHBOARDS_DIR);
 +    }};
 +
 +    LOG.debug("Refreshing archives for misc directories");
 +    for(String miscDir : miscDirs) {
 +      LOG.debug("Refreshing archive for misc directory : " + miscDir);
 +      String miscAbsolutePath = resourcesRoot.getAbsolutePath() + File.separator + miscDir;
 +      ResourceFilesKeeperHelper.updateDirectoryArchive(miscAbsolutePath, false);
 +    }
 +    LOG.info("Refreshing archives finished!");
 +  }
 +
    protected void parseDirectories(File stackRoot, File commonServicesRoot, File extensionRoot) throws AmbariException {
      commonServiceModules = parseCommonServicesDirectory(commonServicesRoot);
      stackModules = parseStackDirectory(stackRoot);

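One small note on updateArchives() above: the miscDirs list is built with double-brace initialization, which creates an anonymous ArrayList subclass holding a reference to the enclosing StackManager instance. A minimal equivalent without that pattern (a hypothetical cleanup, not part of the commit):

    // java.util.Arrays.asList builds the same fixed list without an
    // anonymous ArrayList subclass capturing the enclosing instance.
    List<String> miscDirs = Arrays.asList(
        CUSTOM_ACTIONS_DIR, HOST_SCRIPTS_DIR, DASHBOARDS_DIR);
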
http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceInfo.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
index 56d352e,4af83ef..13ceeb9
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
@@@ -64,7 -67,8 +67,9 @@@ import org.apache.ambari.server.state.H
  import org.apache.ambari.server.state.HostHealthStatus.HealthStatus;
  import org.apache.ambari.server.state.HostState;
  import org.apache.ambari.server.state.MaintenanceState;
++import org.apache.ambari.server.state.Service;
  import org.apache.ambari.server.state.StackId;
+ import org.apache.ambari.server.state.UpgradeState;
  import org.apache.ambari.server.state.configgroup.ConfigGroup;
  import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
  import org.apache.ambari.server.state.fsm.SingleArcTransition;
@@@ -1168,6 -1172,45 +1173,48 @@@ public class HostImpl implements Host 
  
      return false;
    }
+ 
+   /**
+    * {@inheritDoc}
+    */
+   @Override
+   public boolean isRepositoryVersionCorrect(RepositoryVersionEntity repositoryVersion)
+       throws AmbariException {
+     HostEntity hostEntity = getHostEntity();
+     Collection<HostComponentStateEntity> hostComponentStates = hostEntity.getHostComponentStateEntities();
+ 
+     // for every host component, if it matches the desired repo and has reported
+     // the correct version then we're good
+     for (HostComponentStateEntity hostComponentState : hostComponentStates) {
+       ServiceComponentDesiredStateEntity desiredComponentState = hostComponentState.getServiceComponentDesiredStateEntity();
+       RepositoryVersionEntity desiredRepositoryVersion = desiredComponentState.getDesiredRepositoryVersion();
+ 
++      Long clusterId = hostComponentState.getClusterId();
++      Cluster cluster = clusters.getCluster(clusterId);
++      Service s = cluster.getService(hostComponentState.getServiceId());
+       ComponentInfo componentInfo = ambariMetaInfo.getComponent(
+           desiredRepositoryVersion.getStackName(), desiredRepositoryVersion.getStackVersion(),
 -          hostComponentState.getServiceName(), hostComponentState.getComponentName());
++          s.getName(), hostComponentState.getComponentName());
+ 
+       // skip components which don't advertise a version
+       if (!componentInfo.isVersionAdvertised()) {
+         continue;
+       }
+ 
+       // we only care about checking the specified repo version for this host
+       if (!repositoryVersion.equals(desiredRepositoryVersion)) {
+         continue;
+       }
+ 
+       String versionAdvertised = hostComponentState.getVersion();
+       if (hostComponentState.getUpgradeState() == UpgradeState.IN_PROGRESS
+           || !StringUtils.equals(versionAdvertised, repositoryVersion.getVersion())) {
+         return false;
+       }
+     }
+ 
+     return true;
+   }
  }
  
  

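To summarize isRepositoryVersionCorrect() above: a host reports the correct repository version only if every component mapped to that repository has finished upgrading and advertises the exact expected version string. A condensed sketch of the same predicate (leaving aside the version-advertising filter, which the full method applies first):

    // Hypothetical restatement using streams; the types are those from the diff.
    boolean correct = hostComponentStates.stream()
        .filter(hcs -> repositoryVersion.equals(
            hcs.getServiceComponentDesiredStateEntity().getDesiredRepositoryVersion()))
        .allMatch(hcs -> hcs.getUpgradeState() != UpgradeState.IN_PROGRESS
            && StringUtils.equals(hcs.getVersion(), repositoryVersion.getVersion()));
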
http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/service_advisor.py
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/resources/common-services/RANGER_KMS/1.0.0.3.0/service_advisor.py
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
----------------------------------------------------------------------
diff --cc ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
index dbe2eed,9285526..8c44632
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
@@@ -73,6 -71,6 +73,7 @@@ import org.apache.ambari.server.state.O
  import org.apache.ambari.server.state.PropertyDependencyInfo;
  import org.apache.ambari.server.state.PropertyInfo;
  import org.apache.ambari.server.state.RepositoryInfo;
++import org.apache.ambari.server.state.ServiceGroup;
  import org.apache.ambari.server.state.ServiceInfo;
  import org.apache.ambari.server.state.StackId;
  import org.apache.ambari.server.state.StackInfo;
@@@ -1738,8 -1716,8 +1721,8 @@@ public class AmbariMetaInfoTest 
  
      RepositoryVersionEntity repositoryVersion = ormHelper.getOrCreateRepositoryVersion(
          cluster.getCurrentStackVersion(), repoVersion);
--
--    cluster.addService("HDFS", repositoryVersion);
++    ServiceGroup sg = cluster.addServiceGroup("core");
++    cluster.addService(sg, "HDFS", "HDFS", repositoryVersion);
  
      metaInfo.reconcileAlertDefinitions(clusters);
  
@@@ -1962,7 -1947,7 +1952,8 @@@
      Properties properties = new Properties();
      properties.setProperty(Configuration.METADATA_DIR_PATH.getKey(), stackRoot.getPath());
      properties.setProperty(Configuration.SERVER_VERSION_FILE.getKey(), versionFile.getPath());
 +    properties.setProperty(Configuration.MPACKS_V2_STAGING_DIR_PATH.getKey(), "src/test/resources/mpacks-v2");
+     properties.setProperty(Configuration.RESOURCES_DIR.getKey(), resourcesRoot.getPath());
      Configuration configuration = new Configuration(properties);
  
      TestAmbariMetaInfo metaInfo = new TestAmbariMetaInfo(configuration);

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelperTest.java
----------------------------------------------------------------------
diff --cc ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelperTest.java
index f7f7ac6,a3d13b1..049fe27
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelperTest.java
@@@ -69,11 -66,10 +69,12 @@@ public class KerberosDescriptorUpdateHe
          properties.put("common.services.path", "src/main/resources/common-services");
          properties.put("server.version.file", "target/version");
          properties.put("custom.action.definitions", "/tmp/nofile");
 +        properties.put("mpacks-v2.staging.path","src/test/resources/mpacks-v2");
+         properties.put("resources.dir", "src/main/resources");
          Configuration configuration = new Configuration(properties);
  
 -        install(new FactoryModuleBuilder().build(StackManagerFactory.class));
 +        install(new FactoryModuleBuilder().implement(
 +          StackManager.class, StackManagerMock.class).build(StackManagerFactory.class));
  
          bind(Clusters.class).toInstance(createNiceMock(Clusters.class));
          bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));

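For context on the FactoryModuleBuilder change above: implement() tells the generated Guice factory which concrete class to construct for a given interface, which is how the test receives a StackManagerMock wherever StackManagerFactory would otherwise produce a StackManager. A generic sketch of the assisted-inject pattern, with hypothetical Widget names rather than Ambari types:

    import com.google.inject.AbstractModule;
    import com.google.inject.Inject;
    import com.google.inject.assistedinject.Assisted;
    import com.google.inject.assistedinject.FactoryModuleBuilder;

    interface Widget {}

    class MockWidget implements Widget {
      @Inject
      MockWidget(@Assisted String name) {
        // "name" is supplied by the caller of WidgetFactory.create()
      }
    }

    interface WidgetFactory {
      Widget create(String name);
    }

    class TestModule extends AbstractModule {
      @Override
      protected void configure() {
        // Every Widget built through WidgetFactory is a MockWidget.
        install(new FactoryModuleBuilder()
            .implement(Widget.class, MockWidget.class)
            .build(WidgetFactory.class));
      }
    }
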
http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-web/app/controllers/installer.js
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-web/app/controllers/wizard/step8_controller.js
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-web/app/utils/ajax/ajax.js
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-web/test/controllers/installer_test.js
----------------------------------------------------------------------
diff --cc ambari-web/test/controllers/installer_test.js
index 37c2303,65a1e5c..d2d4f2c
--- a/ambari-web/test/controllers/installer_test.js
+++ b/ambari-web/test/controllers/installer_test.js
@@@ -494,27 -468,44 +494,44 @@@ describe('App.InstallerController', fun
        };
  
        beforeEach(function () {
+         sinon.spy(checker, 'loadStacks');
 -        installerController.loadMap['1'][0].callback.call(checker);
 +        installerController.loadMap['step1'][0].callback.call(checker);
        });
  
-       it('stack info is loaded', function () {
-         expect(loadStacks).to.be.true;
+       afterEach(function() {
+         checker.loadStacks.restore();
+       });
+ 
+       it('should call loadStacks, stack info not loaded', function () {
+         expect(checker.loadStacks.calledOnce).to.be.true;
        });
      });
  
-     describe ('Should load stacks async', function() {
-       var loadStacksVersions = false;
+     describe('Should load stacks async', function() {
        var checker = {
-         loadStacksVersions: function() {
-           loadStacksVersions = true;
-         }
+         loadStacksVersions: Em.K
        };
  
+       beforeEach(function () {
+         sinon.spy(checker, 'loadStacksVersions');
+       });
+ 
+       afterEach(function() {
+         checker.loadStacksVersions.restore();
+       });
+ 
        it('stack versions are loaded', function () {
 -        installerController.loadMap['1'][1].callback.call(checker, true).then(function(data){
 +        installerController.loadMap['step1'][1].callback.call(checker, true).then(function(data){
            expect(data).to.be.true;
          });
-         expect(loadStacksVersions).to.be.false;
+         expect(checker.loadStacksVersions.called).to.be.false;
+       });
+ 
+       it('should call loadStacksVersions, stack versions not loaded', function () {
+         installerController.loadMap['1'][1].callback.call(checker, false).then(function(data){
+           expect(data).to.be.true;
+         });
+         expect(checker.loadStacksVersions.calledOnce).to.be.true;
        });
      });
  

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-web/test/mixins/common/configs/configs_saver_test.js
----------------------------------------------------------------------


[23/50] [abbrv] ambari git commit: AMBARI-22160. hadooplzo package installation failed on devdeploys (aonishuk)

Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/fc80a183/ambari-server/snippet/Snippet.java
----------------------------------------------------------------------
diff --git a/ambari-server/snippet/Snippet.java b/ambari-server/snippet/Snippet.java
new file mode 100644
index 0000000..f13d533
--- /dev/null
+++ b/ambari-server/snippet/Snippet.java
@@ -0,0 +1,8 @@
+package snippet;
+
+public class Snippet {
+  public static void main(String[] args) {
+    // /home/user/ambari/ambari-views/bin/.project
+  }
+}
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/fc80a183/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
index 2224d31..c32044c 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
@@ -95,7 +95,9 @@ class TestHBaseMaster(RMFTestCase):
                          try_install=True,
                          os_type=('Redhat', '6.4', 'Final'),
                          checked_call_mocks = [(0, "OK.", "")],
+                         available_packages_in_repos = ['hbase_2_3_0_1_1234'],
                          )
+      
 
       # only assert that the correct package is trying to be installed
       self.assertResourceCalled('Package', 'hbase_2_3_0_1_1234',

http://git-wip-us.apache.org/repos/asf/ambari/blob/fc80a183/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
index bff8642..ae33a2a 100644
--- a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
+++ b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
@@ -80,7 +80,8 @@ class RMFTestCase(TestCase):
                     mocks_dict={},
                     try_install=False,
                     command_args=[],
-                    log_out_files=False):
+                    log_out_files=False,
+                    available_packages_in_repos = []):
 
     norm_path = os.path.normpath(path)
 
@@ -125,6 +126,7 @@ class RMFTestCase(TestCase):
         Script.instance = None
         script_class_inst = RMFTestCase._get_attr(script_module, classname)()
         script_class_inst.log_out_files = log_out_files
+        script_class_inst.available_packages_in_repos = available_packages_in_repos
         method = RMFTestCase._get_attr(script_class_inst, command)
     except IOError, err:
       raise RuntimeError("Cannot load class %s from %s: %s" % (classname, norm_path, err.message))


[18/50] [abbrv] ambari git commit: AMBARI-22153. On Zeppelin restart sometimes interpreter settings get reset (Prabhjyot Singh via Venkata Sairam)

Posted by ja...@apache.org.
AMBARI-22153. On Zeppelin restart sometimes interpreter settings get reset (Prabhjyot Singh via Venkata Sairam)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/84e616da
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/84e616da
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/84e616da

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: 84e616da753224c43d62ddaeb8f1ef935c62d876
Parents: e61556c
Author: Venkata Sairam <ve...@gmail.com>
Authored: Mon Oct 9 12:04:32 2017 +0530
Committer: Venkata Sairam <ve...@gmail.com>
Committed: Mon Oct 9 12:04:32 2017 +0530

----------------------------------------------------------------------
 .../ZEPPELIN/0.7.0/package/scripts/master.py    |  33 +++---
 .../stacks/2.6/ZEPPELIN/test_zeppelin_070.py    | 101 +++++--------------
 2 files changed, 45 insertions(+), 89 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/84e616da/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/package/scripts/master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/package/scripts/master.py b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/package/scripts/master.py
index a450fb6..d615d06 100644
--- a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/package/scripts/master.py
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/package/scripts/master.py
@@ -192,18 +192,8 @@ class Master(Script):
       notebook_directory = "/user/" + format("{zeppelin_user}") + "/" + \
                            params.config['configurations']['zeppelin-config']['zeppelin.notebook.dir']
 
-    kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-    kinit_if_needed = format("{kinit_path_local} -kt {zeppelin_kerberos_keytab} {zeppelin_kerberos_principal};")
-
-    notebook_directory_exists = shell.call(format("{kinit_if_needed} hdfs --config {hadoop_conf_dir} dfs -test -e {notebook_directory};echo $?"),
-                                           user=params.zeppelin_user)[1]
-
-    #if there is no kerberos setup then the string will contain "-bash: kinit: command not found"
-    if "\n" in notebook_directory_exists:
-      notebook_directory_exists = notebook_directory_exists.split("\n")[1]
 
-    # '1' means it does not exists
-    if notebook_directory_exists == '1':
+    if not self.is_path_exists_in_HDFS(notebook_directory, params.zeppelin_user):
       # hdfs dfs -mkdir {notebook_directory}
       params.HdfsResource(format("{notebook_directory}"),
                           type="directory",
@@ -310,6 +300,22 @@ class Master(Script):
 
     return hdfs_interpreter_config
 
+  def is_path_exists_in_HDFS(self, path, as_user):
+    kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+    kinit_if_needed = format("{kinit_path_local} -kt {zeppelin_kerberos_keytab} {zeppelin_kerberos_principal};")
+    path_exists = shell.call(format("{kinit_if_needed} hdfs --config {hadoop_conf_dir} dfs -test -e {path};echo $?"),
+                             user=as_user)[1]
+
+    # if there is no kerberos setup then the string will contain "-bash: kinit: command not found"
+    if "\n" in path_exists:
+      path_exists = path_exists.split("\n")[1]
+
+    # exit code '0' means the path exists, '1' means it does not
+    if path_exists == '0':
+      return True
+    else:
+      return False
+
   def get_interpreter_settings(self):
     import params
     import json
@@ -320,12 +326,14 @@ class Master(Script):
 
       if 'zeppelin.config.fs.dir' in params.config['configurations']['zeppelin-config']:
         zeppelin_conf_fs = self.getZeppelinConfFS(params)
-        if os.path.exists(zeppelin_conf_fs):
+
+        if self.is_path_exists_in_HDFS(zeppelin_conf_fs, params.zeppelin_user):
           # copy from hdfs to /etc/zeppelin/conf/interpreter.json
           params.HdfsResource(interpreter_config,
                               type="file",
                               action="download_on_execute",
                               source=zeppelin_conf_fs,
+                              user=params.zeppelin_user,
                               group=params.zeppelin_group,
                               owner=params.zeppelin_user)
         else:
@@ -353,6 +361,7 @@ class Master(Script):
                             type="file",
                             action="create_on_execute",
                             source=interpreter_config,
+                            user=params.zeppelin_user,
                             group=params.zeppelin_group,
                             owner=params.zeppelin_user,
                             replace_existing_files=True)

http://git-wip-us.apache.org/repos/asf/ambari/blob/84e616da/ambari-server/src/test/python/stacks/2.6/ZEPPELIN/test_zeppelin_070.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/ZEPPELIN/test_zeppelin_070.py b/ambari-server/src/test/python/stacks/2.6/ZEPPELIN/test_zeppelin_070.py
index e8ef262..3064880 100644
--- a/ambari-server/src/test/python/stacks/2.6/ZEPPELIN/test_zeppelin_070.py
+++ b/ambari-server/src/test/python/stacks/2.6/ZEPPELIN/test_zeppelin_070.py
@@ -305,67 +305,32 @@ class TestZeppelin070(RMFTestCase):
                               security_enabled=False,
                               )
 
-    self.assertResourceCalled('HdfsResource', '/etc/zeppelin/conf/interpreter.json',
-        security_enabled = False,
-        hadoop_bin_dir = '/usr/hdp/2.5.0.0-1235/hadoop/bin',
-        keytab = UnknownConfigurationMock(),
-        source = '/user/zeppelin/hdfs:///user/zeppelin/conf/interpreter.json',
-        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
-        hdfs_resource_ignore_file = '/var/lib/ambari-agent/data/.hdfs_resource_ignore',
-        hdfs_site = {u'a': u'b'},
-        kinit_path_local = '/usr/bin/kinit',
-        principal_name = UnknownConfigurationMock(),
-        user = 'hdfs',
-        owner = 'zeppelin',
-        group = 'zeppelin',
-        hadoop_conf_dir = '/usr/hdp/2.5.0.0-1235/hadoop/conf',
-        type = 'file',
-        action = ['download_on_execute'],
-    )
-
     self.assertResourceCalled('File', '/etc/zeppelin/conf/interpreter.json',
-                              content=interpreter_json_generated.template_after_base,
+                          content=interpreter_json_generated.template_after_base,
+                          owner='zeppelin',
+                          group='zeppelin',
+                          )
+
+    self.assertResourceCalled('HdfsResource',
+                              '/user/zeppelin/hdfs:///user/zeppelin/conf/interpreter.json',
+                              security_enabled=False,
+                              hadoop_bin_dir='/usr/hdp/2.5.0.0-1235/hadoop/bin',
+                              keytab=UnknownConfigurationMock(),
+                              source='/etc/zeppelin/conf/interpreter.json',
+                              default_fs='hdfs://c6401.ambari.apache.org:8020',
+                              hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
+                              hdfs_site={u'a': u'b'},
+                              kinit_path_local='/usr/bin/kinit',
+                              principal_name=UnknownConfigurationMock(),
+                              user='zeppelin',
                               owner='zeppelin',
                               group='zeppelin',
+                              replace_existing_files=True,
+                              hadoop_conf_dir='/usr/hdp/2.5.0.0-1235/hadoop/conf',
+                              type='file',
+                              action=['create_on_execute'],
                               )
 
-    self.assertResourceCalled('HdfsResource', '/user/zeppelin/hdfs:///user/zeppelin/conf/interpreter.json',
-        security_enabled = False,
-        hadoop_bin_dir = '/usr/hdp/2.5.0.0-1235/hadoop/bin',
-        keytab = UnknownConfigurationMock(),
-        source = '/etc/zeppelin/conf/interpreter.json',
-        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
-        replace_existing_files = True,
-        hdfs_resource_ignore_file = '/var/lib/ambari-agent/data/.hdfs_resource_ignore',
-        hdfs_site = {u'a': u'b'},
-        kinit_path_local = '/usr/bin/kinit',
-        principal_name = UnknownConfigurationMock(),
-        user = 'hdfs',
-        owner = 'zeppelin',
-        group = 'zeppelin',
-        hadoop_conf_dir = '/usr/hdp/2.5.0.0-1235/hadoop/conf',
-        type = 'file',
-        action = ['create_on_execute'],
-    )
-
-    self.assertResourceCalled('HdfsResource', '/etc/zeppelin/conf/interpreter.json',
-        security_enabled = False,
-        hadoop_bin_dir = '/usr/hdp/2.5.0.0-1235/hadoop/bin',
-        keytab = UnknownConfigurationMock(),
-        source = '/user/zeppelin/hdfs:///user/zeppelin/conf/interpreter.json',
-        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
-        hdfs_resource_ignore_file = '/var/lib/ambari-agent/data/.hdfs_resource_ignore',
-        hdfs_site = {u'a': u'b'},
-        kinit_path_local = '/usr/bin/kinit',
-        principal_name = UnknownConfigurationMock(),
-        user = 'hdfs',
-        owner = 'zeppelin',
-        group = 'zeppelin',
-        hadoop_conf_dir = '/usr/hdp/2.5.0.0-1235/hadoop/conf',
-        type = 'file',
-        action = ['download_on_execute'],
-    )
-
     self.assertResourceCalled('File', '/etc/zeppelin/conf/interpreter.json',
                               content=interpreter_json_generated.template_after_without_spark_and_livy,
                               owner='zeppelin',
@@ -383,7 +348,7 @@ class TestZeppelin070(RMFTestCase):
         hdfs_site = {u'a': u'b'},
         kinit_path_local = '/usr/bin/kinit',
         principal_name = UnknownConfigurationMock(),
-        user = 'hdfs',
+        user = 'zeppelin',
         owner = 'zeppelin',
         group = 'zeppelin',
         hadoop_conf_dir = '/usr/hdp/2.5.0.0-1235/hadoop/conf',
@@ -391,24 +356,6 @@ class TestZeppelin070(RMFTestCase):
         action = ['create_on_execute'],
     )
 
-    self.assertResourceCalled('HdfsResource', '/etc/zeppelin/conf/interpreter.json',
-        security_enabled = False,
-        hadoop_bin_dir = '/usr/hdp/2.5.0.0-1235/hadoop/bin',
-        keytab = UnknownConfigurationMock(),
-        source = '/user/zeppelin/hdfs:///user/zeppelin/conf/interpreter.json',
-        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
-        hdfs_resource_ignore_file = '/var/lib/ambari-agent/data/.hdfs_resource_ignore',
-        hdfs_site = {u'a': u'b'},
-        kinit_path_local = '/usr/bin/kinit',
-        principal_name = UnknownConfigurationMock(),
-        user = 'hdfs',
-        owner = 'zeppelin',
-        group = 'zeppelin',
-        hadoop_conf_dir = '/usr/hdp/2.5.0.0-1235/hadoop/conf',
-        type = 'file',
-        action = ['download_on_execute'],
-    )
-
     self.assertResourceCalled('File', '/etc/zeppelin/conf/interpreter.json',
                               content=interpreter_json_generated.template_after_kerberos,
                               owner='zeppelin',
@@ -421,12 +368,12 @@ class TestZeppelin070(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         source = '/etc/zeppelin/conf/interpreter.json',
         default_fs = 'hdfs://c6401.ambari.apache.org:8020',
-        replace_existing_files = True,
         hdfs_resource_ignore_file = '/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hdfs_site = {u'a': u'b'},
         kinit_path_local = '/usr/bin/kinit',
         principal_name = UnknownConfigurationMock(),
-        user = 'hdfs',
+        replace_existing_files = True,
+        user = 'zeppelin',
         owner = 'zeppelin',
         group = 'zeppelin',
         hadoop_conf_dir = '/usr/hdp/2.5.0.0-1235/hadoop/conf',


[37/50] [abbrv] ambari git commit: Revert "AMBARI-22162. Move out the druid configurations from hive-site to hive-interactive-site. (Slim Bouguerra via Swapan Shridhar)."

Posted by ja...@apache.org.
Revert "AMBARI-22162. Move out the druid configurations from hive-site to hive-interactive-site. (Slim Bouguerra via Swapan Shridhar)."

This reverts commit ce2a0a00921bde8d780c82561902773f76431fce.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/3b8e8071
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/3b8e8071
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/3b8e8071

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: 3b8e807100a988fafc4d55e7a0ad7644fac6cef2
Parents: 8cffd72
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Mon Oct 9 16:59:04 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Mon Oct 9 16:59:04 2017 -0400

----------------------------------------------------------------------
 .../HIVE/0.12.0.2.0/configuration/hive-site.xml | 217 ++++++++++++++++++
 .../configuration/hive-interactive-site.xml     | 225 -------------------
 .../stacks/HDP/2.6/services/stack_advisor.py    |  12 +-
 .../stacks/HDP/2.6/upgrades/config-upgrade.xml  |  21 +-
 4 files changed, 234 insertions(+), 241 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/3b8e8071/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml
index 69d1c69..d66cf4c 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml
@@ -451,6 +451,223 @@ limitations under the License.
     <on-ambari-upgrade add="false"/>
   </property>
 
+  <!-- Druid related properties -->
+  <property>
+    <name>hive.druid.broker.address.default</name>
+    <value>localhost:8082</value>
+    <description>Host name of druid router if any or broker</description>
+    <on-ambari-upgrade add="false"/>
+    <depends-on>
+      <property>
+        <type>druid-router</type>
+        <name>druid.port</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.druid.metadata.uri</name>
+    <value>jdbc:mysql://localhost:3355/druid</value>
+    <description>URI to connect to the database (for example jdbc:mysql://hostname:port/DBName)</description>
+    <on-ambari-upgrade add="false"/>
+    <depends-on>
+      <property>
+        <type>druid-common</type>
+        <name>druid.metadata.storage.connector.connectURI</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.druid.coordinator.address.default</name>
+    <value>localhost:8082</value>
+    <description>Host name of druid router if any or broker</description>
+    <on-ambari-upgrade add="false"/>
+    <depends-on>
+      <property>
+        <type>druid-coordinator</type>
+        <name>druid.port</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.druid.metadata.password</name>
+    <value>{{druid_metadata_password}}</value>
+    <property-type>PASSWORD</property-type>
+    <display-name>Druid Metadata Password</display-name>
+    <description>Druid meta data storage password</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+   <property>
+    <name>hive.druid.metadata.username</name>
+    <value>druid</value>
+    <description>Username used to connect to druid metadata storage</description>
+    <on-ambari-upgrade add="false"/>
+     <depends-on>
+      <property>
+        <type>druid-common</type>
+        <name>druid.metadata.storage.connector.user</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.druid.indexer.segments.granularity</name>
+    <display-name>Default Granularity for the Druid segments</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>YEAR</value>
+        </entry>
+        <entry>
+          <value>MONTH</value>
+        </entry>
+        <entry>
+          <value>WEEK</value>
+        </entry>
+        <entry>
+          <value>DAY</value>
+        </entry>
+        <entry>
+          <value>HOUR</value>
+        </entry>
+        <entry>
+          <value>MINUTE</value>
+        </entry>
+        <entry>
+          <value>SECOND</value>
+        </entry>
+      </entries>
+    </value-attributes>
+    <value>MINUTE</value>
+    <description>Default Granularity for the segments created by the Druid storage handler, this can be overridden per table using table property druid.segment.granularity </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+   <property>
+    <name>hive.druid.indexer.partition.size.max</name>
+    <value>5000000</value>
+    <description>Maximum number of records per segment partition</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+   <property>
+    <name>hive.druid.indexer.memory.rownum.max</name>
+    <value>75000</value>
+    <description>Maximum number of records in memory while storing data in Druid</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>hive.druid.select.distribute</name>
+    <value>true</value>
+    <description>If it is set to true, we distribute the execution of Druid Select queries</description>
+    <on-ambari-upgrade add="false"/>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.druid.basePersistDirectory</name>
+    <value></value>
+    <description>
+      Local temporary directory used to persist intermediate indexing state,
+      if empty (recommended) will default to JVM system property java.io.tmpdir.
+    </description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>hive.druid.storage.storageDirectory</name>
+    <value>{{druid_storage_dir}}</value>
+    <description>
+      Druid deep storage location for segments.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hive.druid.metadata.db.type</name>
+    <display-name>Druid metadata storage type </display-name>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>mysql</value>
+          <label>MYSQL</label>
+        </entry>
+        <entry>
+          <value>postgresql</value>
+          <label>POSTGRESQL</label>
+        </entry>
+      </entries>
+    </value-attributes>
+    <value>mysql</value>
+    <depends-on>
+      <property>
+        <type>druid-common</type>
+        <name>druid.metadata.storage.type</name>
+      </property>
+    </depends-on>
+    <description>Druid metadata storage type</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>hive.druid.passiveWaitTimeMs</name>
+    <value>30000</value>
+    <description>
+      Wait time in ms default to 30 seconds.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>hive.druid.working.directory</name>
+    <value>/tmp/druid-indexing</value>
+    <description>
+      Default hdfs working directory used to store some intermediate metadata.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>hive.druid.maxTries</name>
+    <value>5</value>
+    <description>
+      Maximum number of http call retries before giving up.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>hive.druid.bitmap.type</name>
+    <display-name>Druid metadata storage type </display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>roaring</value>
+        </entry>
+        <entry>
+          <value>concise</value>
+        </entry>
+      </entries>
+    </value-attributes>
+    <value>roaring</value>
+    <description>Druid Coding algorithm use to encode the bitmaps</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
   <!-- This property is removed in HDP 2.5 and higher. -->
   <property>
     <name>atlas.rest.address</name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b8e8071/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-site.xml
index 64cef3e..aae2efa 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-site.xml
@@ -124,229 +124,4 @@ limitations under the License.
     <on-ambari-upgrade add="false"/>
   </property>
 
-  <!-- Druid related properties -->
-  <property>
-    <name>hive.druid.broker.address.default</name>
-    <value>localhost:8082</value>
-    <description>Host name of druid router if any or broker</description>
-    <on-ambari-upgrade add="false"/>
-    <depends-on>
-      <property>
-        <type>druid-router</type>
-        <name>druid.port</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.druid.metadata.uri</name>
-    <value>jdbc:mysql://localhost:3355/druid</value>
-    <description>URI to connect to the database (for example jdbc:mysql://hostname:port/DBName)</description>
-    <on-ambari-upgrade add="false"/>
-    <depends-on>
-      <property>
-        <type>druid-common</type>
-        <name>druid.metadata.storage.connector.connectURI</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.druid.coordinator.address.default</name>
-    <value>localhost:8082</value>
-    <description>Host name of druid router if any or broker</description>
-    <on-ambari-upgrade add="false"/>
-    <depends-on>
-      <property>
-        <type>druid-coordinator</type>
-        <name>druid.port</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.druid.metadata.password</name>
-    <value>{{druid_metadata_password}}</value>
-    <property-type>PASSWORD</property-type>
-    <display-name>Druid Metadata Password</display-name>
-    <description>Druid meta data storage password</description>
-    <value-attributes>
-      <type>password</type>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-   <property>
-    <name>hive.druid.metadata.username</name>
-    <value>druid</value>
-    <description>Username used to connect to druid metadata storage</description>
-    <on-ambari-upgrade add="false"/>
-     <depends-on>
-      <property>
-        <type>druid-common</type>
-        <name>druid.metadata.storage.connector.user</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.druid.indexer.segments.granularity</name>
-    <display-name>Default Granularity for the Druid segments</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>YEAR</value>
-        </entry>
-        <entry>
-          <value>MONTH</value>
-        </entry>
-        <entry>
-          <value>WEEK</value>
-        </entry>
-        <entry>
-          <value>DAY</value>
-        </entry>
-        <entry>
-          <value>HOUR</value>
-        </entry>
-        <entry>
-          <value>MINUTE</value>
-        </entry>
-        <entry>
-          <value>SECOND</value>
-        </entry>
-      </entries>
-    </value-attributes>
-    <value>DAY</value>
-    <description>Default Granularity for the segments created by the Druid storage handler, this can be overridden per table using table property druid.segment.granularity </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-   <property>
-    <name>hive.druid.indexer.partition.size.max</name>
-    <value>1000000</value>
-    <description>Maximum number of records per segment partition</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-   <property>
-    <name>hive.druid.indexer.memory.rownum.max</name>
-    <value>75000</value>
-    <description>Maximum number of records in memory while storing data in Druid</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <property>
-    <name>hive.druid.select.distribute</name>
-    <value>true</value>
-    <description>If it is set to true, we distribute the execution of Druid Select queries</description>
-    <on-ambari-upgrade add="false"/>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-  </property>
-
-  <property>
-    <name>hive.druid.basePersistDirectory</name>
-    <value></value>
-    <description>
-      Local temporary directory used to persist intermediate indexing state,
-      if empty (recommended) will default to JVM system property java.io.tmpdir.
-    </description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <property>
-    <name>hive.druid.storage.storageDirectory</name>
-    <value>{{druid_storage_dir}}</value>
-    <description>
-      Druid deep storage location for segments.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>hive.druid.metadata.db.type</name>
-    <display-name>Druid metadata storage type </display-name>
-    <value-attributes>
-      <overridable>false</overridable>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>mysql</value>
-          <label>MYSQL</label>
-        </entry>
-        <entry>
-          <value>postgresql</value>
-          <label>POSTGRESQL</label>
-        </entry>
-      </entries>
-    </value-attributes>
-    <value>mysql</value>
-    <depends-on>
-      <property>
-        <type>druid-common</type>
-        <name>druid.metadata.storage.type</name>
-      </property>
-    </depends-on>
-    <description>Druid metadata storage type</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <property>
-    <name>hive.druid.passiveWaitTimeMs</name>
-    <value>30000</value>
-    <description>
-      Wait time in ms default to 30 seconds.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <property>
-    <name>hive.druid.working.directory</name>
-    <value>/tmp/druid-indexing</value>
-    <description>
-      Default hdfs working directory used to store some intermediate metadata.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <property>
-    <name>hive.druid.maxTries</name>
-    <value>5</value>
-    <description>
-      Maximum number of http call retries before giving up.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <property>
-    <name>hive.druid.bitmap.type</name>
-    <display-name>Druid metadata storage type </display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>roaring</value>
-        </entry>
-        <entry>
-          <value>concise</value>
-        </entry>
-      </entries>
-    </value-attributes>
-    <value>roaring</value>
-    <description>Druid Coding algorithm use to encode the bitmaps</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <property>
-    <name>hive.druid.http.read.timeout</name>
-    <value>PT10M</value>
-    <description>
-      Maximum number of http call retries before giving up.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b8e8071/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
index b634e71..0d2925e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
@@ -605,7 +605,7 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
     # druid is not in list of services to be installed
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
     if 'DRUID' in servicesList:
-        putHiveInteractiveSiteProperty = self.putProperty(configurations, "hive-interactive-site", services)
+        putHiveSiteProperty = self.putProperty(configurations, "hive-site", services)
         if 'druid-coordinator' in services['configurations']:
             component_hosts = self.getHostsWithComponent("DRUID", 'DRUID_COORDINATOR', services, hosts)
             if component_hosts is not None and len(component_hosts) > 0:
@@ -642,11 +642,11 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
             else:
                 druid_metadata_user = ""
 
-        putHiveInteractiveSiteProperty('hive.druid.broker.address.default', druid_broker_host_port)
-        putHiveInteractiveSiteProperty('hive.druid.coordinator.address.default', druid_coordinator_host_port)
-        putHiveInteractiveSiteProperty('hive.druid.metadata.uri', druid_metadata_uri)
-        putHiveInteractiveSiteProperty('hive.druid.metadata.username', druid_metadata_user)
-        putHiveInteractiveSiteProperty('hive.druid.metadata.db.type', druid_metadata_type)
+        putHiveSiteProperty('hive.druid.broker.address.default', druid_broker_host_port)
+        putHiveSiteProperty('hive.druid.coordinator.address.default', druid_coordinator_host_port)
+        putHiveSiteProperty('hive.druid.metadata.uri', druid_metadata_uri)
+        putHiveSiteProperty('hive.druid.metadata.username', druid_metadata_user)
+        putHiveSiteProperty('hive.druid.metadata.db.type', druid_metadata_type)
 
 
   def recommendHBASEConfigurations(self, configurations, clusterData, services, hosts):

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b8e8071/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
index fd7e438..2b4c656 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
@@ -75,19 +75,11 @@
             <set key ="atlas.jaas.ticketBased-KafkaClient.option.useTicketCache" value="true"
               if-type="cluster-env" if-key="security_enabled" if-value="true"/>
           </definition>
-        </changes>
-      </component>
-      <component name="HIVE_SERVER_INTERACTIVE">
-        <changes>
-          <definition xsi:type="configure" id="llap_update_tez_shuffle_ssl_enable" summary="Update additional LLAP-Tez settings">
-            <type>tez-interactive-site</type>
-            <set key="tez.runtime.shuffle.ssl.enable" value="false"/>
-          </definition>
           <definition xsi:type="configure" id="hdp_2_6_maint_druid_config_for_hive_hook" summary="Updating druid hive related properties">
             <type>hive-site</type>
             <set key="hive.druid.metadata.password" value="{{druid_metadata_password}}" if-type="druid-common"/>
-            <set key="hive.druid.indexer.segments.granularity" value="DAY" if-type="druid-common"/>
-            <set key="hive.druid.indexer.partition.size.max" value="1000000" if-type="druid-common"/>
+            <set key="hive.druid.indexer.segments.granularity" value="MINUTE" if-type="druid-common"/>
+            <set key="hive.druid.indexer.partition.size.max" value="5000000" if-type="druid-common"/>
             <set key="hive.druid.indexer.memory.rownum.max" value="75000" if-type="druid-common"/>
             <set key="hive.druid.select.distribute" value="true" if-type="druid-common"/>
             <set key="hive.druid.basePersistDirectory" value="" if-type="druid-common"/>
@@ -95,6 +87,15 @@
             <set key="hive.druid.passiveWaitTimeMs" value="30000" if-type="druid-common"/>
             <set key="hive.druid.working.directory" value="/tmp/druid-indexing" if-type="druid-common"/>
             <set key="hive.druid.bitmap.type" value="roaring" if-type="druid-common"/>
+
+          </definition>
+        </changes>
+      </component>
+      <component name="HIVE_SERVER_INTERACTIVE">
+        <changes>
+          <definition xsi:type="configure" id="llap_update_tez_shuffle_ssl_enable" summary="Update additional LLAP-Tez settings">
+            <type>tez-interactive-site</type>
+            <set key="tez.runtime.shuffle.ssl.enable" value="false"/>
           </definition>
         </changes>
       </component>


[09/50] [abbrv] ambari git commit: AMBARI-22147 Move stacks/HDP/3.0/kerberos.json to stacks/kerberos.json (dsen)

Posted by ja...@apache.org.
AMBARI-22147 Move stacks/HDP/3.0/kerberos.json to stacks/kerberos.json (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2fb4649e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2fb4649e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2fb4649e

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: 2fb4649e2a9381b98c4867ac4a550f7c3ff7ed23
Parents: 01b79aa
Author: Dmytro Sen <ds...@apache.org>
Authored: Fri Oct 6 19:39:24 2017 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Fri Oct 6 19:39:24 2017 +0300

----------------------------------------------------------------------
 .../server/api/services/AmbariMetaInfo.java     | 54 +++++++------
 .../server/controller/StackVersionResponse.java | 29 -------
 .../internal/StackArtifactResourceProvider.java | 17 +----
 .../ambari/server/stack/ServiceDirectory.java   |  3 +-
 .../ambari/server/stack/StackDirectory.java     | 27 -------
 .../apache/ambari/server/stack/StackModule.java |  6 --
 .../apache/ambari/server/state/ServiceInfo.java |  2 +-
 .../apache/ambari/server/state/StackInfo.java   | 23 ------
 ambari-server/src/main/resources/kerberos.json  | 79 ++++++++++++++++++++
 .../resources/stacks/HDP/2.0.6/kerberos.json    | 79 --------------------
 .../main/resources/stacks/HDP/3.0/kerberos.json | 79 --------------------
 .../server/api/services/AmbariMetaInfoTest.java | 19 +++--
 .../ambari/server/stack/StackManagerTest.java   |  8 --
 .../KerberosDescriptorUpdateHelperTest.java     |  1 +
 ambari-server/src/test/resources/kerberos.json  | 42 +++++++++++
 .../resources/stacks/HDP/2.0.8/kerberos.json    | 42 -----------
 16 files changed, 170 insertions(+), 340 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/2fb4649e/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
index 425d247..46ee65a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
@@ -21,7 +21,6 @@ package org.apache.ambari.server.api.services;
 import static org.apache.ambari.server.controller.spi.Resource.InternalType.Component;
 import static org.apache.ambari.server.controller.spi.Resource.InternalType.HostComponent;
 import static org.apache.ambari.server.controller.utilities.PropertyHelper.AGGREGATE_FUNCTION_IDENTIFIERS;
-import static org.apache.ambari.server.stack.StackDirectory.WIDGETS_DESCRIPTOR_FILE_NAME;
 
 import java.io.File;
 import java.io.FileReader;
@@ -106,6 +105,17 @@ public class AmbariMetaInfo {
    * Version of XML files with support of custom services and custom commands
    */
   public static final String SCHEMA_VERSION_2 = "2.0";
+
+  /**
+   * The filename for a Kerberos descriptor file at either the stack or service level
+   */
+  public static final String KERBEROS_DESCRIPTOR_FILE_NAME = "kerberos.json";
+
+  /**
+   * The filename for a Widgets descriptor file at either the stack or service level
+   */
+  public static final String WIDGETS_DESCRIPTOR_FILE_NAME = "widgets.json";
+
   private final static Logger LOG = LoggerFactory.getLogger(AmbariMetaInfo.class);
 
 
@@ -128,6 +138,7 @@ public class AmbariMetaInfo {
   private File serverVersionFile;
   private File commonWidgetsDescriptorFile;
   private File customActionRoot;
+  private String commonKerberosDescriptorFileLocation;
   private Map<String, VersionDefinitionXml> versionDefinitions = null;
 
 
@@ -217,6 +228,7 @@ public class AmbariMetaInfo {
 
     customActionRoot = new File(conf.getCustomActionDefinitionPath());
 
+    commonKerberosDescriptorFileLocation = new File(conf.getResourceDirPath(), KERBEROS_DESCRIPTOR_FILE_NAME).getAbsolutePath();
     commonWidgetsDescriptorFile = new File(conf.getResourceDirPath(), WIDGETS_DESCRIPTOR_FILE_NAME);
   }
 
@@ -1257,38 +1269,25 @@ public class AmbariMetaInfo {
   public KerberosDescriptor getKerberosDescriptor(String stackName, String stackVersion, boolean includePreconfigureData) throws AmbariException {
     StackInfo stackInfo = getStack(stackName, stackVersion);
 
-    KerberosDescriptor kerberosDescriptor = null;
+    KerberosDescriptor kerberosDescriptor = readKerberosDescriptorFromFile(getCommonKerberosDescriptorFileLocation());
 
+    if (kerberosDescriptor == null) {
+      LOG.warn("Couldn't read common Kerberos descriptor with path {%s}", getCommonKerberosDescriptorFileLocation());
+      kerberosDescriptor = new KerberosDescriptor();
+    }
     // Read in the stack-level Kerberos descriptor pre-configuration data
     if (includePreconfigureData) {
-      kerberosDescriptor = readKerberosDescriptorFromFile(stackInfo.getKerberosDescriptorPreConfigurationFileLocation());
+      KerberosDescriptor preConfigureKerberosDescriptor = readKerberosDescriptorFromFile(stackInfo.getKerberosDescriptorPreConfigurationFileLocation());
 
-      if (kerberosDescriptor != null) {
+      if (preConfigureKerberosDescriptor != null) {
         // Ensure that all services to be pre-configured are flagged appropriately.
-        Map<String, KerberosServiceDescriptor> serviceDescriptors = kerberosDescriptor.getServices();
+        Map<String, KerberosServiceDescriptor> serviceDescriptors = preConfigureKerberosDescriptor.getServices();
         if (serviceDescriptors != null) {
           for (KerberosServiceDescriptor serviceDescriptor : serviceDescriptors.values()) {
             serviceDescriptor.setPreconfigure(true);
           }
         }
-      }
-    }
-
-    // Read in the base stack-level Kerberos descriptor.
-    KerberosDescriptor stackKerberosDescriptor = readKerberosDescriptorFromFile(stackInfo.getKerberosDescriptorFileLocation());
-    if (stackKerberosDescriptor == null) {
-      // If kerberosDescriptor is null and stackKerberosDescriptor is null, then ensure
-      // kerberosDescriptor is an empty KerberosDescriptor.
-      if (kerberosDescriptor == null) {
-        kerberosDescriptor = new KerberosDescriptor();
-      }
-    } else {
-      if (kerberosDescriptor == null) {
-        // If kerberosDescriptor is null; then set it to stackKerberosDescriptor.
-        kerberosDescriptor = stackKerberosDescriptor;
-      } else {
-        // If kerberosDescriptor is not null; then update it using stackKerberosDescriptor.
-        kerberosDescriptor.update(stackKerberosDescriptor);
+        kerberosDescriptor.update(preConfigureKerberosDescriptor);
       }
     }
 
@@ -1314,6 +1313,15 @@ public class AmbariMetaInfo {
   }
 
   /**
+   * Gets the path to the common Kerberos descriptor file
+   *
+   * @return a String containing the path to the common Kerberos descriptor file
+   */
+  protected String getCommonKerberosDescriptorFileLocation() {
+    return commonKerberosDescriptorFileLocation;
+  }
+
+  /**
    * Gets the requested service-level Kerberos descriptor(s)
    * <p/>
   * An array of descriptors is returned since the kerberos.json in a service directory may contain
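
The reworked getKerberosDescriptor() above inverts the old composition order: the common resources/kerberos.json is now always the base (falling back to an empty descriptor if it cannot be read), and the stack-level pre-configuration data, when requested, is flagged and merged on top. A condensed, standalone sketch of that flow follows; the Descriptor type, read() helper, and file names are toy stand-ins for KerberosDescriptor, readKerberosDescriptorFromFile(), and the real paths.

import java.util.HashMap;
import java.util.Map;

class Descriptor {
    final Map<String, Object> data = new HashMap<>();

    // Later entries win, mirroring the update() merge used in the patch.
    void update(Descriptor other) { data.putAll(other.data); }

    // Toy stand-in for setting preconfigure=true on every service descriptor.
    void flagServicesPreconfigure() { data.put("preconfigure", true); }
}

public class DescriptorMergeSketch {

    // Stand-in for readKerberosDescriptorFromFile(); may return null.
    static Descriptor read(String path) { return null; }

    static Descriptor getKerberosDescriptor(boolean includePreconfigureData) {
        // 1. The common descriptor is the base; never return null.
        Descriptor descriptor = read("resources/kerberos.json");
        if (descriptor == null) {
            descriptor = new Descriptor();
        }
        // 2. Optionally merge the stack's kerberos_preconfigure.json on top,
        //    flagging its services so callers know they are pre-configured.
        if (includePreconfigureData) {
            Descriptor pre = read("kerberos_preconfigure.json");
            if (pre != null) {
                pre.flagServicesPreconfigure();
                descriptor.update(pre);
            }
        }
        return descriptor;
    }

    public static void main(String[] args) {
        // Empty output here because read() is stubbed; shows the null-safe flow.
        System.out.println(getKerberosDescriptor(true).data);
    }
}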

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fb4649e/ambari-server/src/main/java/org/apache/ambari/server/controller/StackVersionResponse.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/StackVersionResponse.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/StackVersionResponse.java
index 8fa0bf9..38b32d2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/StackVersionResponse.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/StackVersionResponse.java
@@ -42,13 +42,6 @@ public class StackVersionResponse implements Validable{
   private Map<String, Map<String, Map<String, String>>> configTypes;
 
   /**
-   * A File pointing to the stack-level Kerberos descriptor file
-   *
-   * This may be null if a relevant file is not available.
-   */
-  private File stackKerberosDescriptorFile;
-
-  /**
    * A Collection of Files pointing to the service-level Kerberos descriptor files
    *
    * This may be null or empty if no relevant files are available.
@@ -59,7 +52,6 @@ public class StackVersionResponse implements Validable{
   public StackVersionResponse(String stackVersion, String minUpgradeVersion,
                               boolean active, String parentVersion,
                               Map<String, Map<String, Map<String, String>>> configTypes,
-                              File stackKerberosDescriptorFile,
                               Collection<File> serviceKerberosDescriptorFiles,
                               Set<String> upgradePacks, boolean valid, Collection<String> errorSet, String minJdk, String maxJdk) {
     setStackVersion(stackVersion);
@@ -67,7 +59,6 @@ public class StackVersionResponse implements Validable{
     setActive(active);
     setParentVersion(parentVersion);
     setConfigTypes(configTypes);
-    setKerberosDescriptorFile(stackKerberosDescriptorFile);
     setServiceKerberosDescriptorFiles(serviceKerberosDescriptorFiles);
     setUpgradePacks(upgradePacks);
     setValid(valid);
@@ -176,26 +167,6 @@ public class StackVersionResponse implements Validable{
   }
 
   /**
-   * Gets a File pointing to the stack-level Kerberos descriptor
-   *
-   * @return a File pointing to the stack-level Kerberos descriptor, or null if no relevant file is
-   * available
-   */
-  @ApiModelProperty(hidden = true)
-  public File getStackKerberosDescriptorFile() {
-    return stackKerberosDescriptorFile;
-  }
-
-  /**
-   * Sets the stack-level Kerberos descriptor File
-   *
-   * @param stackKerberosDescriptorFile a File pointing to the stack-level Kerberos descriptor
-   */
-  public void setKerberosDescriptorFile(File stackKerberosDescriptorFile) {
-    this.stackKerberosDescriptorFile = stackKerberosDescriptorFile;
-  }
-
-  /**
    * Gets the Collection of Files pointing to the stack-specific service-level Kerberos descriptor
    * files
    *

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fb4649e/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
index a7f7710..6bb421b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
@@ -483,7 +483,7 @@ public class StackArtifactResourceProvider extends AbstractControllerResourcePro
   private Map<String, Object> buildStackDescriptor(String stackName, String stackVersion)
       throws NoSuchParentResourceException, IOException {
 
-    KerberosDescriptor kerberosDescriptor = null;
+    KerberosDescriptor kerberosDescriptor = new KerberosDescriptor();
 
     AmbariManagementController controller = getManagementController();
     StackInfo stackInfo;
@@ -496,19 +496,8 @@ public class StackArtifactResourceProvider extends AbstractControllerResourcePro
 
     Collection<KerberosServiceDescriptor> serviceDescriptors = getServiceDescriptors(stackInfo);
 
-    String kerberosFileLocation = stackInfo.getKerberosDescriptorFileLocation();
-    if (kerberosFileLocation != null) {
-      kerberosDescriptor = kerberosDescriptorFactory.createInstance(new File(kerberosFileLocation));
-    } else if (! serviceDescriptors.isEmpty()) {
-      // service descriptors present with no stack descriptor,
-      // create an empty stack descriptor to hold services
-      kerberosDescriptor = new KerberosDescriptor();
-    }
-
-    if (kerberosDescriptor != null) {
-      for (KerberosServiceDescriptor descriptor : serviceDescriptors) {
-        kerberosDescriptor.putService(descriptor);
-      }
+    if (serviceDescriptors != null) {
+      serviceDescriptors.forEach(kerberosDescriptor::putService);
       return kerberosDescriptor.toMap();
     } else {
       return null;

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fb4649e/ambari-server/src/main/java/org/apache/ambari/server/stack/ServiceDirectory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/ServiceDirectory.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/ServiceDirectory.java
index ae59b3f..119163e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/ServiceDirectory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/ServiceDirectory.java
@@ -24,6 +24,7 @@ import java.util.HashMap;
 import java.util.Map;
 
 import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.stack.ServiceMetainfoXml;
 import org.apache.ambari.server.state.stack.StackRoleCommandOrder;
@@ -259,7 +260,7 @@ public abstract class ServiceDirectory extends StackDefinitionDirectory {
     File af = new File(directory, StackDirectory.SERVICE_ALERT_FILE_NAME);
     alertsFile = af.exists() ? af : null;
 
-    File kdf = new File(directory, StackDirectory.KERBEROS_DESCRIPTOR_FILE_NAME);
+    File kdf = new File(directory, AmbariMetaInfo.KERBEROS_DESCRIPTOR_FILE_NAME);
     kerberosDescriptorFile = kdf.exists() ? kdf : null;
 
     File rco = new File(directory, StackDirectory.RCO_FILE_NAME);

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fb4649e/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
index e3c586b..daf8e7c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
@@ -57,14 +57,6 @@ public class StackDirectory extends StackDefinitionDirectory {
   public static final String SERVICE_ALERT_FILE_NAME = "alerts.json";
   public static final String SERVICE_ADVISOR_FILE_NAME = "service_advisor.py";
   /**
-   * The filename for a Kerberos descriptor file at either the stack or service level
-   */
-  public static final String KERBEROS_DESCRIPTOR_FILE_NAME = "kerberos.json";
-  /**
-   * The filename for a Widgets descriptor file at either the stack or service level
-   */
-  public static final String WIDGETS_DESCRIPTOR_FILE_NAME = "widgets.json";
-  /**
    * The filename for a Kerberos descriptor preconfigure file at either the stack or service level
    */
   public static final String KERBEROS_DESCRIPTOR_PRECONFIGURE_FILE_NAME = "kerberos_preconfigure.json";
@@ -84,11 +76,6 @@ public class StackDirectory extends StackDefinitionDirectory {
   private String rcoFilePath;
 
   /**
-   * kerberos descriptor file path
-   */
-  private String kerberosDescriptorFilePath;
-
-  /**
    * kerberos descriptor (preconfigure) file path
    */
   private String kerberosDescriptorPreconfigureFilePath;
@@ -210,15 +197,6 @@ public class StackDirectory extends StackDefinitionDirectory {
   }
 
   /**
-   * Obtain the path to the (stack-level) Kerberos descriptor file
-   *
-   * @return the path to the (stack-level) Kerberos descriptor file
-   */
-  public String getKerberosDescriptorFilePath() {
-    return kerberosDescriptorFilePath;
-  }
-
-  /**
    * Obtain the path to the (stack-level) Kerberos descriptor pre-configuration file
    *
    * @return the path to the (stack-level) Kerberos descriptor pre-configuration file
@@ -300,11 +278,6 @@ public class StackDirectory extends StackDefinitionDirectory {
       rcoFilePath = getAbsolutePath() + File.separator + RCO_FILE_NAME;
     }
 
-    if (subDirs.contains(KERBEROS_DESCRIPTOR_FILE_NAME)) {
-      // kerberosDescriptorFilePath is expected to be absolute
-      kerberosDescriptorFilePath = getAbsolutePath() + File.separator + KERBEROS_DESCRIPTOR_FILE_NAME;
-    }
-
     if (subDirs.contains(KERBEROS_DESCRIPTOR_PRECONFIGURE_FILE_NAME)) {
       // kerberosDescriptorPreconfigureFilePath is expected to be absolute
       kerberosDescriptorPreconfigureFilePath = getAbsolutePath() + File.separator + KERBEROS_DESCRIPTOR_PRECONFIGURE_FILE_NAME;

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fb4649e/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
index 71235f3..e88bbf2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
@@ -284,11 +284,6 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
     mergeConfigurations(parentStack, allStacks, commonServices, extensions);
     mergeRoleCommandOrder(parentStack);
 
-    // grab stack level kerberos.json from parent stack
-    if (stackInfo.getKerberosDescriptorFileLocation() == null) {
-      stackInfo.setKerberosDescriptorFileLocation(parentStack.getModuleInfo().getKerberosDescriptorFileLocation());
-    }
-
     // grab stack level kerberos_preconfigure.json from parent stack
     if (stackInfo.getKerberosDescriptorPreConfigurationFileLocation() == null) {
       stackInfo.setKerberosDescriptorPreConfigurationFileLocation(parentStack.getModuleInfo().getKerberosDescriptorPreConfigurationFileLocation());
@@ -567,7 +562,6 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
       stackInfo.setActive(smx.getVersion().isActive());
       stackInfo.setParentStackVersion(smx.getExtends());
       stackInfo.setRcoFileLocation(stackDirectory.getRcoFilePath());
-      stackInfo.setKerberosDescriptorFileLocation(stackDirectory.getKerberosDescriptorFilePath());
       stackInfo.setKerberosDescriptorPreConfigurationFileLocation(stackDirectory.getKerberosDescriptorPreconfigureFilePath());
       stackInfo.setUpgradesFolder(stackDirectory.getUpgradesDir());
       stackInfo.setUpgradePacks(stackDirectory.getUpgradePacks());

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fb4649e/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceInfo.java
index ef18bd9..8fe6583 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceInfo.java
@@ -134,7 +134,7 @@ public class ServiceInfo implements Validable{
   private String parent;
 
   @XmlElement(name = "widgetsFileName")
-  private String widgetsFileName = StackDirectory.WIDGETS_DESCRIPTOR_FILE_NAME;
+  private String widgetsFileName = AmbariMetaInfo.WIDGETS_DESCRIPTOR_FILE_NAME;
 
   @XmlElement(name = "metricsFileName")
   private String metricsFileName = StackDirectory.SERVICE_METRIC_FILE_NAME;

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fb4649e/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
index 3efc997..c32e907 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
@@ -325,9 +325,6 @@ public class StackInfo implements Comparable<StackInfo>, Validable {
 
   public StackVersionResponse convertToResponse() {
 
-    // Get the stack-level Kerberos descriptor file path
-    String stackDescriptorFileFilePath = getKerberosDescriptorFileLocation();
-
     // Collect the services' Kerberos descriptor files
     Collection<ServiceInfo> serviceInfos = getServices();
     // The collection of service descriptor files. A Set is being used because some Kerberos descriptor
@@ -345,7 +342,6 @@ public class StackInfo implements Comparable<StackInfo>, Validable {
 
     return new StackVersionResponse(getVersion(), getMinUpgradeVersion(),
         isActive(), getParentStackVersion(), getConfigTypeAttributes(),
-        (stackDescriptorFileFilePath == null) ? null : new File(stackDescriptorFileFilePath),
         serviceDescriptorFiles,
         null == upgradePacks ? Collections.emptySet() : upgradePacks.keySet(),
         isValid(), getErrors(), getMinJdk(), getMaxJdk());
@@ -392,25 +388,6 @@ public class StackInfo implements Comparable<StackInfo>, Validable {
   }
 
   /**
-   * Gets the path to the stack-level Kerberos descriptor file
-   *
-   * @return a String containing the path to the stack-level Kerberos descriptor file
-   */
-  public String getKerberosDescriptorFileLocation() {
-    return kerberosDescriptorFileLocation;
-  }
-
-  /**
-   * Sets the path to the stack-level Kerberos descriptor file
-   *
-   * @param kerberosDescriptorFileLocation a String containing the path to the stack-level Kerberos
-   *                                       descriptor file
-   */
-  public void setKerberosDescriptorFileLocation(String kerberosDescriptorFileLocation) {
-    this.kerberosDescriptorFileLocation = kerberosDescriptorFileLocation;
-  }
-
-  /**
    * Gets the path to the stack-level Kerberos descriptor pre-configuration file
    *
    * @return a String containing the path to the stack-level Kerberos descriptor pre-configuration file

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fb4649e/ambari-server/src/main/resources/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/kerberos.json b/ambari-server/src/main/resources/kerberos.json
new file mode 100644
index 0000000..3787db1
--- /dev/null
+++ b/ambari-server/src/main/resources/kerberos.json
@@ -0,0 +1,79 @@
+{
+  "properties": {
+    "realm": "${kerberos-env/realm}",
+    "keytab_dir": "/etc/security/keytabs",
+    "additional_realms": "",
+    "principal_suffix": "-${cluster_name|toLower()}"
+  },
+  "identities": [
+    {
+      "name": "spnego",
+      "principal": {
+        "value": "HTTP/_HOST@${realm}",
+        "type": "service"
+      },
+      "keytab": {
+        "file": "${keytab_dir}/spnego.service.keytab",
+        "owner": {
+          "name": "root",
+          "access": "r"
+        },
+        "group": {
+          "name": "${cluster-env/user_group}",
+          "access": "r"
+        }
+      }
+    },
+    {
+      "name": "smokeuser",
+      "principal": {
+        "value": "${cluster-env/smokeuser}${principal_suffix}@${realm}",
+        "type": "user",
+        "configuration": "cluster-env/smokeuser_principal_name",
+        "local_username": "${cluster-env/smokeuser}"
+      },
+      "keytab": {
+        "file": "${keytab_dir}/smokeuser.headless.keytab",
+        "owner": {
+          "name": "${cluster-env/smokeuser}",
+          "access": "r"
+        },
+        "group": {
+          "name": "${cluster-env/user_group}",
+          "access": "r"
+        },
+        "configuration": "cluster-env/smokeuser_keytab"
+      }
+    }
+  ],
+  "services": [
+    {
+      "name": "AMBARI",
+      "components": [
+        {
+          "name": "AMBARI_SERVER",
+          "identities": [
+            {
+              "name": "ambari-server",
+              "principal": {
+                "value": "ambari-server${principal_suffix}@${realm}",
+                "type": "user",
+                "configuration": "cluster-env/ambari_principal_name"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/ambari.server.keytab",
+                "owner": {
+                  "access": "r"
+                }
+              }
+            },
+            {
+              "name" : "ambari-server_spnego",
+              "reference" : "/spnego"
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
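
The descriptor relies on ${...} references such as ${kerberos-env/realm} and ${cluster_name|toLower()} that Ambari resolves against cluster configuration when the descriptor is processed. The snippet below is a deliberately simplified stand-in for that variable replacement (the real implementation supports more functions and nested lookups than the single |toLower() handled here):

import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class VariableSketch {

    // Matches ${name} or ${name|toLower()}; group 1 is the lookup key.
    private static final Pattern VAR = Pattern.compile("\\$\\{([^}|]+)(\\|toLower\\(\\))?}");

    static String resolve(String template, Map<String, String> values) {
        Matcher m = VAR.matcher(template);
        StringBuffer out = new StringBuffer();
        while (m.find()) {
            String value = values.getOrDefault(m.group(1), "");
            if (m.group(2) != null) {
                value = value.toLowerCase(); // crude model of |toLower()
            }
            m.appendReplacement(out, Matcher.quoteReplacement(value));
        }
        m.appendTail(out);
        return out.toString();
    }

    public static void main(String[] args) {
        Map<String, String> values = Map.of(
            "kerberos-env/realm", "EXAMPLE.COM",
            "cluster_name", "MyCluster");
        // Prints: ambari-server-mycluster@EXAMPLE.COM
        System.out.println(resolve(
            "ambari-server-${cluster_name|toLower()}@${kerberos-env/realm}", values));
    }
}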

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fb4649e/ambari-server/src/main/resources/stacks/HDP/2.0.6/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/kerberos.json
deleted file mode 100644
index 3787db1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/kerberos.json
+++ /dev/null
@@ -1,79 +0,0 @@
-{
-  "properties": {
-    "realm": "${kerberos-env/realm}",
-    "keytab_dir": "/etc/security/keytabs",
-    "additional_realms": "",
-    "principal_suffix": "-${cluster_name|toLower()}"
-  },
-  "identities": [
-    {
-      "name": "spnego",
-      "principal": {
-        "value": "HTTP/_HOST@${realm}",
-        "type": "service"
-      },
-      "keytab": {
-        "file": "${keytab_dir}/spnego.service.keytab",
-        "owner": {
-          "name": "root",
-          "access": "r"
-        },
-        "group": {
-          "name": "${cluster-env/user_group}",
-          "access": "r"
-        }
-      }
-    },
-    {
-      "name": "smokeuser",
-      "principal": {
-        "value": "${cluster-env/smokeuser}${principal_suffix}@${realm}",
-        "type": "user",
-        "configuration": "cluster-env/smokeuser_principal_name",
-        "local_username": "${cluster-env/smokeuser}"
-      },
-      "keytab": {
-        "file": "${keytab_dir}/smokeuser.headless.keytab",
-        "owner": {
-          "name": "${cluster-env/smokeuser}",
-          "access": "r"
-        },
-        "group": {
-          "name": "${cluster-env/user_group}",
-          "access": "r"
-        },
-        "configuration": "cluster-env/smokeuser_keytab"
-      }
-    }
-  ],
-  "services": [
-    {
-      "name": "AMBARI",
-      "components": [
-        {
-          "name": "AMBARI_SERVER",
-          "identities": [
-            {
-              "name": "ambari-server",
-              "principal": {
-                "value": "ambari-server${principal_suffix}@${realm}",
-                "type": "user",
-                "configuration": "cluster-env/ambari_principal_name"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/ambari.server.keytab",
-                "owner": {
-                  "access": "r"
-                }
-              }
-            },
-            {
-              "name" : "ambari-server_spnego",
-              "reference" : "/spnego"
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fb4649e/ambari-server/src/main/resources/stacks/HDP/3.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/3.0/kerberos.json
deleted file mode 100644
index 3787db1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/kerberos.json
+++ /dev/null
@@ -1,79 +0,0 @@
-{
-  "properties": {
-    "realm": "${kerberos-env/realm}",
-    "keytab_dir": "/etc/security/keytabs",
-    "additional_realms": "",
-    "principal_suffix": "-${cluster_name|toLower()}"
-  },
-  "identities": [
-    {
-      "name": "spnego",
-      "principal": {
-        "value": "HTTP/_HOST@${realm}",
-        "type": "service"
-      },
-      "keytab": {
-        "file": "${keytab_dir}/spnego.service.keytab",
-        "owner": {
-          "name": "root",
-          "access": "r"
-        },
-        "group": {
-          "name": "${cluster-env/user_group}",
-          "access": "r"
-        }
-      }
-    },
-    {
-      "name": "smokeuser",
-      "principal": {
-        "value": "${cluster-env/smokeuser}${principal_suffix}@${realm}",
-        "type": "user",
-        "configuration": "cluster-env/smokeuser_principal_name",
-        "local_username": "${cluster-env/smokeuser}"
-      },
-      "keytab": {
-        "file": "${keytab_dir}/smokeuser.headless.keytab",
-        "owner": {
-          "name": "${cluster-env/smokeuser}",
-          "access": "r"
-        },
-        "group": {
-          "name": "${cluster-env/user_group}",
-          "access": "r"
-        },
-        "configuration": "cluster-env/smokeuser_keytab"
-      }
-    }
-  ],
-  "services": [
-    {
-      "name": "AMBARI",
-      "components": [
-        {
-          "name": "AMBARI_SERVER",
-          "identities": [
-            {
-              "name": "ambari-server",
-              "principal": {
-                "value": "ambari-server${principal_suffix}@${realm}",
-                "type": "user",
-                "configuration": "cluster-env/ambari_principal_name"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/ambari.server.keytab",
-                "owner": {
-                  "access": "r"
-                }
-              }
-            },
-            {
-              "name" : "ambari-server_spnego",
-              "reference" : "/spnego"
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fb4649e/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
index 25e8d04..9285526 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
@@ -151,11 +151,12 @@ public class AmbariMetaInfoTest {
   public static void beforeClass() throws Exception {
     File stacks = new File("src/test/resources/stacks");
     File version = new File("src/test/resources/version");
+    File resourcesRoot = new File("src/test/resources/");
     if (System.getProperty("os.name").contains("Windows")) {
       stacks = new File(ClassLoader.getSystemClassLoader().getResource("stacks").getPath());
       version = new File(new File(ClassLoader.getSystemClassLoader().getResource("").getPath()).getParent(), "version");
     }
-    metaInfo = createAmbariMetaInfo(stacks, version);
+    metaInfo = createAmbariMetaInfo(stacks, version, resourcesRoot);
   }
 
   @AfterClass
@@ -450,7 +451,7 @@ public class AmbariMetaInfoTest {
       f3.createNewFile();
     }
 
-    AmbariMetaInfo ambariMetaInfo = createAmbariMetaInfo(stackRootTmp, version);
+    AmbariMetaInfo ambariMetaInfo = createAmbariMetaInfo(stackRootTmp, version, new File(""));
 
     // Tests the stack is loaded as expected
     getServices();
@@ -737,7 +738,7 @@ public class AmbariMetaInfoTest {
     LOG.info("Stacks file " + stackRoot.getAbsolutePath());
 
 
-    TestAmbariMetaInfo ambariMetaInfo = createAmbariMetaInfo(stackRoot, version);
+    TestAmbariMetaInfo ambariMetaInfo = createAmbariMetaInfo(stackRoot, version, new File(""));
     Assert.assertEquals(1, ambariMetaInfo.getStackManager().getStacks().size());
     Assert.assertEquals(false, ambariMetaInfo.getStackManager().getStack("HDP", "0.1").isValid());
     Assert.assertEquals(2, ambariMetaInfo.getStackManager().getStack("HDP", "0.1").getErrors().size());
@@ -1820,8 +1821,7 @@ public class AmbariMetaInfoTest {
 
   @Test
   public void testReadKerberosDescriptorFromFile() throws AmbariException {
-    StackInfo stackInfo = metaInfo.getStack(STACK_NAME_HDP, "2.0.8");
-    String path = stackInfo.getKerberosDescriptorFileLocation();
+    String path = metaInfo.getCommonKerberosDescriptorFileLocation();
     KerberosDescriptor descriptor = metaInfo.readKerberosDescriptorFromFile(path);
 
     Assert.assertNotNull(descriptor);
@@ -1899,7 +1899,7 @@ public class AmbariMetaInfoTest {
     File widgetsFile = metaInfo.getCommonWidgetsDescriptorFile();
 
     Assert.assertNotNull(widgetsFile);
-    Assert.assertEquals("/var/lib/ambari-server/resources/widgets.json", widgetsFile.getPath());
+    Assert.assertEquals("src/test/resources/widgets.json", widgetsFile.getPath());
   }
 
   private File getStackRootTmp(String buildDir) {
@@ -1937,16 +1937,17 @@ public class AmbariMetaInfoTest {
   private TestAmbariMetaInfo setupTempAmbariMetaInfoExistingDirs(String buildDir) throws Exception {
     File version = getVersion();
     File stackRootTmp = getStackRootTmp(buildDir);
-    TestAmbariMetaInfo ambariMetaInfo = createAmbariMetaInfo(stackRootTmp, version);
+    TestAmbariMetaInfo ambariMetaInfo = createAmbariMetaInfo(stackRootTmp, version, new File(""));
     return ambariMetaInfo;
   }
 
   private static TestAmbariMetaInfo createAmbariMetaInfo(File stackRoot,
-    File versionFile) throws Exception {
+    File versionFile, File resourcesRoot) throws Exception {
 
     Properties properties = new Properties();
     properties.setProperty(Configuration.METADATA_DIR_PATH.getKey(), stackRoot.getPath());
     properties.setProperty(Configuration.SERVER_VERSION_FILE.getKey(), versionFile.getPath());
+    properties.setProperty(Configuration.RESOURCES_DIR.getKey(), resourcesRoot.getPath());
     Configuration configuration = new Configuration(properties);
 
     TestAmbariMetaInfo metaInfo = new TestAmbariMetaInfo(configuration);
@@ -2056,9 +2057,11 @@ public class AmbariMetaInfoTest {
       Configuration config = createNiceMock(Configuration.class);
       if (System.getProperty("os.name").contains("Windows")) {
         expect(config.getSharedResourcesDirPath()).andReturn(ClassLoader.getSystemClassLoader().getResource("").getPath()).anyTimes();
+        expect(config.getResourceDirPath()).andReturn(ClassLoader.getSystemClassLoader().getResource("").getPath()).anyTimes();
       }
       else {
         expect(config.getSharedResourcesDirPath()).andReturn("./src/test/resources").anyTimes();
+        expect(config.getResourceDirPath()).andReturn("./src/test/resources").anyTimes();
       }
 
       replay(config);

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fb4649e/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
index f53e111..b8f513c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
@@ -723,14 +723,6 @@ public class StackManagerTest {
 
   }
 
-  @Test
-  public void testInheritKerberosDescriptor() throws Exception {
-    StackInfo stack = stackManager.getStack("HDP", "2.1.1");
-    String stacksFolder = ClassLoader.getSystemClassLoader().getResource("stacks").getPath();
-    assertEquals(new File(stacksFolder, "HDP/2.0.8/kerberos.json").getAbsolutePath(),
-        stack.getKerberosDescriptorFileLocation());
-  }
-
   /**
    * Tests that {@link UpgradePack} and {@link ConfigUpgradePack} instances are correctly initialized
    * post-unmarshalling.

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fb4649e/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelperTest.java
index 293e050..a3d13b1 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelperTest.java
@@ -66,6 +66,7 @@ public class KerberosDescriptorUpdateHelperTest extends EasyMockSupport {
         properties.put("common.services.path", "src/main/resources/common-services");
         properties.put("server.version.file", "target/version");
         properties.put("custom.action.definitions", "/tmp/nofile");
+        properties.put("resources.dir", "src/main/resources");
         Configuration configuration = new Configuration(properties);
 
         install(new FactoryModuleBuilder().build(StackManagerFactory.class));

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fb4649e/ambari-server/src/test/resources/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/kerberos.json b/ambari-server/src/test/resources/kerberos.json
new file mode 100644
index 0000000..14eefbf
--- /dev/null
+++ b/ambari-server/src/test/resources/kerberos.json
@@ -0,0 +1,42 @@
+{
+  "properties": {
+    "realm": "${cluster-env/kerberos_domain}",
+    "keytab_dir": "/etc/security/keytabs",
+    "additional_realms": ""
+  },
+  "identities": [
+    {
+      "name": "spnego",
+      "principal": {
+        "value": "HTTP/_HOST@${realm}",
+        "type" : "service"
+      },
+      "keytab": {
+        "file": "${keytab_dir}/spnego.service.keytab",
+        "owner": {
+          "name": "root",
+          "access": "r"
+        },
+        "group": {
+          "name": "${cluster-env/user_group}",
+          "access": "r"
+        }
+      }
+    }
+  ],
+  "auth_to_local_properties" : [
+    "hadoop.security.auth_to_local"
+  ],
+  "configurations": [
+    {
+      "core-site": {
+        "hadoop.security.authentication": "kerberos",
+        "hadoop.rpc.protection": "authentication",
+        "hadoop.security.authorization": "true",
+        "hadoop.security.auth_to_local": "",
+        "hadoop.proxyuser.superuser.hosts": "",
+        "hadoop.proxyuser.superuser.groups": ""
+      }
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fb4649e/ambari-server/src/test/resources/stacks/HDP/2.0.8/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.8/kerberos.json b/ambari-server/src/test/resources/stacks/HDP/2.0.8/kerberos.json
deleted file mode 100644
index 14eefbf..0000000
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.8/kerberos.json
+++ /dev/null
@@ -1,42 +0,0 @@
-{
-  "properties": {
-    "realm": "${cluster-env/kerberos_domain}",
-    "keytab_dir": "/etc/security/keytabs",
-    "additional_realms": ""
-  },
-  "identities": [
-    {
-      "name": "spnego",
-      "principal": {
-        "value": "HTTP/_HOST@${realm}",
-        "type" : "service"
-      },
-      "keytab": {
-        "file": "${keytab_dir}/spnego.service.keytab",
-        "owner": {
-          "name": "root",
-          "access": "r"
-        },
-        "group": {
-          "name": "${cluster-env/user_group}",
-          "access": "r"
-        }
-      }
-    }
-  ],
-  "auth_to_local_properties" : [
-    "hadoop.security.auth_to_local"
-  ],
-  "configurations": [
-    {
-      "core-site": {
-        "hadoop.security.authentication": "kerberos",
-        "hadoop.rpc.protection": "authentication",
-        "hadoop.security.authorization": "true",
-        "hadoop.security.auth_to_local": "",
-        "hadoop.proxyuser.superuser.hosts": "",
-        "hadoop.proxyuser.superuser.groups": ""
-      }
-    }
-  ]
-}


[10/50] [abbrv] ambari git commit: AMBARI-14714. fix compiler errors (benyoka)

Posted by ja...@apache.org.
AMBARI-14714. fix compiler errors (benyoka)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/53333dad
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/53333dad
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/53333dad

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: 53333dad30611575f7c853a83e21b5dce0c85c7f
Parents: 1e90bd5
Author: Balazs Bence Sari <be...@apache.org>
Authored: Fri Oct 6 19:36:36 2017 +0200
Committer: Balazs Bence Sari <be...@apache.org>
Committed: Fri Oct 6 19:36:36 2017 +0200

----------------------------------------------------------------------
 .../apache/ambari/server/controller/StackV2.java    | 16 +++++++++++++---
 .../ambari/server/topology/BlueprintImplV2.java     |  6 +++---
 .../apache/ambari/server/topology/BlueprintV2.java  |  6 +++---
 .../apache/ambari/server/topology/HostGroupV2.java  |  4 ++--
 .../org/apache/ambari/server/topology/Service.java  |  4 ++--
 5 files changed, 23 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/53333dad/ambari-server/src/main/java/org/apache/ambari/server/controller/StackV2.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/StackV2.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/StackV2.java
index 3e34951..8a2208d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/StackV2.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/StackV2.java
@@ -18,14 +18,24 @@
 
 package org.apache.ambari.server.controller;
 
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.orm.entities.StackEntity;
-import org.apache.ambari.server.state.*;
+import org.apache.ambari.server.state.AutoDeployInfo;
+import org.apache.ambari.server.state.ComponentInfo;
+import org.apache.ambari.server.state.DependencyInfo;
+import org.apache.ambari.server.state.PropertyDependencyInfo;
+import org.apache.ambari.server.state.PropertyInfo;
+import org.apache.ambari.server.state.ValueAttributesInfo;
 import org.apache.ambari.server.topology.Cardinality;
 import org.apache.ambari.server.topology.Configuration;
 
-import java.util.*;
-
 /**
  * Encapsulates stack information.
  */

http://git-wip-us.apache.org/repos/asf/ambari/blob/53333dad/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintImplV2.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintImplV2.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintImplV2.java
index fa893b3b..9bde795 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintImplV2.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintImplV2.java
@@ -19,13 +19,13 @@
 
 package org.apache.ambari.server.topology;
 
-import org.apache.ambari.server.controller.StackV2;
-import org.apache.ambari.server.orm.entities.BlueprintEntity;
-
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.ambari.server.controller.StackV2;
+import org.apache.ambari.server.orm.entities.BlueprintEntity;
+
 /**
  * Blueprint implementation.
  */

http://git-wip-us.apache.org/repos/asf/ambari/blob/53333dad/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintV2.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintV2.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintV2.java
index 134a614..1fea966 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintV2.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintV2.java
@@ -18,13 +18,13 @@
 
 package org.apache.ambari.server.topology;
 
-import org.apache.ambari.server.controller.StackV2;
-import org.apache.ambari.server.orm.entities.BlueprintEntity;
-
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.ambari.server.controller.StackV2;
+import org.apache.ambari.server.orm.entities.BlueprintEntity;
+
 /**
  * Blueprint representation.
  */

http://git-wip-us.apache.org/repos/asf/ambari/blob/53333dad/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupV2.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupV2.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupV2.java
index 506fed0..df26b68 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupV2.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupV2.java
@@ -18,11 +18,11 @@
 
 package org.apache.ambari.server.topology;
 
-import org.apache.ambari.server.controller.internal.ProvisionAction;
-
 import java.util.Collection;
 import java.util.regex.Pattern;
 
+import org.apache.ambari.server.controller.internal.ProvisionAction;
+
 /**
  * Host Group representation.
  */

http://git-wip-us.apache.org/repos/asf/ambari/blob/53333dad/ambari-server/src/main/java/org/apache/ambari/server/topology/Service.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/Service.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/Service.java
index 66c0dc3..62acdfd 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/Service.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/Service.java
@@ -19,10 +19,10 @@
 package org.apache.ambari.server.topology;
 
 
-import org.apache.ambari.server.controller.internal.Stack;
-
 import java.util.Set;
 
+import org.apache.ambari.server.controller.internal.Stack;
+
 public class Service {
 
   private final String type;


[49/50] [abbrv] ambari git commit: Merge remote-tracking branch 'origin/trunk' into branch-feature-AMBARI-14714

Posted by ja...@apache.org.
Merge remote-tracking branch 'origin/trunk' into branch-feature-AMBARI-14714


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/045d9bfe
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/045d9bfe
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/045d9bfe

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: 045d9bfe3e62fc68c3879ea2b874fabe11bf1622
Parents: eb6b21c 03273bd
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Tue Oct 10 22:24:45 2017 -0700
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Tue Oct 10 22:24:45 2017 -0700

----------------------------------------------------------------------
 .../libraries/script/script.py                  |  68 ++-
 ambari-logsearch/ambari-logsearch-web/pom.xml   |  11 +-
 .../flume/FlumeTimelineMetricsSinkTest.java     |  27 +-
 .../AmbariCustomCommandExecutionHelper.java     |  10 +
 .../AmbariManagementControllerImpl.java         |   6 +-
 .../controller/AmbariManagementHelper.java      |   2 +-
 .../server/controller/KerberosHelper.java       |   9 +-
 .../server/controller/KerberosHelperImpl.java   | 244 ++++++----
 .../ServiceComponentHostResponse.java           |  15 +
 .../BlueprintConfigurationProcessor.java        |  81 +++-
 .../internal/HostComponentResourceProvider.java |   2 +
 .../utilities/RemovableIdentities.java          |   2 +-
 .../upgrade/HostVersionOutOfSyncListener.java   |  11 +
 .../ambari/server/metadata/ActionMetadata.java  |   1 +
 .../kerberos/CreateKeytabFilesServerAction.java |   2 +-
 .../kerberos/CreatePrincipalsServerAction.java  |   2 +-
 .../kerberos/KerberosServerAction.java          |  71 ++-
 .../PrepareDisableKerberosServerAction.java     |   3 +-
 .../PrepareEnableKerberosServerAction.java      |   6 +-
 .../PrepareKerberosIdentitiesServerAction.java  | 142 ++++--
 .../ambari/server/stack/ExtensionHelper.java    |  57 ++-
 .../apache/ambari/server/stack/StackModule.java |  33 +-
 .../ambari/server/state/ConfigHelper.java       | 164 ++++++-
 .../ambari/server/state/PropertyInfo.java       |  29 ++
 .../ambari/server/state/RefreshCommand.java     |  52 +++
 .../state/RefreshCommandConfiguration.java      |  71 +++
 .../apache/ambari/server/state/StackInfo.java   |  10 +
 .../ambari/server/state/UpgradeContext.java     |  24 +-
 .../kerberos/AbstractKerberosDescriptor.java    |  25 +
 .../AbstractKerberosDescriptorContainer.java    |  18 +-
 .../kerberos/KerberosIdentityDescriptor.java    | 160 +++++++
 .../svccomphost/ServiceComponentHostImpl.java   |  10 +
 .../HDFS/2.1.0.2.0/configuration/core-site.xml  |  12 +
 .../HDFS/2.1.0.2.0/configuration/hdfs-site.xml  |   3 +
 .../HDFS/2.1.0.2.0/package/scripts/datanode.py  |  13 +-
 .../HDFS/2.1.0.2.0/package/scripts/hdfs.py      |  62 ++-
 .../2.1.0.2.0/package/scripts/hdfs_client.py    |   5 +
 .../2.1.0.2.0/package/scripts/hdfs_namenode.py  |  21 +
 .../2.1.0.2.0/package/scripts/install_params.py |   6 -
 .../HDFS/2.1.0.2.0/package/scripts/namenode.py  |  21 +-
 .../2.1.0.2.0/package/scripts/params_linux.py   |   2 -
 .../HDFS/2.1.0.2.0/package/scripts/snamenode.py |  10 +
 .../HDFS/3.0.0.3.0/configuration/hdfs-site.xml  |   6 +
 .../HDFS/3.0.0.3.0/package/scripts/datanode.py  |  13 +-
 .../HDFS/3.0.0.3.0/package/scripts/hdfs.py      |  62 ++-
 .../3.0.0.3.0/package/scripts/hdfs_client.py    |   5 +
 .../3.0.0.3.0/package/scripts/hdfs_namenode.py  |  20 +
 .../3.0.0.3.0/package/scripts/install_params.py |   6 -
 .../HDFS/3.0.0.3.0/package/scripts/namenode.py  |  21 +-
 .../3.0.0.3.0/package/scripts/params_linux.py   |   2 -
 .../HDFS/3.0.0.3.0/package/scripts/snamenode.py |  10 +
 .../OOZIE/4.0.0.2.0/package/scripts/oozie.py    |   6 +-
 .../4.0.0.2.0/package/scripts/params_linux.py   |   3 -
 .../OOZIE/4.2.0.3.0/package/scripts/oozie.py    |   5 +-
 .../4.2.0.3.0/package/scripts/params_linux.py   |   3 -
 .../ZEPPELIN/0.7.0/package/scripts/master.py    |  33 +-
 .../src/main/resources/configuration-schema.xsd |  12 +
 .../src/main/resources/properties.json          |   1 +
 .../services/HDFS/configuration/hdfs-site.xml   |   3 +
 .../stacks/HDP/2.6/services/DRUID/kerberos.json |  30 --
 .../server/controller/KerberosHelperTest.java   |   5 -
 .../BlueprintConfigurationProcessorTest.java    |  32 ++
 .../utilities/KerberosIdentityCleanerTest.java  |   8 +-
 .../HostVersionOutOfSyncListenerTest.java       |  14 +-
 .../server/stack/StackManagerExtensionTest.java |  31 +-
 .../ambari/server/state/ConfigHelperTest.java   |  76 ++-
 .../ambari/server/state/PropertyInfoTest.java   |  20 +
 .../ambari/server/state/UpgradeContextTest.java |  60 ++-
 .../state/kerberos/KerberosDescriptorTest.java  | 150 +++++-
 .../stacks/2.0.6/HBASE/test_hbase_master.py     |   2 +
 .../python/stacks/2.0.6/HDFS/test_datanode.py   |  17 +
 .../python/stacks/2.0.6/HDFS/test_namenode.py   |  33 ++
 .../stacks/2.6/ZEPPELIN/test_zeppelin_070.py    | 101 +---
 .../src/test/python/stacks/utils/RMFTestCase.py |   4 +-
 .../resources/extensions/EXT/0.2/metainfo.xml   |   2 +-
 .../resources/extensions/EXT/0.3/metainfo.xml   |   2 +-
 .../services/HDFS/configuration/hdfs-site.xml   |   8 +
 .../stacks_with_extensions/HDP/0.4/metainfo.xml |  22 +
 .../HDP/0.4/repos/repoinfo.xml                  |  63 +++
 .../HDP/0.4/services/HBASE/metainfo.xml         |  26 ++
 .../0.4/services/HDFS/configuration/global.xml  | 145 ++++++
 .../services/HDFS/configuration/hadoop-env.xml  | 223 +++++++++
 .../services/HDFS/configuration/hbase-site.xml  | 137 ++++++
 .../services/HDFS/configuration/hdfs-log4j.xml  | 199 ++++++++
 .../services/HDFS/configuration/hdfs-site.xml   | 396 ++++++++++++++++
 .../HDP/0.4/services/HDFS/metainfo.xml          |  30 ++
 .../0.4/services/HDFS/package/dummy-script.py   |  20 +
 .../HDP/0.4/services/HIVE/metainfo.xml          |  26 ++
 .../HDP/0.4/services/MAPREDUCE/metainfo.xml     |  23 +
 .../HDP/0.4/services/ZOOKEEPER/metainfo.xml     |  26 ++
 ambari-web/app/assets/test/tests.js             |   2 +
 ambari-web/app/controllers.js                   |   1 +
 .../app/controllers/main/service/info/metric.js | 468 +++++++++++++++++++
 .../controllers/main/service/info/summary.js    | 449 +-----------------
 .../service/widgets/create/wizard_controller.js |   2 +-
 ambari-web/app/messages.js                      |   1 +
 ambari-web/app/styles/common.less               |   1 +
 .../app/styles/enhanced_service_dashboard.less  |  26 +-
 .../app/styles/theme/bootstrap-ambari.css       |  64 ++-
 ambari-web/app/styles/top-nav.less              |  22 +
 ambari-web/app/templates/application.hbs        |  39 +-
 .../app/templates/main/service/info/metrics.hbs | 104 +++++
 .../app/templates/main/service/info/summary.hbs |  84 ----
 ambari-web/app/templates/main/service/item.hbs  |   7 +-
 ambari-web/app/views.js                         |   1 +
 ambari-web/app/views/main/service/info/menu.js  |   7 +
 .../app/views/main/service/info/metrics_view.js | 290 ++++++++++++
 .../app/views/main/service/info/summary.js      | 315 ++-----------
 ambari-web/app/views/main/service/item.js       |   6 +
 .../main/service/info/metric_test.js            | 110 +++++
 .../main/service/info/summary_test.js           |  76 ---
 .../main/service/info/metrics_view_test.js      | 334 +++++++++++++
 .../views/main/service/info/summary_test.js     | 281 +----------
 .../savedQueries/SavedQueryService.java         |  46 +-
 .../resources/ui/app/routes/queries/query.js    |  95 ++--
 .../resources/ui/app/services/saved-queries.js  |  21 +
 116 files changed, 4817 insertions(+), 1700 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/045d9bfe/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/045d9bfe/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/045d9bfe/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
index 8f1dc7c,6708560..696d395
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
@@@ -83,28 -80,39 +83,29 @@@ public class HostComponentResourceProvi
    // ----- Property ID constants ---------------------------------------------
  
    // Host Components
 -  public static final String HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID
 -      = PropertyHelper.getPropertyId("HostRoles", "cluster_name");
 -  public static final String HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID
 -      = PropertyHelper.getPropertyId("HostRoles", "service_name");
 -  public static final String HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID
 -      = PropertyHelper.getPropertyId("HostRoles", "component_name");
 -  public static final String HOST_COMPONENT_DISPLAY_NAME_PROPERTY_ID
 -      = PropertyHelper.getPropertyId("HostRoles", "display_name");
 -  public static final String HOST_COMPONENT_HOST_NAME_PROPERTY_ID
 -      = PropertyHelper.getPropertyId("HostRoles", "host_name");
 -  public static final String HOST_COMPONENT_PUBLIC_HOST_NAME_PROPERTY_ID
 -      = PropertyHelper.getPropertyId("HostRoles", "public_host_name");
 -  public static final String HOST_COMPONENT_STATE_PROPERTY_ID
 -      = PropertyHelper.getPropertyId("HostRoles", "state");
 -  public static final String HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID
 -      = PropertyHelper.getPropertyId("HostRoles", "desired_state");
 -  public static final String HOST_COMPONENT_VERSION_PROPERTY_ID
 -      = PropertyHelper.getPropertyId("HostRoles", "version");
 -  public static final String HOST_COMPONENT_DESIRED_STACK_ID_PROPERTY_ID
 -      = PropertyHelper.getPropertyId("HostRoles", "desired_stack_id");
 -  public static final String HOST_COMPONENT_DESIRED_REPOSITORY_VERSION
 -    = PropertyHelper.getPropertyId("HostRoles", "desired_repository_version");
 -  public static final String HOST_COMPONENT_ACTUAL_CONFIGS_PROPERTY_ID
 -      = PropertyHelper.getPropertyId("HostRoles", "actual_configs");
 -  public static final String HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID
 -      = PropertyHelper.getPropertyId("HostRoles", "stale_configs");
 -  public static final String HOST_COMPONENT_RELOAD_CONFIGS_PROPERTY_ID
 -      = PropertyHelper.getPropertyId("HostRoles", "reload_configs");
 -  public static final String HOST_COMPONENT_DESIRED_ADMIN_STATE_PROPERTY_ID
 -      = PropertyHelper.getPropertyId("HostRoles", "desired_admin_state");
 -  public static final String HOST_COMPONENT_MAINTENANCE_STATE_PROPERTY_ID
 -      = "HostRoles/maintenance_state";
 -  public static final String HOST_COMPONENT_UPGRADE_STATE_PROPERTY_ID = "HostRoles/upgrade_state";
 +  public static final String HOST_COMPONENT_CLUSTER_ID_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "cluster_id";
 +  public static final String HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "cluster_name";
 +  public static final String HOST_COMPONENT_SERVICE_GROUP_ID_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "service_group_id";
 +  public static final String HOST_COMPONENT_SERVICE_GROUP_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "service_group_name";
 +  public static final String HOST_COMPONENT_SERVICE_ID_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "service_id";
 +  public static final String HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "service_name";
 +  public static final String HOST_COMPONENT_SERVICE_TYPE_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "service_type";
 +  public static final String HOST_COMPONENT_HOST_COMPONENT_ID_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "id";
 +  public static final String HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "component_name";
 +  public static final String HOST_COMPONENT_DISPLAY_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "display_name";
 +  public static final String HOST_COMPONENT_HOST_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "host_name";
 +  public static final String HOST_COMPONENT_PUBLIC_HOST_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "public_host_name";
 +  public static final String HOST_COMPONENT_STATE_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "state";
 +  public static final String HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "desired_state";
 +  public static final String HOST_COMPONENT_VERSION_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "version";
 +  public static final String HOST_COMPONENT_DESIRED_STACK_ID_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "desired_stack_id";
 +  public static final String HOST_COMPONENT_DESIRED_REPOSITORY_VERSION = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "desired_repository_version";
 +  public static final String HOST_COMPONENT_ACTUAL_CONFIGS_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "actual_configs";
 +  public static final String HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "stale_configs";
++  public static final String HOST_COMPONENT_RELOAD_CONFIGS_PROPERTY_ID = PropertyHelper.getPropertyId("HostRoles", "reload_configs");
 +  public static final String HOST_COMPONENT_DESIRED_ADMIN_STATE_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "desired_admin_state";
 +  public static final String HOST_COMPONENT_MAINTENANCE_STATE_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "maintenance_state";
 +  public static final String HOST_COMPONENT_UPGRADE_STATE_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "upgrade_state";
  
    //Parameters from the predicate
    private static final String QUERY_PARAMETERS_RUN_SMOKE_TEST_ID = "params/run_smoke_test";
@@@ -246,26 -222,36 +247,27 @@@
  
      for (ServiceComponentHostResponse response : responses) {
        Resource resource = new ResourceImpl(Resource.Type.HostComponent);
 -      setResourceProperty(resource, HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID,
 -              response.getClusterName(), requestedIds);
 -      setResourceProperty(resource, HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID,
 -              response.getServiceName(), requestedIds);
 -      setResourceProperty(resource, HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID,
 -              response.getComponentName(), requestedIds);
 -      setResourceProperty(resource, HOST_COMPONENT_DISPLAY_NAME_PROPERTY_ID,
 -              response.getDisplayName(), requestedIds);
 -      setResourceProperty(resource, HOST_COMPONENT_HOST_NAME_PROPERTY_ID,
 -              response.getHostname(), requestedIds);
 -      setResourceProperty(resource, HOST_COMPONENT_PUBLIC_HOST_NAME_PROPERTY_ID,
 -          response.getPublicHostname(), requestedIds);
 -      setResourceProperty(resource, HOST_COMPONENT_STATE_PROPERTY_ID,
 -              response.getLiveState(), requestedIds);
 -      setResourceProperty(resource, HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID,
 -              response.getDesiredState(), requestedIds);
 -      setResourceProperty(resource, HOST_COMPONENT_VERSION_PROPERTY_ID, response.getVersion(),
 -          requestedIds);
 -      setResourceProperty(resource, HOST_COMPONENT_DESIRED_STACK_ID_PROPERTY_ID,
 -              response.getDesiredStackVersion(), requestedIds);
 -      setResourceProperty(resource, HOST_COMPONENT_ACTUAL_CONFIGS_PROPERTY_ID,
 -              response.getActualConfigs(), requestedIds);
 -      setResourceProperty(resource, HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID,
 -              response.isStaleConfig(), requestedIds);
 -      setResourceProperty(resource, HOST_COMPONENT_RELOAD_CONFIGS_PROPERTY_ID,
 -              response.isReloadConfig(), requestedIds);
 -      setResourceProperty(resource, HOST_COMPONENT_UPGRADE_STATE_PROPERTY_ID,
 -              response.getUpgradeState(), requestedIds);
 -      setResourceProperty(resource, HOST_COMPONENT_DESIRED_REPOSITORY_VERSION,
 -          response.getDesiredRepositoryVersion(), requestedIds);
 +      setResourceProperty(resource, HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID, response.getClusterName(), requestedIds);
 +      setResourceProperty(resource, HOST_COMPONENT_CLUSTER_ID_PROPERTY_ID, response.getClusterId(), requestedIds);
 +      setResourceProperty(resource, HOST_COMPONENT_SERVICE_GROUP_ID_PROPERTY_ID, response.getServiceGroupId(), requestedIds);
 +      setResourceProperty(resource, HOST_COMPONENT_SERVICE_GROUP_NAME_PROPERTY_ID, response.getServiceGroupName(), requestedIds);
 +      setResourceProperty(resource, HOST_COMPONENT_SERVICE_ID_PROPERTY_ID, response.getServiceId(), requestedIds);
 +      setResourceProperty(resource, HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID, response.getServiceName(), requestedIds);
 +      setResourceProperty(resource, HOST_COMPONENT_SERVICE_TYPE_PROPERTY_ID, response.getServiceType(), requestedIds);
 +      setResourceProperty(resource, HOST_COMPONENT_HOST_COMPONENT_ID_PROPERTY_ID, response.getHostComponentId(), requestedIds);
 +      setResourceProperty(resource, HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID, response.getComponentName(), requestedIds);
 +      setResourceProperty(resource, HOST_COMPONENT_DISPLAY_NAME_PROPERTY_ID, response.getDisplayName(), requestedIds);
 +      setResourceProperty(resource, HOST_COMPONENT_HOST_NAME_PROPERTY_ID, response.getHostname(), requestedIds);
 +      setResourceProperty(resource, HOST_COMPONENT_PUBLIC_HOST_NAME_PROPERTY_ID, response.getPublicHostname(), requestedIds);
 +      setResourceProperty(resource, HOST_COMPONENT_STATE_PROPERTY_ID, response.getLiveState(), requestedIds);
 +      setResourceProperty(resource, HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID, response.getDesiredState(), requestedIds);
 +      setResourceProperty(resource, HOST_COMPONENT_VERSION_PROPERTY_ID, response.getVersion(), requestedIds);
 +      setResourceProperty(resource, HOST_COMPONENT_DESIRED_STACK_ID_PROPERTY_ID, response.getDesiredStackVersion(), requestedIds);
 +      setResourceProperty(resource, HOST_COMPONENT_ACTUAL_CONFIGS_PROPERTY_ID, response.getActualConfigs(), requestedIds);
 +      setResourceProperty(resource, HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID, response.isStaleConfig(), requestedIds);
++      setResourceProperty(resource, HOST_COMPONENT_RELOAD_CONFIGS_PROPERTY_ID, response.isReloadConfig(), requestedIds);
 +      setResourceProperty(resource, HOST_COMPONENT_UPGRADE_STATE_PROPERTY_ID, response.getUpgradeState(), requestedIds);
 +      setResourceProperty(resource, HOST_COMPONENT_DESIRED_REPOSITORY_VERSION, response.getDesiredRepositoryVersion(), requestedIds);
  
        if (response.getAdminState() != null) {
          setResourceProperty(resource, HOST_COMPONENT_DESIRED_ADMIN_STATE_PROPERTY_ID,

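For orientation, both constant styles in the hunk above resolve to the same path-style property IDs that the REST layer matches against (e.g. "HostRoles/component_name"). A minimal standalone sketch of the two idioms, assuming RESPONSE_KEY is "HostRoles" and PropertyHelper.EXTERNAL_PATH_SEP is "/" (stand-ins here, not the real PropertyHelper):

    public final class PropertyIdSketch {
      // stand-ins for the real constants in PropertyHelper
      static final String EXTERNAL_PATH_SEP = "/";
      static final String RESPONSE_KEY = "HostRoles";

      // older idiom: a helper joins category and name
      static String getPropertyId(String category, String name) {
        return category + EXTERNAL_PATH_SEP + name;
      }

      public static void main(String[] args) {
        String oldStyle = getPropertyId("HostRoles", "component_name");
        String newStyle = RESPONSE_KEY + EXTERNAL_PATH_SEP + "component_name";
        // prints: HostRoles/component_name true
        System.out.println(oldStyle + " " + oldStyle.equals(newStyle));
      }
    }
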
http://git-wip-us.apache.org/repos/asf/ambari/blob/045d9bfe/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListener.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/045d9bfe/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/045d9bfe/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/045d9bfe/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/045d9bfe/ambari-server/src/main/resources/properties.json
----------------------------------------------------------------------
diff --cc ambari-server/src/main/resources/properties.json
index 3e73217,1d12f83..a995049
--- a/ambari-server/src/main/resources/properties.json
+++ b/ambari-server/src/main/resources/properties.json
@@@ -57,11 -53,10 +57,12 @@@
          "HostRoles/actual_configs",
          "params/run_smoke_test",
          "HostRoles/stale_configs",
+         "HostRoles/reload_configs",
          "HostRoles/desired_admin_state",
          "HostRoles/maintenance_state",
 +        "HostRoles/service_id",
          "HostRoles/service_name",
 +        "HostRoles/service_type",
          "HostRoles/upgrade_state",
          "_"
      ],

http://git-wip-us.apache.org/repos/asf/ambari/blob/045d9bfe/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/045d9bfe/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/045d9bfe/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
----------------------------------------------------------------------


[38/50] [abbrv] ambari git commit: AMBARI-22183: Fix ambari-web unit tests in branch-feature-AMBARI-14714 (jluniya)

Posted by ja...@apache.org.
AMBARI-22183: Fix ambari-web unit tests in branch-feature-AMBARI-14714 (jluniya)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/eb6b21c0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/eb6b21c0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/eb6b21c0

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: eb6b21c00aee12372d6badd0ebe98c680378dcb0
Parents: c36afcd
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Mon Oct 9 23:39:59 2017 -0700
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Mon Oct 9 23:39:59 2017 -0700

----------------------------------------------------------------------
 ambari-web/app/controllers/global/update_controller.js           | 2 +-
 ambari-web/test/controllers/installer_test.js                    | 2 +-
 .../controllers/main/admin/kerberos/step3_controller_test.js     | 2 +-
 ambari-web/test/controllers/main/service/add_controller_test.js  | 4 ++--
 ambari-web/test/controllers/wizard/step8_test.js                 | 2 +-
 ambari-web/test/mappers/service_mapper_test.js                   | 4 ++--
 ambari-web/test/mappers/service_metrics_mapper_test.js           | 4 ++--
 ambari-web/test/mixins/common/configs/configs_saver_test.js      | 2 +-
 8 files changed, 11 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/eb6b21c0/ambari-web/app/controllers/global/update_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/global/update_controller.js b/ambari-web/app/controllers/global/update_controller.js
index 0758686..eee4720 100644
--- a/ambari-web/app/controllers/global/update_controller.js
+++ b/ambari-web/app/controllers/global/update_controller.js
@@ -537,7 +537,7 @@ App.UpdateController = Em.Controller.extend({
 
   updateComponentsState: function (callback) {
     var testUrl = '/data/services/HDP2/components_state.json';
-    var realUrl = '/components/?fields=ServiceComponentInfo/service_name,ServiceComponentInfo/service_display_name,' +
+    var realUrl = '/components/?fields=ServiceComponentInfo/service_name,ServiceComponentInfo/service_type,' +
       'ServiceComponentInfo/category,ServiceComponentInfo/installed_count,ServiceComponentInfo/started_count,ServiceComponentInfo/init_count,ServiceComponentInfo/install_failed_count,ServiceComponentInfo/unknown_count,ServiceComponentInfo/total_count,ServiceComponentInfo/display_name,host_components/HostRoles/host_name&minimal_response=true';
     var url = this.getUrl(testUrl, realUrl);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb6b21c0/ambari-web/test/controllers/installer_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/installer_test.js b/ambari-web/test/controllers/installer_test.js
index d2d4f2c..a33af65 100644
--- a/ambari-web/test/controllers/installer_test.js
+++ b/ambari-web/test/controllers/installer_test.js
@@ -528,7 +528,7 @@ describe('App.InstallerController', function () {
       });
 
       it('should call loadStacksVersions, stack versions not loaded', function () {
-        installerController.loadMap['1'][1].callback.call(checker, false).then(function(data){
+        installerController.loadMap['step1'][1].callback.call(checker, false).then(function(data){
           expect(data).to.be.true;
         });
         expect(checker.loadStacksVersions.calledOnce).to.be.true;

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb6b21c0/ambari-web/test/controllers/main/admin/kerberos/step3_controller_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/admin/kerberos/step3_controller_test.js b/ambari-web/test/controllers/main/admin/kerberos/step3_controller_test.js
index 541b9bd..dbe48d0 100644
--- a/ambari-web/test/controllers/main/admin/kerberos/step3_controller_test.js
+++ b/ambari-web/test/controllers/main/admin/kerberos/step3_controller_test.js
@@ -102,7 +102,7 @@ describe('App.KerberosWizardStep3Controller', function() {
         data: {
           context: Em.I18n.t('requestInfo.kerberosService'),
           ServiceInfo: {"state": "INSTALLED"},
-          urlParams: "ServiceInfo/state=INSTALLED&ServiceInfo/service_display_name=KERBEROS"
+          urlParams: "ServiceInfo/state=INSTALLED&ServiceInfo/service_name=KERBEROS"
         },
         success: 'startPolling',
         error: 'onTaskError'

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb6b21c0/ambari-web/test/controllers/main/service/add_controller_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/service/add_controller_test.js b/ambari-web/test/controllers/main/service/add_controller_test.js
index e955b4d..1119176 100644
--- a/ambari-web/test/controllers/main/service/add_controller_test.js
+++ b/ambari-web/test/controllers/main/service/add_controller_test.js
@@ -34,7 +34,7 @@ describe('App.AddServiceController', function() {
       res: {
         "context": Em.I18n.t('requestInfo.installServices'),
         "ServiceInfo": {"state": "INSTALLED"},
-        "urlParams": "ServiceInfo/service_display_name.in(YARN,HBASE)"
+        "urlParams": "ServiceInfo/service_name.in(YARN,HBASE)"
       }
     },
     {
@@ -42,7 +42,7 @@ describe('App.AddServiceController', function() {
       res: {
         "context": Em.I18n.t('requestInfo.installServices'),
         "ServiceInfo": {"state": "INSTALLED"},
-        "urlParams": "ServiceInfo/service_display_name.in(OOZIE,HDFS,YARN,MAPREDUCE2)"
+        "urlParams": "ServiceInfo/service_name.in(OOZIE,HDFS,YARN,MAPREDUCE2)"
       }
     }];
     tests.forEach(function(t){

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb6b21c0/ambari-web/test/controllers/wizard/step8_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/wizard/step8_test.js b/ambari-web/test/controllers/wizard/step8_test.js
index 2a52598..de43a97 100644
--- a/ambari-web/test/controllers/wizard/step8_test.js
+++ b/ambari-web/test/controllers/wizard/step8_test.js
@@ -1954,7 +1954,7 @@ describe('App.WizardStep8Controller', function () {
 
   describe('#_startDeploy', function () {
 
-    var stubbedNames = ['createCluster', 'createSelectedServices', 'createConfigurations',
+    var stubbedNames = ['createCluster', 'createServiceGroup', 'createSelectedServices', 'createConfigurations',
         'applyConfigurationsToCluster', 'createComponents', 'registerHostsToCluster', 'createConfigurationGroups',
         'createMasterHostComponents', 'createSlaveAndClientsHostComponents', 'createAdditionalClientComponents',
         'createAdditionalHostComponents'],

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb6b21c0/ambari-web/test/mappers/service_mapper_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/mappers/service_mapper_test.js b/ambari-web/test/mappers/service_mapper_test.js
index bfdff64..4a8d49d 100644
--- a/ambari-web/test/mappers/service_mapper_test.js
+++ b/ambari-web/test/mappers/service_mapper_test.js
@@ -178,7 +178,7 @@ describe('App.serviceMetricsMapper', function () {
           {
             "ServiceComponentInfo" : {
               "component_name" : "STORM_UI_SERVER",
-              "service_display_name" : "STORM"
+              "service_name" : "STORM"
             },
             "metrics" : {
               "api" : {
@@ -231,7 +231,7 @@ describe('App.serviceMetricsMapper', function () {
           {
             "ServiceComponentInfo" : {
               "component_name" : "STORM_REST_API",
-              "service_display_name" : "STORM"
+              "service_name" : "STORM"
             },
             "metrics" : {
               "api" : {

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb6b21c0/ambari-web/test/mappers/service_metrics_mapper_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/mappers/service_metrics_mapper_test.js b/ambari-web/test/mappers/service_metrics_mapper_test.js
index ea2c977..04dbce0 100644
--- a/ambari-web/test/mappers/service_metrics_mapper_test.js
+++ b/ambari-web/test/mappers/service_metrics_mapper_test.js
@@ -56,13 +56,13 @@ describe('App.serviceMetricsMapper', function () {
     var services = [
       {
         ServiceInfo: {
-          service_display_name: "HDFS"
+          service_name: "HDFS"
         },
         components: [
           {
             ServiceComponentInfo: {
               component_name: "NAMENODE",
-              service_display_name: "HDFS"
+              service_name: "HDFS"
             },
             host_components: [
               {

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb6b21c0/ambari-web/test/mixins/common/configs/configs_saver_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/mixins/common/configs/configs_saver_test.js b/ambari-web/test/mixins/common/configs/configs_saver_test.js
index ef3bab5..f20a49f 100644
--- a/ambari-web/test/mixins/common/configs/configs_saver_test.js
+++ b/ambari-web/test/mixins/common/configs/configs_saver_test.js
@@ -1102,7 +1102,7 @@ describe('App.ConfigsSaverMixin', function() {
         header: Em.I18n.t('services.service.config.saved'),
         message: Em.I18n.t('services.service.config.saved.message'),
         messageClass: 'alert alert-success',
-        urlParams: ',ServiceComponentInfo/installed_count,ServiceComponentInfo/total_count&ServiceComponentInfo/service_display_name.in(HDFS)'
+        urlParams: ',ServiceComponentInfo/installed_count,ServiceComponentInfo/total_count&ServiceComponentInfo/service_name.in(HDFS)'
       });
     });
 


[31/50] [abbrv] ambari git commit: AMBARI-22155. Intermittent failure of FlumeTimelineMetricsSinkTest

Posted by ja...@apache.org.
AMBARI-22155. Intermittent failure of FlumeTimelineMetricsSinkTest


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b0c24a51
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b0c24a51
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b0c24a51

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: b0c24a5153949e4c0cbf70d217276416515c1211
Parents: 75465a8
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Mon Oct 9 18:56:36 2017 +0200
Committer: Attila Doroszlai <ad...@hortonworks.com>
Committed: Mon Oct 9 18:57:08 2017 +0200

----------------------------------------------------------------------
 .../flume/FlumeTimelineMetricsSinkTest.java     | 27 ++++++++++++--------
 1 file changed, 17 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b0c24a51/ambari-metrics/ambari-metrics-flume-sink/src/test/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSinkTest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-flume-sink/src/test/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSinkTest.java b/ambari-metrics/ambari-metrics-flume-sink/src/test/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSinkTest.java
index bd4ae6a..99da43f 100644
--- a/ambari-metrics/ambari-metrics-flume-sink/src/test/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSinkTest.java
+++ b/ambari-metrics/ambari-metrics-flume-sink/src/test/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSinkTest.java
@@ -18,17 +18,21 @@
 
 package org.apache.hadoop.metrics2.sink.flume;
 
+import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.anyString;
 import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.eq;
 import static org.easymock.EasyMock.expect;
 import static org.powermock.api.easymock.PowerMock.mockStatic;
 import static org.powermock.api.easymock.PowerMock.replay;
-import static org.powermock.api.easymock.PowerMock.replayAll;
 import static org.powermock.api.easymock.PowerMock.resetAll;
 import static org.powermock.api.easymock.PowerMock.verifyAll;
 
 import java.net.InetAddress;
 import java.util.Collections;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.flume.Context;
 import org.apache.flume.instrumentation.util.JMXPollUtil;
@@ -43,7 +47,7 @@ import org.powermock.core.classloader.annotations.PrepareForTest;
 import org.powermock.modules.junit4.PowerMockRunner;
 
 @RunWith(PowerMockRunner.class)
-@PrepareForTest(JMXPollUtil.class)
+@PrepareForTest({JMXPollUtil.class, Executors.class, FlumeTimelineMetricsSink.class})
 public class FlumeTimelineMetricsSinkTest {
   @Test
   public void testNonNumericMetricMetricExclusion() throws InterruptedException {
@@ -76,7 +80,7 @@ public class FlumeTimelineMetricsSinkTest {
     flumeTimelineMetricsSink.setMetricsCaches(Collections.singletonMap("SINK",timelineMetricsCache));
     EasyMock.expect(timelineMetricsCache.getTimelineMetric("key1"))
         .andReturn(new TimelineMetric()).once();
-    timelineMetricsCache.putTimelineMetric(EasyMock.anyObject(TimelineMetric.class));
+    timelineMetricsCache.putTimelineMetric(anyObject(TimelineMetric.class));
     EasyMock.expectLastCall().once();
     return timelineMetricsCache;
   }
@@ -86,15 +90,18 @@ public class FlumeTimelineMetricsSinkTest {
     FlumeTimelineMetricsSink flumeTimelineMetricsSink = new FlumeTimelineMetricsSink();
     TimelineMetricsCache timelineMetricsCache = getTimelineMetricsCache(flumeTimelineMetricsSink);
     flumeTimelineMetricsSink.setPollFrequency(1);
-    mockStatic(JMXPollUtil.class);
-    EasyMock.expect(JMXPollUtil.getAllMBeans()).andReturn(
-        Collections.singletonMap("component1", Collections.singletonMap("key1", "42"))).once();
-    flumeTimelineMetricsSink.start();
-    flumeTimelineMetricsSink.stop();
-    replay(JMXPollUtil.class, timelineMetricsCache);
+    mockStatic(Executors.class);
+    ScheduledExecutorService executor = createNiceMock(ScheduledExecutorService.class);
+    expect(Executors.newSingleThreadScheduledExecutor()).andReturn(executor);
+    FlumeTimelineMetricsSink.TimelineMetricsCollector collector = anyObject();
+    TimeUnit unit = anyObject();
+    expect(executor.scheduleWithFixedDelay(collector, eq(0), eq(1), unit)).andReturn(null);
+    executor.shutdown();
+    replay(timelineMetricsCache, Executors.class, executor);
+
     flumeTimelineMetricsSink.start();
-    Thread.sleep(5);
     flumeTimelineMetricsSink.stop();
+
     verifyAll();
   }
 

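The fix above removes the Thread.sleep race by mocking the executor factory, so no real background thread is involved. The pattern generalizes; a minimal sketch with PowerMock/EasyMock, using a hypothetical Sink class (not the real FlumeTimelineMetricsSink) and assuming the task is scheduled with initialDelay 0 and delay 1:

    import static org.easymock.EasyMock.anyObject;
    import static org.easymock.EasyMock.createNiceMock;
    import static org.easymock.EasyMock.eq;
    import static org.easymock.EasyMock.expect;
    import static org.powermock.api.easymock.PowerMock.mockStatic;
    import static org.powermock.api.easymock.PowerMock.replay;
    import static org.powermock.api.easymock.PowerMock.verifyAll;

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.powermock.core.classloader.annotations.PrepareForTest;
    import org.powermock.modules.junit4.PowerMockRunner;

    @RunWith(PowerMockRunner.class)
    @PrepareForTest({Executors.class, Sink.class}) // prepare the caller of the static factory too
    public class SinkTest {
      @Test
      public void startSchedulesExactlyOneTask() {
        mockStatic(Executors.class);
        ScheduledExecutorService executor = createNiceMock(ScheduledExecutorService.class);
        expect(Executors.newSingleThreadScheduledExecutor()).andReturn(executor);
        expect(executor.scheduleWithFixedDelay(
            anyObject(Runnable.class), eq(0L), eq(1L), eq(TimeUnit.SECONDS))).andReturn(null);
        executor.shutdown();
        replay(Executors.class, executor);

        Sink sink = new Sink();
        sink.start(); // must call Executors.newSingleThreadScheduledExecutor() internally
        sink.stop();

        verifyAll(); // fails if start()/stop() scheduled or shut down anything unexpected
      }
    }

    // hypothetical class under test
    class Sink {
      private ScheduledExecutorService executor;
      void start() {
        executor = Executors.newSingleThreadScheduledExecutor();
        executor.scheduleWithFixedDelay(() -> { /* poll metrics */ }, 0L, 1L, TimeUnit.SECONDS);
      }
      void stop() { executor.shutdown(); }
    }
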

[30/50] [abbrv] ambari git commit: AMBARI-22159. Replace hostgroup vars for Druid

Posted by ja...@apache.org.
AMBARI-22159. Replace hostgroup vars for Druid


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/75465a83
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/75465a83
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/75465a83

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: 75465a83bd743bb3a2fa74acf30cfca4d0a2287c
Parents: f1c4626
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Mon Oct 9 14:40:02 2017 +0200
Committer: Attila Doroszlai <ad...@hortonworks.com>
Committed: Mon Oct 9 18:39:22 2017 +0200

----------------------------------------------------------------------
 .../BlueprintConfigurationProcessor.java        | 81 +++++++++++++++-----
 .../BlueprintConfigurationProcessorTest.java    | 32 ++++++++
 2 files changed, 92 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/75465a83/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
index 5a6e2cc..03f84a5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
@@ -32,6 +32,7 @@ import java.util.LinkedHashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Set;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
@@ -1367,11 +1368,56 @@ public class BlueprintConfigurationProcessor {
                                              ClusterTopology topology);
   }
 
+  private static class HostGroupUpdater implements PropertyUpdater {
+
+    public static final PropertyUpdater INSTANCE = new HostGroupUpdater();
+
+    @Override
+    public String updateForClusterCreate(String propertyName,
+      String origValue,
+      Map<String, Map<String, String>> properties,
+      ClusterTopology topology) {
+
+      //todo: getHostStrings
+      Matcher m = HostGroup.HOSTGROUP_REGEX.matcher(origValue);
+      if (m.find()) {
+        String hostGroupName = m.group(1);
+
+        HostGroupInfo groupInfo = topology.getHostGroupInfo().get(hostGroupName);
+        if (groupInfo == null) {
+          //todo: this should be validated in configuration validation
+          throw new RuntimeException(
+            "Encountered a host group token in configuration which couldn't be matched to a host group: "
+              + hostGroupName);
+        }
+
+        //todo: warn if > hosts
+        return origValue.replace(m.group(0), groupInfo.getHostNames().iterator().next());
+      }
+
+      return origValue;
+    }
+
+    @Override
+    public Collection<String> getRequiredHostGroups(String propertyName,
+      String origValue,
+      Map<String, Map<String, String>> properties,
+      ClusterTopology topology) {
+      //todo: getHostStrings
+      Matcher m = HostGroup.HOSTGROUP_REGEX.matcher(origValue);
+      if (m.find()) {
+        String hostGroupName = m.group(1);
+        return Collections.singleton(hostGroupName);
+      }
+      return Collections.emptySet();
+    }
+  }
+
   /**
    * Topology based updater which replaces the original host name of a property with the host name
    * which runs the associated (master) component in the new cluster.
    */
-  private static class SingleHostTopologyUpdater implements PropertyUpdater {
+  private static class SingleHostTopologyUpdater extends HostGroupUpdater {
     /**
      * Component name
      */
@@ -1402,21 +1448,9 @@ public class BlueprintConfigurationProcessor {
                                          Map<String, Map<String, String>> properties,
                                          ClusterTopology topology)  {
 
-      //todo: getHostStrings
-      Matcher m = HostGroup.HOSTGROUP_REGEX.matcher(origValue);
-      if (m.find()) {
-        String hostGroupName = m.group(1);
-
-        HostGroupInfo groupInfo = topology.getHostGroupInfo().get(hostGroupName);
-        if (groupInfo == null) {
-          //todo: this should be validated in configuration validation
-          throw new RuntimeException(
-              "Encountered a host group token in configuration which couldn't be matched to a host group: "
-              + hostGroupName);
-        }
-
-        //todo: warn if > hosts
-        return origValue.replace(m.group(0), groupInfo.getHostNames().iterator().next());
+      String replacedValue = super.updateForClusterCreate(propertyName, origValue, properties, topology);
+      if (!Objects.equals(origValue, replacedValue)) {
+        return replacedValue;
       } else {
         int matchingGroupCount = topology.getHostGroupsForComponent(component).size();
         if (matchingGroupCount == 1) {
@@ -1525,11 +1559,9 @@ public class BlueprintConfigurationProcessor {
                                                     String origValue,
                                                     Map<String, Map<String, String>> properties,
                                                     ClusterTopology topology) {
-      //todo: getHostStrings
-      Matcher m = HostGroup.HOSTGROUP_REGEX.matcher(origValue);
-      if (m.find()) {
-        String hostGroupName = m.group(1);
-        return Collections.singleton(hostGroupName);
+      Collection<String> result = super.getRequiredHostGroups(propertyName, origValue, properties, topology);
+      if (!result.isEmpty()) {
+        return result;
       } else {
         Collection<String> matchingGroups = topology.getHostGroupsForComponent(component);
         int matchingGroupCount = matchingGroups.size();
@@ -2351,6 +2383,7 @@ public class BlueprintConfigurationProcessor {
     allUpdaters.add(nonTopologyUpdaters);
 
     Map<String, PropertyUpdater> amsSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> druidCommon = new HashMap<>();
     Map<String, PropertyUpdater> hdfsSiteMap = new HashMap<>();
     Map<String, PropertyUpdater> mapredSiteMap = new HashMap<>();
     Map<String, PropertyUpdater> coreSiteMap = new HashMap<>();
@@ -2404,6 +2437,7 @@ public class BlueprintConfigurationProcessor {
     Map<String, PropertyUpdater> zookeeperEnvMap = new HashMap<>();
 
     singleHostTopologyUpdaters.put("ams-site", amsSiteMap);
+    singleHostTopologyUpdaters.put("druid-common", druidCommon);
     singleHostTopologyUpdaters.put("hdfs-site", hdfsSiteMap);
     singleHostTopologyUpdaters.put("mapred-site", mapredSiteMap);
     singleHostTopologyUpdaters.put("core-site", coreSiteMap);
@@ -2775,6 +2809,11 @@ public class BlueprintConfigurationProcessor {
         }
       }
     });
+
+    // DRUID
+    druidCommon.put("metastore_hostname", HostGroupUpdater.INSTANCE);
+    druidCommon.put("druid.metadata.storage.connector.connectURI", HostGroupUpdater.INSTANCE);
+    druidCommon.put("druid.zk.service.host", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
   }
 
   private static void addUnitPropertyUpdaters() {

http://git-wip-us.apache.org/repos/asf/ambari/blob/75465a83/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
index 68d6349..d137f2c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
@@ -88,6 +88,7 @@ import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
 
 /**
  * BlueprintConfigurationProcessor unit tests.
@@ -7933,6 +7934,37 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     assertEquals(someString, metricsReporterRegister);
   }
 
+  @Test
+  public void druidProperties() throws Exception {
+    Map<String, Map<String, String>> properties = new HashMap<>();
+    Map<String, String> druidCommon = new HashMap<>();
+    String connectUriKey = "druid.metadata.storage.connector.connectURI";
+    String metastoreHostnameKey = "metastore_hostname";
+    String connectUriTemplate = "jdbc:mysql://%s:3306/druid?createDatabaseIfNotExist=true";
+    druidCommon.put(connectUriKey, String.format(connectUriTemplate, "%HOSTGROUP::group1%"));
+    druidCommon.put(metastoreHostnameKey, "%HOSTGROUP::group1%");
+    properties.put("druid-common", druidCommon);
+
+    Map<String, Map<String, String>> parentProperties = new HashMap<>();
+    Configuration parentClusterConfig = new Configuration(parentProperties, Collections.<String, Map<String, Map<String, String>>>emptyMap());
+    Configuration clusterConfig = new Configuration(properties, Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
+
+    Collection<String> hgComponents1 = Sets.newHashSet("DRUID_COORDINATOR");
+    TestHostGroup group1 = new TestHostGroup("group1", hgComponents1, Collections.singleton("host1"));
+
+    Collection<String> hgComponents2 = Sets.newHashSet("DRUID_BROKER", "DRUID_OVERLORD", "DRUID_ROUTER");
+    TestHostGroup group2 = new TestHostGroup("group2", hgComponents2, Collections.singleton("host2"));
+
+    Collection<TestHostGroup> hostGroups = Arrays.asList(group1, group2);
+
+    ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+
+    configProcessor.doUpdateForClusterCreate();
+
+    assertEquals(String.format(connectUriTemplate, "host1"), clusterConfig.getPropertyValue("druid-common", connectUriKey));
+    assertEquals("host1", clusterConfig.getPropertyValue("druid-common", metastoreHostnameKey));
+  }
 
   @Test
   public void testAmsPropertiesDefault() throws Exception {

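The HostGroupUpdater extracted above centralizes %HOSTGROUP::name% token resolution. A minimal standalone sketch of that resolution step, assuming the token grammar %HOSTGROUP::<name>% (the real pattern lives in HostGroup.HOSTGROUP_REGEX and may differ in detail):

    import java.util.Collections;
    import java.util.Map;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public final class HostGroupTokenSketch {
      // assumed token shape
      static final Pattern HOSTGROUP = Pattern.compile("%HOSTGROUP::(\\S+?)%");

      static String resolve(String value, Map<String, String> groupToFirstHost) {
        Matcher m = HOSTGROUP.matcher(value);
        if (m.find()) {
          String host = groupToFirstHost.get(m.group(1));
          if (host == null) {
            // mirrors the RuntimeException in the updater when no group matches
            throw new RuntimeException("Unmatched host group token: " + m.group(1));
          }
          // replace only the token, keeping the surrounding value (e.g. a JDBC URI) intact
          return value.replace(m.group(0), host);
        }
        return value; // no token: leave the value as-is
      }

      public static void main(String[] args) {
        String uri = "jdbc:mysql://%HOSTGROUP::group1%:3306/druid?createDatabaseIfNotExist=true";
        // prints: jdbc:mysql://host1:3306/druid?createDatabaseIfNotExist=true
        System.out.println(resolve(uri, Collections.singletonMap("group1", "host1")));
      }
    }
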

[34/50] [abbrv] ambari git commit: AMBARI-22079. Addendum: Upgrade Yarn version for Logsearch Web (Istvan Tobias via oleewere)

Posted by ja...@apache.org.
AMBARI-22079. Addendum: Upgrade Yarn version for Logsearch Web (Istvan Tobias via oleewere)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0ca85137
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0ca85137
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0ca85137

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: 0ca85137fdf1667e3fddb28c220a6e1f6e88a2ee
Parents: 7e0fe29
Author: Istvan Tobias <to...@gmail.com>
Authored: Mon Oct 9 21:21:52 2017 +0200
Committer: Oliver Szabo <ol...@gmail.com>
Committed: Mon Oct 9 21:21:52 2017 +0200

----------------------------------------------------------------------
 ambari-logsearch/ambari-logsearch-web/pom.xml | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/0ca85137/ambari-logsearch/ambari-logsearch-web/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/pom.xml b/ambari-logsearch/ambari-logsearch-web/pom.xml
index d641d5a..a0621d9 100644
--- a/ambari-logsearch/ambari-logsearch-web/pom.xml
+++ b/ambari-logsearch/ambari-logsearch-web/pom.xml
@@ -69,13 +69,16 @@
             </configuration>
           </execution>
           <execution>
-            <id>generate dist</id>
-            <phase>generate-resources</phase>
+            <id>webpack build</id>
             <goals>
-             <goal>yarn</goal>
+              <goal>webpack</goal>
             </goals>
+            <!-- optional: the default phase is "generate-resources" -->
+            <phase>generate-resources</phase>
             <configuration>
-              <arguments>build-prod</arguments>
+              <!-- optional: if not specified, it will run webpack's default
+              build (and you can remove this whole <configuration> section.) -->
+              <arguments>-p</arguments>
             </configuration>
           </execution>
           <execution>


[03/50] [abbrv] ambari git commit: AMBARI-22131 Move resources/stacks/HDP/3.0/widgets.json to resources/widgets.json (dsen)

Posted by ja...@apache.org.
AMBARI-22131 Move resources/stacks/HDP/3.0/widgets.json to resources/widgets.json (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/388cb418
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/388cb418
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/388cb418

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: 388cb4180b3ea08e25d95aaf67bac68e8283a518
Parents: f44c866
Author: Dmytro Sen <ds...@apache.org>
Authored: Fri Oct 6 17:07:24 2017 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Fri Oct 6 17:07:41 2017 +0300

----------------------------------------------------------------------
 .../server/api/services/AmbariMetaInfo.java     |  8 ++
 .../AmbariManagementControllerImpl.java         | 22 ++---
 .../internal/ServiceResourceProvider.java       |  8 --
 .../internal/StackArtifactResourceProvider.java | 18 +---
 .../server/orm/entities/WidgetLayoutEntity.java |  6 +-
 .../ambari/server/stack/StackDirectory.java     | 18 ----
 .../apache/ambari/server/stack/StackModule.java |  5 --
 .../apache/ambari/server/state/StackInfo.java   |  8 --
 .../resources/stacks/HDP/2.0.6/widgets.json     | 95 --------------------
 .../main/resources/stacks/HDP/3.0/widgets.json  | 95 --------------------
 ambari-server/src/main/resources/widgets.json   | 95 ++++++++++++++++++++
 .../server/api/services/AmbariMetaInfoTest.java |  8 ++
 .../AmbariManagementControllerImplTest.java     |  6 +-
 .../AmbariManagementControllerTest.java         |  6 ++
 .../resources/stacks/OTHER/1.0/widgets.json     | 95 --------------------
 ambari-server/src/test/resources/widgets.json   | 95 ++++++++++++++++++++
 16 files changed, 224 insertions(+), 364 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/388cb418/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
index de84965..425d247 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
@@ -21,6 +21,7 @@ package org.apache.ambari.server.api.services;
 import static org.apache.ambari.server.controller.spi.Resource.InternalType.Component;
 import static org.apache.ambari.server.controller.spi.Resource.InternalType.HostComponent;
 import static org.apache.ambari.server.controller.utilities.PropertyHelper.AGGREGATE_FUNCTION_IDENTIFIERS;
+import static org.apache.ambari.server.stack.StackDirectory.WIDGETS_DESCRIPTOR_FILE_NAME;
 
 import java.io.File;
 import java.io.FileReader;
@@ -125,6 +126,7 @@ public class AmbariMetaInfo {
   private File commonServicesRoot;
   private File extensionsRoot;
   private File serverVersionFile;
+  private File commonWidgetsDescriptorFile;
   private File customActionRoot;
   private Map<String, VersionDefinitionXml> versionDefinitions = null;
 
@@ -214,6 +216,8 @@ public class AmbariMetaInfo {
     serverVersionFile = new File(serverVersionFilePath);
 
     customActionRoot = new File(conf.getCustomActionDefinitionPath());
+
+    commonWidgetsDescriptorFile = new File(conf.getResourceDirPath(), WIDGETS_DESCRIPTOR_FILE_NAME);
   }
 
   /**
@@ -1435,4 +1439,8 @@ public class AmbariMetaInfo {
 
     return null;
   }
+
+  public File getCommonWidgetsDescriptorFile() {
+    return commonWidgetsDescriptorFile;
+  }
 }

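The new accessor above resolves a single shared descriptor under the server's resource directory instead of per-stack paths. A minimal sketch of that resolve-and-guard pattern (hypothetical helper, assuming only java.io.File and SLF4J):

    import java.io.File;

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public final class CommonWidgetsSketch {
      private static final Logger LOG = LoggerFactory.getLogger(CommonWidgetsSketch.class);

      /** Returns the descriptor file if it exists, else null after logging a warning. */
      static File findDescriptor(String resourceDirPath, String fileName) {
        File candidate = new File(resourceDirPath, fileName);
        if (!candidate.exists()) {
          // SLF4J uses bare {} placeholders, not printf-style %s
          LOG.warn("Common widgets file {} doesn't exist; no cluster widgets will be created.", candidate);
          return null;
        }
        return candidate;
      }
    }
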
http://git-wip-us.apache.org/repos/asf/ambari/blob/388cb418/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index b2993e3..5642575 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -5184,22 +5184,12 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
         widgetDescriptorFiles.add(widgetDescriptorFile);
       }
     } else {
-      Set<StackId> stackIds = new HashSet<>();
-
-      for (Service svc : cluster.getServices().values()) {
-        stackIds.add(svc.getDesiredStackId());
-      }
-
-      for (StackId stackId : stackIds) {
-        StackInfo stackInfo = ambariMetaInfo.getStack(stackId);
-
-        String widgetDescriptorFileLocation = stackInfo.getWidgetsDescriptorFileLocation();
-        if (widgetDescriptorFileLocation != null) {
-          File widgetDescriptorFile = new File(widgetDescriptorFileLocation);
-          if (widgetDescriptorFile.exists()) {
-            widgetDescriptorFiles.add(widgetDescriptorFile);
-          }
-        }
+      // common cluster level widgets
+      File commonWidgetsFile = ambariMetaInfo.getCommonWidgetsDescriptorFile();
+      if (commonWidgetsFile != null && commonWidgetsFile.exists()) {
+        widgetDescriptorFiles.add(commonWidgetsFile);
+      } else {
+        LOG.warn("Common widgets file with path {} doesn't exist. No cluster widgets will be created.", commonWidgetsFile);
       }
     }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/388cb418/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
index 76a4547..e65693b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
@@ -423,8 +423,6 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
     // do all validation checks
     validateCreateRequests(requests, clusters);
 
-    Set<Cluster> clustersSetFromRequests = new HashSet<>();
-
     for (ServiceRequest request : requests) {
       Cluster cluster = clusters.getCluster(request.getClusterName());
 
@@ -480,12 +478,6 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
 
       // Initialize service widgets
       getManagementController().initializeWidgetsAndLayouts(cluster, s);
-      clustersSetFromRequests.add(cluster);
-    }
-
-    // Create cluster widgets and layouts
-    for (Cluster cluster : clustersSetFromRequests) {
-      getManagementController().initializeWidgetsAndLayouts(cluster, null);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/388cb418/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
index 2e8a32a..a7f7710 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
@@ -426,7 +426,7 @@ public class StackArtifactResourceProvider extends AbstractControllerResourcePro
     }
 
     if (StringUtils.isEmpty(serviceName)) {
-      return getWidgetsDescriptorForCluster(stackInfo);
+      return null;
     } else {
       return getWidgetsDescriptorForService(stackInfo, serviceName);
     }
@@ -450,22 +450,6 @@ public class StackArtifactResourceProvider extends AbstractControllerResourcePro
     return widgetDescriptor;
   }
 
-  public Map<String, Object> getWidgetsDescriptorForCluster(StackInfo stackInfo)
-      throws NoSuchParentResourceException, IOException {
-
-    Map<String, Object> widgetDescriptor = null;
-
-    String widgetDescriptorFileLocation = stackInfo.getWidgetsDescriptorFileLocation();
-    if (widgetDescriptorFileLocation != null) {
-      File widgetDescriptorFile = new File(widgetDescriptorFileLocation);
-      if (widgetDescriptorFile.exists()) {
-        widgetDescriptor = gson.fromJson(new FileReader(widgetDescriptorFile), widgetLayoutType);
-      }
-    }
-
-    return widgetDescriptor;
-  }
-
   /**
    * Get a kerberos descriptor.
    *

http://git-wip-us.apache.org/repos/asf/ambari/blob/388cb418/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java
index 90d98fc..1fa45e9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java
@@ -33,6 +33,7 @@ import javax.persistence.OneToMany;
 import javax.persistence.OrderBy;
 import javax.persistence.Table;
 import javax.persistence.TableGenerator;
+import javax.persistence.UniqueConstraint;
 
 @Entity
 @Table(name = "widget_layout")
@@ -41,7 +42,8 @@ import javax.persistence.TableGenerator;
         pkColumnName = "sequence_name",
         valueColumnName = "sequence_value",
         pkColumnValue = "widget_layout_id_seq",
-        initialValue = 0
+        initialValue = 0,
+        uniqueConstraints=@UniqueConstraint(columnNames={"layout_name", "cluster_id"})
 )
 @NamedQueries({
     @NamedQuery(name = "WidgetLayoutEntity.findAll", query = "SELECT widgetLayout FROM WidgetLayoutEntity widgetLayout"),
@@ -56,7 +58,7 @@ public class WidgetLayoutEntity {
   @Column(name = "id", nullable = false, updatable = false)
   private Long id;
 
-  @Column(name = "layout_name", nullable = false, unique = true, length = 255)
+  @Column(name = "layout_name", nullable = false, length = 255)
   private String layoutName;
 
   @Column(name = "section_name", nullable = false, length = 255)

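For comparison with the annotation change above: the conventional place to declare a composite uniqueness rule in JPA is the entity's @Table, replacing any single-column unique = true. A minimal sketch with a hypothetical entity (not the real WidgetLayoutEntity):

    import javax.persistence.Column;
    import javax.persistence.Entity;
    import javax.persistence.Id;
    import javax.persistence.Table;
    import javax.persistence.UniqueConstraint;

    @Entity
    @Table(name = "widget_layout",
           uniqueConstraints = @UniqueConstraint(columnNames = {"layout_name", "cluster_id"}))
    public class LayoutSketch {
      @Id
      private Long id;

      // no per-column unique = true; uniqueness is now the (layout_name, cluster_id) pair
      @Column(name = "layout_name", nullable = false, length = 255)
      private String layoutName;

      @Column(name = "cluster_id", nullable = false)
      private Long clusterId;
    }
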
http://git-wip-us.apache.org/repos/asf/ambari/blob/388cb418/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
index 9259466..e3c586b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
@@ -94,11 +94,6 @@ public class StackDirectory extends StackDefinitionDirectory {
   private String kerberosDescriptorPreconfigureFilePath;
 
   /**
-   * widgets descriptor file path
-   */
-  private String widgetsDescriptorFilePath;
-
-  /**
    * repository file
    */
   private RepositoryXml repoFile;
@@ -233,15 +228,6 @@ public class StackDirectory extends StackDefinitionDirectory {
   }
 
   /**
-   * Obtain the path to the (stack-level) widgets descriptor file
-   *
-   * @return the path to the (stack-level) widgets descriptor file
-   */
-  public String getWidgetsDescriptorFilePath() {
-    return widgetsDescriptorFilePath;
-  }
-
-  /**
    * Obtain the repository directory path.
    *
    * @return repository directory path
@@ -324,10 +310,6 @@ public class StackDirectory extends StackDefinitionDirectory {
       kerberosDescriptorPreconfigureFilePath = getAbsolutePath() + File.separator + KERBEROS_DESCRIPTOR_PRECONFIGURE_FILE_NAME;
     }
 
-    if (subDirs.contains(WIDGETS_DESCRIPTOR_FILE_NAME)) {
-      widgetsDescriptorFilePath = getAbsolutePath() + File.separator + WIDGETS_DESCRIPTOR_FILE_NAME;
-    }
-
     parseUpgradePacks(subDirs);
     parseServiceDirectories(subDirs);
     parseRepoFile(subDirs);

http://git-wip-us.apache.org/repos/asf/ambari/blob/388cb418/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
index 742706d..71235f3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
@@ -294,10 +294,6 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
       stackInfo.setKerberosDescriptorPreConfigurationFileLocation(parentStack.getModuleInfo().getKerberosDescriptorPreConfigurationFileLocation());
     }
 
-    if (stackInfo.getWidgetsDescriptorFileLocation() == null) {
-      stackInfo.setWidgetsDescriptorFileLocation(parentStack.getModuleInfo().getWidgetsDescriptorFileLocation());
-    }
-
     mergeServicesWithParent(parentStack, allStacks, commonServices, extensions);
   }
 
@@ -573,7 +569,6 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
       stackInfo.setRcoFileLocation(stackDirectory.getRcoFilePath());
       stackInfo.setKerberosDescriptorFileLocation(stackDirectory.getKerberosDescriptorFilePath());
       stackInfo.setKerberosDescriptorPreConfigurationFileLocation(stackDirectory.getKerberosDescriptorPreconfigureFilePath());
-      stackInfo.setWidgetsDescriptorFileLocation(stackDirectory.getWidgetsDescriptorFilePath());
       stackInfo.setUpgradesFolder(stackDirectory.getUpgradesDir());
       stackInfo.setUpgradePacks(stackDirectory.getUpgradePacks());
       stackInfo.setConfigUpgradePack(stackDirectory.getConfigUpgradePack());

http://git-wip-us.apache.org/repos/asf/ambari/blob/388cb418/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
index dcf850f..3efc997 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
@@ -429,14 +429,6 @@ public class StackInfo implements Comparable<StackInfo>, Validable {
     this.kerberosDescriptorPreConfigurationFileLocation = kerberosDescriptorPreConfigurationFileLocation;
   }
 
-  public String getWidgetsDescriptorFileLocation() {
-    return widgetsDescriptorFileLocation;
-  }
-
-  public void setWidgetsDescriptorFileLocation(String widgetsDescriptorFileLocation) {
-    this.widgetsDescriptorFileLocation = widgetsDescriptorFileLocation;
-  }
-
   /**
    * Set the path of the stack upgrade directory.
    *

http://git-wip-us.apache.org/repos/asf/ambari/blob/388cb418/ambari-server/src/main/resources/stacks/HDP/2.0.6/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/widgets.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/widgets.json
deleted file mode 100644
index 3176354..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/widgets.json
+++ /dev/null
@@ -1,95 +0,0 @@
-{
-  "layouts": [
-    {
-      "layout_name": "default_system_heatmap",
-      "display_name": "Heatmaps",
-      "section_name": "SYSTEM_HEATMAPS",
-      "widgetLayoutInfo": [
-        {
-          "widget_name": "Host Disk Space Used %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "disk_free",
-              "metric_path": "metrics/disk/disk_free",
-              "service_name": "STACK"
-            },
-            {
-              "name": "disk_total",
-              "metric_path": "metrics/disk/disk_total",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Disk Space Used %",
-              "value": "${((disk_total-disk_free)/disk_total)*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Host Memory Used %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "mem_total",
-              "metric_path": "metrics/memory/mem_total",
-              "service_name": "STACK"
-            },
-            {
-              "name": "mem_free",
-              "metric_path": "metrics/memory/mem_free",
-              "service_name": "STACK"
-            },
-            {
-              "name": "mem_cached",
-              "metric_path": "metrics/memory/mem_cached",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Memory Used %",
-              "value": "${((mem_total-mem_free-mem_cached)/mem_total)*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Host CPU Wait IO %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "cpu_wio",
-              "metric_path": "metrics/cpu/cpu_wio",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Memory Used %",
-              "value": "${cpu_wio*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        }
-      ]
-    }
-  ]
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/388cb418/ambari-server/src/main/resources/stacks/HDP/3.0/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/widgets.json b/ambari-server/src/main/resources/stacks/HDP/3.0/widgets.json
deleted file mode 100644
index 3176354..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/widgets.json
+++ /dev/null
@@ -1,95 +0,0 @@
-{
-  "layouts": [
-    {
-      "layout_name": "default_system_heatmap",
-      "display_name": "Heatmaps",
-      "section_name": "SYSTEM_HEATMAPS",
-      "widgetLayoutInfo": [
-        {
-          "widget_name": "Host Disk Space Used %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "disk_free",
-              "metric_path": "metrics/disk/disk_free",
-              "service_name": "STACK"
-            },
-            {
-              "name": "disk_total",
-              "metric_path": "metrics/disk/disk_total",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Disk Space Used %",
-              "value": "${((disk_total-disk_free)/disk_total)*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Host Memory Used %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "mem_total",
-              "metric_path": "metrics/memory/mem_total",
-              "service_name": "STACK"
-            },
-            {
-              "name": "mem_free",
-              "metric_path": "metrics/memory/mem_free",
-              "service_name": "STACK"
-            },
-            {
-              "name": "mem_cached",
-              "metric_path": "metrics/memory/mem_cached",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Memory Used %",
-              "value": "${((mem_total-mem_free-mem_cached)/mem_total)*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Host CPU Wait IO %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "cpu_wio",
-              "metric_path": "metrics/cpu/cpu_wio",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Memory Used %",
-              "value": "${cpu_wio*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        }
-      ]
-    }
-  ]
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/388cb418/ambari-server/src/main/resources/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/widgets.json b/ambari-server/src/main/resources/widgets.json
new file mode 100644
index 0000000..3176354
--- /dev/null
+++ b/ambari-server/src/main/resources/widgets.json
@@ -0,0 +1,95 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_system_heatmap",
+      "display_name": "Heatmaps",
+      "section_name": "SYSTEM_HEATMAPS",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "Host Disk Space Used %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "disk_free",
+              "metric_path": "metrics/disk/disk_free",
+              "service_name": "STACK"
+            },
+            {
+              "name": "disk_total",
+              "metric_path": "metrics/disk/disk_total",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Disk Space Used %",
+              "value": "${((disk_total-disk_free)/disk_total)*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Host Memory Used %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "mem_total",
+              "metric_path": "metrics/memory/mem_total",
+              "service_name": "STACK"
+            },
+            {
+              "name": "mem_free",
+              "metric_path": "metrics/memory/mem_free",
+              "service_name": "STACK"
+            },
+            {
+              "name": "mem_cached",
+              "metric_path": "metrics/memory/mem_cached",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Memory Used %",
+              "value": "${((mem_total-mem_free-mem_cached)/mem_total)*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Host CPU Wait IO %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "cpu_wio",
+              "metric_path": "metrics/cpu/cpu_wio",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Memory Used %",
+              "value": "${cpu_wio*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        }
+      ]
+    }
+  ]
+}
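
The "value" fields above are template expressions over the declared metrics: each ${...} is evaluated after the named metrics are substituted, so disk usage is ((disk_total - disk_free) / disk_total) * 100, clamped to the declared max_limit of 100. A minimal sketch of that substitute-and-evaluate step, assuming a plain map of gathered metric values (the class below is illustrative only, not Ambari's actual widget evaluator):

    import java.util.Map;

    // Illustrative: compute the "Host Disk Space Used %" heatmap value by
    // substituting gathered metrics into the JSON expression
    // ${((disk_total-disk_free)/disk_total)*100}.
    public class WidgetValueSketch {
      static double diskUsedPercent(Map<String, Double> metrics) {
        double total = metrics.get("disk_total");
        double free = metrics.get("disk_free");
        return ((total - free) / total) * 100.0; // mirrors the expression
      }

      public static void main(String[] args) {
        Map<String, Double> metrics = Map.of("disk_total", 500.0, "disk_free", 125.0);
        System.out.println(diskUsedPercent(metrics)); // prints 75.0
      }
    }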

http://git-wip-us.apache.org/repos/asf/ambari/blob/388cb418/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
index 4baca5c..25e8d04 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
@@ -1894,6 +1894,14 @@ public class AmbariMetaInfoTest {
     Assert.assertTrue(descriptor.getService("NEW_SERVICE").shouldPreconfigure());
   }
 
+  @Test
+  public void testGetCommonWidgetsFile() throws AmbariException {
+    File widgetsFile = metaInfo.getCommonWidgetsDescriptorFile();
+
+    Assert.assertNotNull(widgetsFile);
+    Assert.assertEquals("/var/lib/ambari-server/resources/widgets.json", widgetsFile.getPath());
+  }
+
   private File getStackRootTmp(String buildDir) {
     return new File(buildDir + "/ambari-metaInfo");
   }
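
The new test pins the common descriptor to <resources>/widgets.json rather than a per-stack path. A minimal sketch of the lookup it exercises, assuming the file is resolved against a configured resources directory (the property key and constructor below are assumptions for illustration, not the real AmbariMetaInfo wiring):

    import java.io.File;
    import java.util.Properties;

    // Sketch: resolve the cluster-wide widgets descriptor relative to the
    // server resources directory, e.g. /var/lib/ambari-server/resources.
    public class CommonWidgetsLookup {
      private final File commonWidgetsDescriptorFile;

      public CommonWidgetsLookup(Properties config) {
        String resourcesDir = config.getProperty("resources.dir",   // assumed key
            "/var/lib/ambari-server/resources");
        this.commonWidgetsDescriptorFile = new File(resourcesDir, "widgets.json");
      }

      public File getCommonWidgetsDescriptorFile() {
        return commonWidgetsDescriptorFile;
      }
    }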

http://git-wip-us.apache.org/repos/asf/ambari/blob/388cb418/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
index a02690f..9547271 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
@@ -2367,18 +2367,14 @@ public class AmbariManagementControllerImplTest {
     Cluster cluster = createNiceMock(Cluster.class);
     Service service = createNiceMock(Service.class);
     expect(service.getDesiredStackId()).andReturn(stackId).atLeastOnce();
-    expect(cluster.getServices()).andReturn(ImmutableMap.<String, Service>builder()
-        .put("HDFS", service)
-        .build());
 
     expect(clusters.getCluster("c1")).andReturn(cluster).atLeastOnce();
 
 
     StackInfo stackInfo = createNiceMock(StackInfo.class);
-    expect(stackInfo.getWidgetsDescriptorFileLocation()).andReturn(null).once();
 
     expect(ambariMetaInfo.getStack("HDP", "2.1")).andReturn(stackInfo).atLeastOnce();
-    expect(ambariMetaInfo.getStack(stackId)).andReturn(stackInfo).atLeastOnce();
+    expect(ambariMetaInfo.getCommonWidgetsDescriptorFile()).andReturn(null).once();
 
     replay(injector, clusters, ambariMetaInfo, stackInfo, cluster, service, repoVersionDAO, repoVersion);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/388cb418/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index b370829..7094caa 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -34,6 +34,7 @@ import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.File;
 import java.io.StringReader;
 import java.lang.reflect.Type;
 import java.text.MessageFormat;
@@ -10424,6 +10425,11 @@ public class AmbariManagementControllerTest {
     Assert.assertEquals("UPDATED_BLOCKED_TIME", layoutUserWidgetEntities.get(3).getWidget().getWidgetName());
     Assert.assertEquals("HBASE_SUMMARY", layoutUserWidgetEntities.get(0).getWidget().getDefaultSectionName());
 
+    File widgetsFile = ambariMetaInfo.getCommonWidgetsDescriptorFile();
+    assertNotNull(widgetsFile);
+    assertEquals("src/test/resources/widgets.json", widgetsFile.getPath());
+    assertTrue(widgetsFile.exists());
+
     candidateLayoutEntity = null;
     for (WidgetLayoutEntity entity : layoutEntities) {
       if (entity.getLayoutName().equals("default_system_heatmap")) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/388cb418/ambari-server/src/test/resources/stacks/OTHER/1.0/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/OTHER/1.0/widgets.json b/ambari-server/src/test/resources/stacks/OTHER/1.0/widgets.json
deleted file mode 100644
index 3176354..0000000
--- a/ambari-server/src/test/resources/stacks/OTHER/1.0/widgets.json
+++ /dev/null
@@ -1,95 +0,0 @@
-{
-  "layouts": [
-    {
-      "layout_name": "default_system_heatmap",
-      "display_name": "Heatmaps",
-      "section_name": "SYSTEM_HEATMAPS",
-      "widgetLayoutInfo": [
-        {
-          "widget_name": "Host Disk Space Used %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "disk_free",
-              "metric_path": "metrics/disk/disk_free",
-              "service_name": "STACK"
-            },
-            {
-              "name": "disk_total",
-              "metric_path": "metrics/disk/disk_total",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Disk Space Used %",
-              "value": "${((disk_total-disk_free)/disk_total)*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Host Memory Used %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "mem_total",
-              "metric_path": "metrics/memory/mem_total",
-              "service_name": "STACK"
-            },
-            {
-              "name": "mem_free",
-              "metric_path": "metrics/memory/mem_free",
-              "service_name": "STACK"
-            },
-            {
-              "name": "mem_cached",
-              "metric_path": "metrics/memory/mem_cached",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Memory Used %",
-              "value": "${((mem_total-mem_free-mem_cached)/mem_total)*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Host CPU Wait IO %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "cpu_wio",
-              "metric_path": "metrics/cpu/cpu_wio",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Memory Used %",
-              "value": "${cpu_wio*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        }
-      ]
-    }
-  ]
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/388cb418/ambari-server/src/test/resources/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/widgets.json b/ambari-server/src/test/resources/widgets.json
new file mode 100644
index 0000000..3176354
--- /dev/null
+++ b/ambari-server/src/test/resources/widgets.json
@@ -0,0 +1,95 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_system_heatmap",
+      "display_name": "Heatmaps",
+      "section_name": "SYSTEM_HEATMAPS",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "Host Disk Space Used %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "disk_free",
+              "metric_path": "metrics/disk/disk_free",
+              "service_name": "STACK"
+            },
+            {
+              "name": "disk_total",
+              "metric_path": "metrics/disk/disk_total",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Disk Space Used %",
+              "value": "${((disk_total-disk_free)/disk_total)*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Host Memory Used %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "mem_total",
+              "metric_path": "metrics/memory/mem_total",
+              "service_name": "STACK"
+            },
+            {
+              "name": "mem_free",
+              "metric_path": "metrics/memory/mem_free",
+              "service_name": "STACK"
+            },
+            {
+              "name": "mem_cached",
+              "metric_path": "metrics/memory/mem_cached",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Memory Used %",
+              "value": "${((mem_total-mem_free-mem_cached)/mem_total)*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Host CPU Wait IO %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "cpu_wio",
+              "metric_path": "metrics/cpu/cpu_wio",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Memory Used %",
+              "value": "${cpu_wio*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        }
+      ]
+    }
+  ]
+}


[26/50] [abbrv] ambari git commit: AMBARI-22089. Hive View 2.0 - Unable to update existing saved queries, view creates new records under saved queries tab (pallavkul)

Posted by ja...@apache.org.
AMBARI-22089. Hive View 2.0 - Unable to update existing saved queries, view creates new records under saved queries tab (pallavkul)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/beef96d4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/beef96d4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/beef96d4

Branch: refs/heads/branch-feature-AMBARI-14714-ui
Commit: beef96d413d4986cb0cc11c317d11d5a181da14c
Parents: fc80a18
Author: pallavkul <pa...@gmail.com>
Authored: Mon Oct 9 21:17:35 2017 +0530
Committer: pallavkul <pa...@gmail.com>
Committed: Mon Oct 9 21:17:35 2017 +0530

----------------------------------------------------------------------
 .../savedQueries/SavedQueryResourceManager.java | 17 ++++--
 .../savedQueries/SavedQueryService.java         | 46 +++++++--------
 .../resources/ui/app/routes/queries/query.js    | 60 ++++++++++++++------
 .../resources/ui/app/services/saved-queries.js  | 21 +++++++
 .../hive20/src/main/resources/ui/yarn.lock      |  2 +-
 5 files changed, 98 insertions(+), 48 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/beef96d4/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/savedQueries/SavedQueryResourceManager.java
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/savedQueries/SavedQueryResourceManager.java b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/savedQueries/SavedQueryResourceManager.java
index 3690683..fff202c 100644
--- a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/savedQueries/SavedQueryResourceManager.java
+++ b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/savedQueries/SavedQueryResourceManager.java
@@ -115,12 +115,17 @@ public class SavedQueryResourceManager extends PersonalCRUDResourceManager<Saved
   }
 
   @Override
-  public SavedQuery update(SavedQuery newObject, String id) throws ItemNotFound {
-    SavedQuery savedQuery = super.update(newObject, id);
-    // Emptying short query so that in next read, this gets updated with proper value
-    // from the queryFile
-    emptyShortQueryField(savedQuery);
-    return savedQuery;
+  public SavedQuery update(SavedQuery object, String id) throws ItemNotFound {
+    String query = object.getShortQuery();
+    object.setShortQuery(makeShortQuery(query));
+    object = super.update(object, id);
+    try {
+      createDefaultQueryFile(object, query);
+    } catch (ServiceFormattedException e) {
+      cleanupAfterErrorAndThrowAgain(object, e);
+    }
+    return object;
   }
 
   @Override
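
With this change, update() persists a shortened preview up front and rewrites the backing query file, instead of blanking the preview and refilling it on the next read. A sketch of the preview step, under the assumption that makeShortQuery collapses whitespace and truncates to a fixed length (the limit and rule here are illustrative, not necessarily the real ones):

    // Sketch only: the real makeShortQuery may strip settings or use
    // a different cutoff.
    public class ShortQuerySketch {
      static String makeShortQuery(String query) {
        final int limit = 42; // assumed preview length
        String oneLine = query.replaceAll("\\s+", " ").trim();
        return oneLine.length() <= limit ? oneLine : oneLine.substring(0, limit) + "...";
      }

      public static void main(String[] args) {
        String q = "SELECT col_a, col_b, col_c FROM some_table WHERE col_a > 100";
        System.out.println(makeShortQuery(q)); // 42-character preview ending in "..."
      }
    }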

http://git-wip-us.apache.org/repos/asf/ambari/blob/beef96d4/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/savedQueries/SavedQueryService.java
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/savedQueries/SavedQueryService.java b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/savedQueries/SavedQueryService.java
index 7139ce8..35382f9 100644
--- a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/savedQueries/SavedQueryService.java
+++ b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/savedQueries/SavedQueryService.java
@@ -83,36 +83,36 @@ public class SavedQueryService extends BaseService {
   @Path("{queryId}")
   @Produces(MediaType.APPLICATION_JSON)
   public Response getOne(@PathParam("queryId") String queryId,
-		         @QueryParam("op") String operation) {
+                         @QueryParam("op") String operation) {
     try {
-      final SavedQuery savedQuery = getResourceManager().read(queryId);    
-      if(operation.equals("download")) {
-    	StreamingOutput stream = new StreamingOutput() {
-    	@Override
-    	public void write(OutputStream os) throws IOException, WebApplicationException {
-    	  Writer writer = new BufferedWriter(new OutputStreamWriter(os));
-    	  try { 
+      final SavedQuery savedQuery = getResourceManager().read(queryId);
+      if (operation != null && operation.equals("download")) {
+      StreamingOutput stream = new StreamingOutput() {
+      @Override
+      public void write(OutputStream os) throws IOException, WebApplicationException {
+        Writer writer = new BufferedWriter(new OutputStreamWriter(os));
+        try {
             BufferedReader br=new BufferedReader(new InputStreamReader(getSharedObjectsFactory().getHdfsApi().open(savedQuery.getQueryFile())));
-	    String line;
-    	    line=br.readLine();
-    	    while (line != null){
-    	      writer.write(line+"\n");  
-    	      line = br.readLine();
+            String line;
+            line = br.readLine();
+            while (line != null) {
+              writer.write(line + "\n");
+              line = br.readLine();
             }
             writer.flush();
-    	  } catch (InterruptedException e) {
-	    e.printStackTrace();
-	  } finally {
-	    writer.close();
-    	  }
-    	}
-    	};
-    	return Response.ok(stream).
+        } catch (InterruptedException e) {
+          e.printStackTrace();
+        } finally {
+          writer.close();
+        }
+      }
+      };
+      return Response.ok(stream).
                type(MediaType.TEXT_PLAIN).
-    	       build();
+               build();
       }
       else {
-    	 JSONObject object = new JSONObject();
+         JSONObject object = new JSONObject();
          object.put("savedQuery", savedQuery);
          return Response.ok(object).build();
       }
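
The download branch copies the stored query file into the HTTP response line by line. The hand-rolled reader/writer cleanup above can be written more safely with try-with-resources, which closes both streams even when a read fails; a sketch of the same StreamingOutput, with the InputStream parameter standing in for the getSharedObjectsFactory().getHdfsApi().open(...) call:

    import java.io.BufferedReader;
    import java.io.BufferedWriter;
    import java.io.IOException;
    import java.io.InputStream;
    import java.io.InputStreamReader;
    import java.io.OutputStream;
    import java.io.OutputStreamWriter;
    import java.io.Writer;
    import javax.ws.rs.WebApplicationException;
    import javax.ws.rs.core.StreamingOutput;

    // Sketch: same line-copying behavior as the patched getOne(), but the
    // try-with-resources block guarantees reader and writer are closed.
    public class DownloadStreamSketch {
      static StreamingOutput stream(final InputStream queryFile) {
        return new StreamingOutput() {
          @Override
          public void write(OutputStream os) throws IOException, WebApplicationException {
            try (BufferedReader br = new BufferedReader(new InputStreamReader(queryFile));
                 Writer writer = new BufferedWriter(new OutputStreamWriter(os))) {
              String line;
              while ((line = br.readLine()) != null) {
                writer.write(line + "\n");
              }
              writer.flush();
            }
          }
        };
      }
    }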

http://git-wip-us.apache.org/repos/asf/ambari/blob/beef96d4/contrib/views/hive20/src/main/resources/ui/app/routes/queries/query.js
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/routes/queries/query.js b/contrib/views/hive20/src/main/resources/ui/app/routes/queries/query.js
index 9e8b6db..9bb5c8f 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/routes/queries/query.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/routes/queries/query.js
@@ -631,6 +631,8 @@ export default Ember.Route.extend(UILoggerMixin, {
       let owner = this.get('controller.model').get('owner');
       let queryFile = this.get('controller.model').get('queryFile');
       let logFile = this.get('controller.model').get('logFile');
+      let shortQuery = (currentQuery.length > 0) ? currentQuery : ";";
+      let savedQueryId = this.get('controller.model').get('id');
 
       let payload = {"title" : newTitle,
         "dataBase": selectedDb,
@@ -639,32 +641,54 @@ export default Ember.Route.extend(UILoggerMixin, {
         "queryFile" : queryFile,
         "logFile" : logFile};
 
-      let newSaveQuery = this.get('store').createRecord('saved-query',
-        { dataBase:selectedDb,
-          title:newTitle,
-          queryFile: queryFile,
-          owner: owner,
-          shortQuery: (currentQuery.length > 0) ? currentQuery : ";"
-        });
 
+      let existingSavedQuery = this.get('store').peekRecord('saved-query', savedQueryId);
 
-      newSaveQuery.save().then((data) => {
-        console.log('saved query saved');
+      if (existingSavedQuery) {
 
-        this.get('controller.model').set('title', newTitle);
-        this.get('controller.model').set('isQueryDirty', false);
-        this.get('controller').set('worksheetModalSuccess', true);
+          this.get('savedQueries').updateSavedQuery(existingSavedQuery.get('id'), shortQuery, selectedDb, owner).then(data => {
+              console.log('saved query updated.');
+              this.get('controller.model').set('title', newTitle);
+              this.get('controller.model').set('isQueryDirty', false);
+              this.get('controller').set('worksheetModalSuccess', true);
 
-        Ember.run.later(() => {
-          this.get('controller').set('showWorksheetModal', false);
-          this.closeWorksheetAfterSave();
-        }, 2 * 1000);
+              Ember.run.later(() => {
+                this.get('controller').set('showWorksheetModal', false);
+                this.closeWorksheetAfterSave();
+              }, 2 * 1000);
 
-      });
+          }).catch(function (response) {
+            console.log('error', response);
+          });
+
+      } else {
+
+        let newSaveQuery = this.get('store').createRecord('saved-query',
+          { dataBase:selectedDb,
+            title:newTitle,
+            queryFile: queryFile,
+            owner: owner,
+            shortQuery: (currentQuery.length > 0) ? currentQuery : ";"
+          });
+
+        newSaveQuery.save().then((data) => {
+          console.log('saved query saved');
 
+          this.get('controller.model').set('title', newTitle);
+          this.get('controller.model').set('isQueryDirty', false);
+          this.get('controller').set('worksheetModalSuccess', true);
+
+          Ember.run.later(() => {
+            this.get('controller').set('showWorksheetModal', false);
+            this.closeWorksheetAfterSave();
+          }, 2 * 1000);
+
+        });
+
+      }
     },
 
-    closeWorksheetModal(){
+    closeWorksheetModal() {
       this.get('controller').set('showWorksheetModal', false);
       this.closeWorksheetAfterSave();
       this.get('controller.model').set('tabDataToClose', null);

http://git-wip-us.apache.org/repos/asf/ambari/blob/beef96d4/contrib/views/hive20/src/main/resources/ui/app/services/saved-queries.js
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/services/saved-queries.js b/contrib/views/hive20/src/main/resources/ui/app/services/saved-queries.js
index 4b4b29e..b1a28ce 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/services/saved-queries.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/services/saved-queries.js
@@ -50,6 +50,27 @@ export default Ember.Service.extend({
     });
   },
 
+  updateSavedQuery(savedQueryId, shortQuery, selectedDb, owner) {
+    return new Ember.RSVP.Promise((resolve, reject) => {
+      this.get('store').findRecord('saved-query', savedQueryId, {async: true}).then(savedQuery => {
+        savedQuery.set('shortQuery', shortQuery);
+        savedQuery.set('dataBase', selectedDb);
+        savedQuery.set('owner', owner);
+        return savedQuery.save().then(() => {
+          return resolve("");
+        });
+      }).catch(function (response) {
+        console.log('error', response);
+        return reject(response);
+      });
+    });
+  },
+
   fetchSavedQuery(path) {
     let url = this.get('store').adapterFor('application').buildURL()+ '/files/' + encodeURIComponent(path);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/beef96d4/contrib/views/hive20/src/main/resources/ui/yarn.lock
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/yarn.lock b/contrib/views/hive20/src/main/resources/ui/yarn.lock
index 477a15c..607cf81 100644
--- a/contrib/views/hive20/src/main/resources/ui/yarn.lock
+++ b/contrib/views/hive20/src/main/resources/ui/yarn.lock
@@ -569,7 +569,7 @@ babel-plugin-transform-es2015-block-scoped-functions@^6.22.0:
   dependencies:
     babel-runtime "^6.22.0"
 
-babel-plugin-transform-es2015-block-scoping@^6.23.0:
+babel-plugin-transform-es2015-block-scoping@^6.23.0, babel-plugin-transform-es2015-block-scoping@^6.24.1:
   version "6.24.1"
   resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-block-scoping/-/babel-plugin-transform-es2015-block-scoping-6.24.1.tgz#76c295dc3a4741b1665adfd3167215dcff32a576"
   dependencies: