Posted to commits@ambari.apache.org by nc...@apache.org on 2017/04/17 20:17:55 UTC

[01/34] ambari git commit: AMBARI-20717. Need to disable upload file option from file browser window in workflow manager (Venkata Sairam)

Repository: ambari
Updated Branches:
  refs/heads/branch-feature-AMBARI-12556 9f6387157 -> b013be0b9


AMBARI-20717. Need to disable upload file option from file browser window in workflow manager (Venkata Sairam)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/310c5544
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/310c5544
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/310c5544

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 310c5544155e75879c78f9e7ceef8f35cf3da587
Parents: 11ab63f
Author: Venkata Sairam <ve...@gmail.com>
Authored: Wed Apr 12 19:21:05 2017 +0530
Committer: Venkata Sairam <ve...@gmail.com>
Committed: Wed Apr 12 19:23:18 2017 +0530

----------------------------------------------------------------------
 .../app/templates/components/bundle-config.hbs  |  2 +-
 .../app/templates/components/coord-config.hbs   |  2 +-
 .../app/templates/components/flow-designer.hbs  |  2 +-
 .../app/templates/components/hdfs-browser.hbs   | 44 ++++++++++----------
 4 files changed, 26 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/310c5544/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/bundle-config.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/bundle-config.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/bundle-config.hbs
index b674990..0ea6c4b 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/bundle-config.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/bundle-config.hbs
@@ -121,7 +121,7 @@
   </div>
 </div>
 {{#if showingFileBrowser}}
-  {{hdfs-browser closeFileBrowser="closeFileBrowser" selectFileCallback=selectFileCallback filePath=filePath}}
+  {{hdfs-browser closeFileBrowser="closeFileBrowser" selectFileCallback=selectFileCallback filePath=filePath hideUpload=true}}
 {{/if}}
 {{#if showingJobConfig}}
   {{job-config type='bundle' closeJobConfigs="closeBundleSubmitConfig" jobFilePath=bundleFilePath openFileBrowser="openFileBrowser" closeFileBrowser="closeFileBrowser" jobConfigs=bundleConfigs containsParameteriedPaths=containsParameteriedPaths jobConfigProperties=jobConfigProperties}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/310c5544/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/coord-config.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/coord-config.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/coord-config.hbs
index 3b1b6a9..8f88b88 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/coord-config.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/coord-config.hbs
@@ -332,7 +332,7 @@
 </div>
 </div>
 {{#if showingFileBrowser}}
-{{hdfs-browser closeFileBrowser="closeFileBrowser" selectFileCallback=selectFileCallback filePath=filePath}}
+{{hdfs-browser closeFileBrowser="closeFileBrowser" selectFileCallback=selectFileCallback filePath=filePath hideUpload=true}}
 {{/if}}
 {{#if showingJobConfig}}
   {{job-config type='coord' closeJobConfigs="closeCoordSubmitConfig"

http://git-wip-us.apache.org/repos/asf/ambari/blob/310c5544/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/flow-designer.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/flow-designer.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/flow-designer.hbs
index 429e874..8c07d6d 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/flow-designer.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/flow-designer.hbs
@@ -279,7 +279,7 @@
   {{#global-config closeGlobalConfig="closeWorkflowGlobalProps" saveGlobalConfig="saveGlobalConfig" actionModel=globalConfig}}{{/global-config}}
 {{/if}}
 {{#if showingFileBrowser}}
-  {{hdfs-browser closeFileBrowser="closeFileBrowser" selectFileCallback=selectFileCallback filePath=workflowFilePath}}
+  {{hdfs-browser closeFileBrowser="closeFileBrowser" selectFileCallback=selectFileCallback filePath=workflowFilePath hideUpload=true}}
 {{/if}}
 {{#if showingActionSettingsFileBrowser}}
   {{hdfs-browser closeFileBrowser="closeActionSettingsFileBrowser" selectFileCallback=selectFileCallback filePath=actionSettingsFilePath}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/310c5544/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/hdfs-browser.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/hdfs-browser.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/hdfs-browser.hbs
index 44f25c8..5779e36 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/hdfs-browser.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/hdfs-browser.hbs
@@ -25,30 +25,32 @@
           </div>
       </div>
       <div class="panel-default panel-files">
-        <div class="panel-heading">
-          <div class="row">
-             <div class="col-xs-12">
-                <div class="pull-right">
-                  <!-- <span class="">
-                      <button type="button" class="btn btn-default" {{action "createFolder"}} disabled={{isFilePathInvalid}}>Create Folder</button>
-                  </span> -->
-                  {{#unless uploadSelected}}
-                    <span>
-                      <button type="button" class="btn btn-default" {{action "uploadSelect"}} disabled={{isFilePathInvalid}}>Upload File</button>
-                    </span>
-                  {{/unless}}
-                  {{#if uploadSelected}}
-                  <span class="">
-                      <span class="file-upload-control">
-                        {{file-upload url="/upload" selectedPath=selectedPath uploadFailure="uploadFailure" uploadSuccess="uploadSuccess" uploadValidation="uploadValidation"}}
+        {{#unless hideUpload}}
+          <div class="panel-heading">
+            <div class="row">
+               <div class="col-xs-12">
+                  <div class="pull-right">
+                    <!-- <span class="">
+                        <button type="button" class="btn btn-default" {{action "createFolder"}} disabled={{isFilePathInvalid}}>Create Folder</button>
+                    </span> -->
+                    {{#unless uploadSelected}}
+                      <span>
+                        <button type="button" class="btn btn-default" {{action "uploadSelect"}} disabled={{isFilePathInvalid}}>Upload File</button>
                       </span>
-                      <button type="button" class="close-icon" {{action "closeUpload"}}>x</button>
-                  </span>
-                  {{/if}}
+                    {{/unless}}
+                    {{#if uploadSelected}}
+                    <span class="">
+                        <span class="file-upload-control">
+                          {{file-upload url="/upload" selectedPath=selectedPath uploadFailure="uploadFailure" uploadSuccess="uploadSuccess" uploadValidation="uploadValidation"}}
+                        </span>
+                        <button type="button" class="close-icon" {{action "closeUpload"}}>x</button>
+                    </span>
+                    {{/if}}
+                  </div>
                 </div>
-              </div>
+            </div>
           </div>
-        </div>
+        {{/unless}}
         <div class="panel-body">
           <div>
             {{#if showUploadSuccess}}


[07/34] ambari git commit: AMBARI-20696: Skip calling stack selector, conf selector tools for Nifi, Streamline, Registry custom services in HDP cluster (Madhuvanthi Radhakrishnan via jluniya)

Posted by nc...@apache.org.
AMBARI-20696: Skip calling stack selector, conf selector tools for Nifi, Streamline, Registry custom services in HDP cluster (Madhuvanthi Radhakrishnan via jluniya)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4f2523e7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4f2523e7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4f2523e7

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 4f2523e7f08fd81859831353a18ffb52ab0af8e9
Parents: aca12b7
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Wed Apr 12 23:42:17 2017 -0700
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Wed Apr 12 23:42:17 2017 -0700

----------------------------------------------------------------------
 .../libraries/functions/stack_select.py         |  5 +++
 .../libraries/functions/stack_tools.py          | 10 +++++
 .../libraries/functions/version_select_util.py  | 42 ++++++++++++++++++++
 .../ambari/server/agent/ExecutionCommand.java   | 11 +++++
 .../AmbariCustomCommandExecutionHelper.java     |  8 ++--
 .../AmbariManagementControllerImpl.java         |  1 +
 .../internal/ClientConfigResourceProvider.java  |  1 +
 .../scripts/shared_initialization.py            |  5 ++-
 .../AmbariCustomCommandExecutionHelperTest.java |  1 +
 .../AmbariManagementControllerTest.java         |  4 +-
 10 files changed, 81 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/4f2523e7/ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py
index 79393b9..00127b1 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py
@@ -123,6 +123,11 @@ def select_all(version_to_select):
   """
   stack_root = Script.get_stack_root()
   (stack_selector_name, stack_selector_path, stack_selector_package) = stack_tools.get_stack_tool(stack_tools.STACK_SELECTOR_NAME)
+  if stack_selector_path is None:
+    Logger.warning(format("Skipping executing \"stack select all\ as stack selector path is None"))
+    return
+
+
   # it's an error, but it shouldn't really stop anything from working
   if version_to_select is None:
     Logger.error(format("Unable to execute {stack_selector_name} after installing because there was no version specified"))

http://git-wip-us.apache.org/repos/asf/ambari/blob/4f2523e7/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
index 02ae62d..93ec0b7 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
@@ -32,6 +32,7 @@ from resource_management.core.utils import pad
 STACK_SELECTOR_NAME = "stack_selector"
 CONF_SELECTOR_NAME = "conf_selector"
 
+
 def get_stack_tool(name):
   """
   Given a tool selector name, get the stack-specific tool name, tool path, and tool package
@@ -41,8 +42,17 @@ def get_stack_tool(name):
   from resource_management.libraries.functions.default import default
   stack_tools = None
   stack_tools_config = default("/configurations/cluster-env/stack_tools", None)
+  stack_name = default("/hostLevelParams/stack_name", None)
+  service_name = default("/serviceName", None)
+
+  # Get the versionAdvertised flag to decide whether or not to call the selector tools
+  is_version_advertised = default("/versionAdvertised", True)
   if stack_tools_config:
     stack_tools = json.loads(stack_tools_config)
+  if service_name is not None:
+    if not is_version_advertised:
+      Logger.warning(format("No \"stack selector tool\" returned as the component does not advertise a version"))
+      return (None, None, None)
 
   if not stack_tools or not name or name.lower() not in stack_tools:
     Logger.warning("Cannot find config for {0} stack tool in {1}".format(str(name), str(stack_tools)))

http://git-wip-us.apache.org/repos/asf/ambari/blob/4f2523e7/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py b/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py
index ff00a1f..85fe807 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py
@@ -79,6 +79,48 @@ def get_component_version(stack_name, component_name):
   return version
 
 
+def get_component_version_with_stack_selector(stack_selector_path, component_name):
+  """
+   For specific cases where we deal with HDP add-on services from a management pack, the version
+   needs to be determined by using that stack's own selector.
+   :param stack_selector_path: e.g., /usr/bin/hdf-select;
+   comes from the service that calls this function.
+   :param component_name: component name as a string, needed to look up the version
+   :return: a version string if found, e.g., 2.2.1.0-2175; otherwise None
+   This function can be called by custom services, hence should not be removed.
+  """
+  version = None
+  out = None
+  code = -1
+  if not stack_selector_path:
+    Logger.error("Stack selector path not provided")
+  elif not os.path.exists(stack_selector_path):
+    Logger.error("Stack selector path does not exist")
+  elif not component_name:
+    Logger.error("Component name not provided")
+  else:
+    tmpfile = tempfile.NamedTemporaryFile()
+
+    get_stack_comp_version_cmd = ""
+    try:
+      # This is necessary because Ubuntu returns "stdin: is not a tty", see AMBARI-8088
+      with open(tmpfile.name, 'r') as file:
+        get_stack_comp_version_cmd = '{0} status {1} > {2}'.format(stack_selector_path, component_name, tmpfile.name)
+        code, stdoutdata = shell.call(get_stack_comp_version_cmd, quiet=True)
+        out = file.read()
+
+      if code != 0 or out is None:
+        raise Exception("Code is nonzero or output is empty")
+
+      Logger.debug("Command: %s\nOutput: %s" % (get_stack_comp_version_cmd, str(out)))
+      matches = re.findall(r"([\d\.]+\-\d+)", out)
+      version = matches[0] if matches and len(matches) > 0 else None
+    except Exception, e:
+      Logger.error("Could not determine stack version for component %s by calling '%s'. Return Code: %s, Output: %s." %
+                   (component_name, get_stack_comp_version_cmd, str(code), str(out)))
+  return version
+
+
 def get_versions_from_stack_root(stack_root):
   """
   Given a stack install root, returns a list of stack versions currently installed.
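
A short usage sketch for the new helper (Python 2, as in the surrounding code; '/usr/bin/hdf-select' and 'NIFI_MASTER' are hypothetical placeholder values echoing the docstring's example, not real cluster data):

from resource_management.libraries.functions.version_select_util import \
  get_component_version_with_stack_selector

# Returns e.g. '2.2.1.0-2175' on success, or None if the selector path is
# missing, the component name is empty, or the status command fails.
version = get_component_version_with_stack_selector('/usr/bin/hdf-select',
                                                    'NIFI_MASTER')
if version is None:
  print 'Could not determine the component version'
else:
  print 'Component version: {0}'.format(version)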

http://git-wip-us.apache.org/repos/asf/ambari/blob/4f2523e7/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
index 8c726a0..95da25e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
@@ -97,6 +97,9 @@ public class ExecutionCommand extends AgentCommand {
   @SerializedName("serviceName")
   private String serviceName;
 
+  @SerializedName("versionAdvertised")
+  private boolean versionAdvertised;
+
   @SerializedName("serviceType")
   private String serviceType;
 
@@ -327,6 +330,14 @@ public class ExecutionCommand extends AgentCommand {
     this.serviceName = serviceName;
   }
 
+  public boolean getVersionAdvertised() {
+    return versionAdvertised;
+  }
+
+  public void setVersionAdvertised(boolean versionAdvertised) {
+    this.versionAdvertised = versionAdvertised;
+  }
+
   public String getServiceType() {
 	return serviceType;
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/4f2523e7/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
index d5018f5..d5d7cf4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
@@ -326,6 +326,9 @@ public class AmbariCustomCommandExecutionHelper {
     AmbariMetaInfo ambariMetaInfo = managementController.getAmbariMetaInfo();
     ServiceInfo serviceInfo = ambariMetaInfo.getService(
         stackId.getStackName(), stackId.getStackVersion(), serviceName);
+    ComponentInfo componentInfo = ambariMetaInfo.getComponent(
+            stackId.getStackName(), stackId.getStackVersion(),
+            serviceName, componentName);
     StackInfo stackInfo = ambariMetaInfo.getStack
        (stackId.getStackName(), stackId.getStackVersion());
 
@@ -432,6 +435,7 @@ public class AmbariCustomCommandExecutionHelper {
       hostLevelParams.put(NOT_MANAGED_HDFS_PATH_LIST, notManagedHdfsPathList);
 
       execCmd.setHostLevelParams(hostLevelParams);
+      execCmd.setVersionAdvertised(componentInfo.isVersionAdvertised());
 
       Map<String, String> commandParams = new TreeMap<>();
       if (additionalCommandParams != null) {
@@ -443,10 +447,6 @@ public class AmbariCustomCommandExecutionHelper {
       boolean isInstallCommand = commandName.equals(RoleCommand.INSTALL.toString());
       String commandTimeout = configs.getDefaultAgentTaskTimeout(isInstallCommand);
 
-      ComponentInfo componentInfo = ambariMetaInfo.getComponent(
-          stackId.getStackName(), stackId.getStackVersion(),
-          serviceName, componentName);
-
       if (serviceInfo.getSchemaVersion().equals(AmbariMetaInfo.SCHEMA_VERSION_2)) {
         // Service check command is not custom command
         CommandScriptDefinition script = componentInfo.getCommandScript();

http://git-wip-us.apache.org/repos/asf/ambari/blob/4f2523e7/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 751ce08..76c1167 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -2457,6 +2457,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     hostParams.put(UNLIMITED_KEY_JCE_REQUIRED, (unlimitedKeyJCEPolicyRequired) ? "true" : "false");
 
     execCmd.setHostLevelParams(hostParams);
+    execCmd.setVersionAdvertised(componentInfo.isVersionAdvertised());
 
     Map<String, String> roleParams = new TreeMap<>();
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4f2523e7/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
index e42bd45..77b027c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
@@ -443,6 +443,7 @@ public class ClientConfigResourceProvider extends AbstractControllerResourceProv
         jsonContent.put("hostLevelParams", hostLevelParams);
         jsonContent.put("hostname", hostName);
         jsonContent.put("clusterName", cluster.getClusterName());
+        jsonContent.put("versionAdvertised", componentInfo.isVersionAdvertised());
         jsonConfigurations = gson.toJson(jsonContent);
 
         File tmpDirectory = new File(TMP_PATH);

http://git-wip-us.apache.org/repos/asf/ambari/blob/4f2523e7/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py
index 1609050..a0f58cb 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py
@@ -22,6 +22,7 @@ import os
 from resource_management.libraries.functions import stack_tools
 from resource_management.libraries.functions.version import compare_versions
 from resource_management.core.resources.packaging import Package
+from resource_management.core.logger import Logger
 
 def install_packages():
   import params
@@ -31,7 +32,9 @@ def install_packages():
   packages = ['unzip', 'curl']
   if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0:
     stack_selector_package = stack_tools.get_stack_tool_package(stack_tools.STACK_SELECTOR_NAME)
-    packages.append(stack_selector_package)
+    if stack_selector_package:
+      packages.append(stack_selector_package)
+
   Package(packages,
           retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
           retry_count=params.agent_stack_retry_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/4f2523e7/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperTest.java
index 71a02f5..4eca710 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperTest.java
@@ -225,6 +225,7 @@ public class AmbariCustomCommandExecutionHelperTest {
     Assert.assertTrue(command.getHostLevelParams().containsKey(ExecutionCommand.KeyNames.USER_GROUPS));
     Assert.assertEquals("{\"zookeeperUser\":[\"zookeeperGroup\"]}", command.getHostLevelParams().get(ExecutionCommand.KeyNames.USER_GROUPS));
     Assert.assertEquals(true, command.getForceRefreshConfigTagsBeforeExecution());
+    Assert.assertFalse(command.getVersionAdvertised());
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/ambari/blob/4f2523e7/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index 554e089..dfb8cb7 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -275,6 +275,7 @@ public class AmbariManagementControllerTest {
     hostDAO = injector.getInstance(HostDAO.class);
     topologyHostInfoDAO = injector.getInstance(TopologyHostInfoDAO.class);
     hostRoleCommandDAO = injector.getInstance(HostRoleCommandDAO.class);
+
     stackManagerMock = (StackManagerMock) ambariMetaInfo.getStackManager();
     EasyMock.replay(injector.getInstance(AuditLogger.class));
   }
@@ -1271,8 +1272,6 @@ public class AmbariManagementControllerTest {
     crReq.setDesiredConfig(Collections.singletonList(cr3));
     controller.updateClusters(Collections.singleton(crReq), null);
 
-
-
     // Install
     installService(cluster1, serviceName, false, false);
     ExecutionCommand ec =
@@ -1312,6 +1311,7 @@ public class AmbariManagementControllerTest {
     assertEquals("[\"myhdfsgroup\"]", ec.getHostLevelParams().get(ExecutionCommand.KeyNames.GROUP_LIST));
     assertTrue(ec.getHostLevelParams().containsKey(ExecutionCommand.KeyNames.USER_GROUPS));
     assertEquals("{\"myhdfsuser\":[\"myhdfsgroup\"]}", ec.getHostLevelParams().get(ExecutionCommand.KeyNames.USER_GROUPS));
+    assertEquals(ec.getVersionAdvertised(), false);
   }
 
   @Test


[14/34] ambari git commit: AMBARI-20746 Change to warning log if Ambari doesn't have baseurls for Install Packages step before EU/RU (dili)

Posted by nc...@apache.org.
AMBARI-20746 Change to warning log if Ambari doesn't have baseurls for Install Packages step before EU/RU (dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b9c82add
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b9c82add
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b9c82add

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: b9c82add7fbe5ef652da1a47a0276b63a279470c
Parents: 273dfca
Author: Di Li <di...@apache.org>
Authored: Thu Apr 13 13:15:56 2017 -0400
Committer: Di Li <di...@apache.org>
Committed: Thu Apr 13 13:15:56 2017 -0400

----------------------------------------------------------------------
 .../internal/ClusterStackVersionResourceProvider.java           | 3 +++
 .../controller/internal/HostStackVersionResourceProvider.java   | 5 +++++
 .../main/resources/custom_actions/scripts/install_packages.py   | 2 +-
 3 files changed, 9 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b9c82add/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
index 92e72ed..1d278d0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
@@ -610,6 +610,9 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
         osFamily, repoVersion.getVersion(), stackId));
     }
 
+    if (repoInfo.isEmpty()) {
+      LOG.error(String.format("Repository list is empty. Ambari may not be managing the repositories for %s", osFamily));
+    }
 
     // determine packages for all services that are installed on host
     List<ServiceOsSpecific.Package> packages = new ArrayList<>();

http://git-wip-us.apache.org/repos/asf/ambari/blob/b9c82add/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java
index 8f2d4e6..811ce9b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java
@@ -398,6 +398,11 @@ public class HostStackVersionResourceProvider extends AbstractControllerResource
                       "not defined. Repo version=%s, stackId=%s",
         osFamily, desiredRepoVersion, stackId));
     }
+
+    if (repoInfo.isEmpty()) {
+      LOG.error(String.format("Repository list is empty. Ambari may not be managing the repositories for %s", osFamily));
+    }
+
     // For every host at cluster, determine packages for all installed services
     List<ServiceOsSpecific.Package> packages = new ArrayList<>();
     Set<String> servicesOnHost = new HashSet<>();

http://git-wip-us.apache.org/repos/asf/ambari/blob/b9c82add/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py b/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py
index 112abe3..33adce1 100644
--- a/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py
+++ b/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py
@@ -127,7 +127,7 @@ class InstallPackages(Script):
     Logger.info("Will install packages for repository version {0}".format(self.repository_version))
 
     if 0 == len(base_urls):
-      Logger.info("Repository list is empty. Ambari may not be managing the repositories for {0}.".format(self.repository_version))
+      Logger.warning("Repository list is empty. Ambari may not be managing the repositories for {0}.".format(self.repository_version))
 
     try:
       append_to_file = False


[17/34] ambari git commit: AMBARI-20752. Update the Accordion styles in horton style.(xiwang)

Posted by nc...@apache.org.
AMBARI-20752. Update the Accordion styles in horton style.(xiwang)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0c778e77
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0c778e77
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0c778e77

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 0c778e77ac05caf24e1a0a1028533ec42ff35da2
Parents: 7e46412
Author: Xi Wang <xi...@apache.org>
Authored: Wed Apr 12 15:06:17 2017 -0700
Committer: Xi Wang <xi...@apache.org>
Committed: Thu Apr 13 13:42:29 2017 -0700

----------------------------------------------------------------------
 .../app/styles/theme/bootstrap-ambari.css       | 20 ++++++++------------
 1 file changed, 8 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/0c778e77/ambari-web/app/styles/theme/bootstrap-ambari.css
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/theme/bootstrap-ambari.css b/ambari-web/app/styles/theme/bootstrap-ambari.css
index b2f5ca4..3164584 100644
--- a/ambari-web/app/styles/theme/bootstrap-ambari.css
+++ b/ambari-web/app/styles/theme/bootstrap-ambari.css
@@ -1352,16 +1352,16 @@ input.radio:checked + label:after {
 .accordion .panel-group .panel,
 .wizard .wizard-body .wizard-content .accordion .panel-group .panel {
   border-radius: 0px;
-  border: 1px solid;
-  border-color: #ccc transparent;
-  border-bottom: none;
+  border: none;
   margin-top: 0px;
 }
 .accordion .panel-group .panel .panel-heading,
 .wizard .wizard-body .wizard-content .accordion .panel-group .panel .panel-heading {
   height: 50px;
   padding: 15px 10px;
-  border: 1px solid transparent;
+  border: 1px solid;
+  border-color: #ddd transparent;
+  border-top: none;
   background: #fff;
 }
 .accordion .panel-group .panel .panel-heading .panel-title,
@@ -1371,29 +1371,25 @@ input.radio:checked + label:after {
   font-style: normal;
   line-height: 1;
   color: #333;
-  color: #1491c1;
 }
 .accordion .panel-group .panel .panel-heading .panel-title > a,
 .wizard .wizard-body .wizard-content .accordion .panel-group .panel .panel-heading .panel-title > a {
   font-size: 18px;
+  color: #333;
 }
 .accordion .panel-group .panel .panel-heading .panel-title > i,
 .wizard .wizard-body .wizard-content .accordion .panel-group .panel .panel-heading .panel-title > i {
   font-size: 20px;
+  color: #1491c1;
 }
 .accordion .panel-group .panel .panel-heading:hover,
 .wizard .wizard-body .wizard-content .accordion .panel-group .panel .panel-heading:hover {
   background: #f3faff;
-  border: 1px solid #a7dff2;
   cursor: pointer;
 }
 .accordion .panel-group .panel .panel-body,
 .wizard .wizard-body .wizard-content .accordion .panel-group .panel .panel-body {
-  padding: 30px 20px;
-}
-.accordion .panel-group:last-child .panel,
-.wizard .wizard-body .wizard-content .accordion .panel-group:last-child .panel {
-  border-bottom: 1px solid #ccc;
+  padding: 15px 10px 20px 20px;
 }
 h1,
 h2,
@@ -1464,4 +1460,4 @@ a.disabled:hover,
 a:visited.disabled:hover,
 a:focus.disabled:hover {
   text-decoration: none;
-}
\ No newline at end of file
+}


[21/34] ambari git commit: AMBARI-20763. Update YARN's ATS configs 'apptimelineserver_heapsize' and 'yarn.timeline-service.entity-group-fs-store.app-cache-size' logic in 2.6.

Posted by nc...@apache.org.
AMBARI-20763. Update YARN's ATS configs 'apptimelineserver_heapsize' and 'yarn.timeline-service.entity-group-fs-store.app-cache-size' logic in 2.6.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/38f84bf1
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/38f84bf1
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/38f84bf1

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 38f84bf12d04981017bbcfc3d44c243f062ed055
Parents: f894e48
Author: Swapan Shridhar <ss...@hortonworks.com>
Authored: Thu Apr 13 15:57:10 2017 -0700
Committer: Swapan Shridhar <ss...@hortonworks.com>
Committed: Thu Apr 13 15:57:10 2017 -0700

----------------------------------------------------------------------
 .../YARN/3.0.0.3.0/service_advisor.py           |  76 ++++
 .../services/YARN/configuration/yarn-env.xml    |  18 +
 .../stacks/HDP/2.6/services/stack_advisor.py    |  80 ++++
 .../stacks/2.5/common/test_stack_advisor.py     |   7 +
 .../stacks/2.6/common/test_stack_advisor.py     | 452 ++++++++++++++++++-
 5 files changed, 627 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/38f84bf1/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
index fc32001..1ac7849 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
@@ -420,6 +420,7 @@ class YARNRecommender(service_advisor.ServiceAdvisor):
 
   def recommendYARNConfigurationsFromHDP26(self, configurations, clusterData, services, hosts):
     putYarnSiteProperty = self.putProperty(configurations, "yarn-site", services)
+    putYarnEnvProperty = self.putProperty(configurations, "yarn-env", services)
 
     if "yarn-site" in services["configurations"] and \
                     "yarn.resourcemanager.scheduler.monitor.enable" in services["configurations"]["yarn-site"]["properties"]:
@@ -466,6 +467,81 @@ class YARNRecommender(service_advisor.ServiceAdvisor):
     else:
       self.logger.info("Not setting Yarn Repo user for Ranger.")
 
+    yarn_timeline_app_cache_size = None
+    host_mem = None
+    for host in hosts["items"]:
+      host_mem = host["Hosts"]["total_mem"]
+      break
+    # Check if 'yarn.timeline-service.entity-group-fs-store.app-cache-size' in changed configs.
+    changed_configs_has_ats_cache_size = self.isConfigPropertiesChanged(
+      services, "yarn-site", ['yarn.timeline-service.entity-group-fs-store.app-cache-size'], False)
+    # Proceed if either: 1. 'yarn.timeline-service.entity-group-fs-store.app-cache-size' was detected in changed-configurations,
+    # OR 2. this is cluster initialization (services['changed-configurations'] is empty in that case)
+    if changed_configs_has_ats_cache_size:
+      yarn_timeline_app_cache_size = self.read_yarn_apptimelineserver_cache_size(services)
+    elif 0 == len(services['changed-configurations']):
+      # Fetch host memory from 1st host, to be used for ATS config calculations below.
+      if host_mem is not None:
+        yarn_timeline_app_cache_size = self.calculate_yarn_apptimelineserver_cache_size(host_mem)
+        putYarnSiteProperty('yarn.timeline-service.entity-group-fs-store.app-cache-size', yarn_timeline_app_cache_size)
+        self.logger.info("Updated YARN config 'yarn.timeline-service.entity-group-fs-store.app-cache-size' as : {0}, "
+                         "using 'host_mem' = {1}".format(yarn_timeline_app_cache_size, host_mem))
+      else:
+        self.logger.info("Couldn't update YARN config 'yarn.timeline-service.entity-group-fs-store.app-cache-size' as "
+                         "'host_mem' read = {0}".format(host_mem))
+
+    if yarn_timeline_app_cache_size is not None:
+      # Calculation for 'ats_heapsize' is in MB.
+      ats_heapsize = self.calculate_yarn_apptimelineserver_heapsize(host_mem, yarn_timeline_app_cache_size)
+      putYarnEnvProperty('apptimelineserver_heapsize', ats_heapsize) # Value in MB
+      self.logger.info("Updated YARN config 'apptimelineserver_heapsize' as : {0}, ".format(ats_heapsize))
+
+  """
+  Calculate YARN config 'apptimelineserver_heapsize' in MB.
+  """
+  def calculate_yarn_apptimelineserver_heapsize(self, host_mem, yarn_timeline_app_cache_size):
+    ats_heapsize = None
+    if host_mem < 4096:
+      ats_heapsize = 1024
+    else:
+      ats_heapsize = long(min(math.floor(host_mem/2), long(yarn_timeline_app_cache_size) * 500 + 3072))
+    return ats_heapsize
+
+  """
+  Calculates YARN config 'yarn.timeline-service.entity-group-fs-store.app-cache-size', based on the host's total memory.
+  """
+  def calculate_yarn_apptimelineserver_cache_size(self, host_mem):
+    yarn_timeline_app_cache_size = None
+    if host_mem < 4096:
+      yarn_timeline_app_cache_size = 3
+    elif host_mem >= 4096 and host_mem < 8192:
+      yarn_timeline_app_cache_size = 7
+    elif host_mem >= 8192:
+      yarn_timeline_app_cache_size = 10
+    self.logger.info("Calculated and returning 'yarn_timeline_app_cache_size' : {0}".format(yarn_timeline_app_cache_size))
+    return yarn_timeline_app_cache_size
+
+
+  """
+  Reads YARN config 'yarn.timeline-service.entity-group-fs-store.app-cache-size'.
+  """
+  def read_yarn_apptimelineserver_cache_size(self, services):
+    """
+    :type services dict
+    :rtype str
+    """
+    yarn_ats_app_cache_size = None
+    yarn_ats_app_cache_size_config = "yarn.timeline-service.entity-group-fs-store.app-cache-size"
+    yarn_site_in_services = self.getServicesSiteProperties(services, "yarn-site")
+
+    if yarn_site_in_services and yarn_ats_app_cache_size_config in yarn_site_in_services:
+      yarn_ats_app_cache_size = yarn_site_in_services[yarn_ats_app_cache_size_config]
+      self.logger.info("'yarn.scheduler.minimum-allocation-mb' read from services as : {0}".format(yarn_ats_app_cache_size))
+
+    if not yarn_ats_app_cache_size:
+      self.logger.error("'{0}' was not found in the services".format(yarn_ats_app_cache_size_config))
+
+    return yarn_ats_app_cache_size
 
   #region LLAP
   def updateLlapConfigs(self, configurations, services, hosts, llap_queue_name):
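
A worked example of the two calculations above, as a standalone Python 2 sketch (the memory values match the unit tests later in this mail; the heapsize result is in MB, per the comments in the diff):

import math

def ats_cache_size(host_mem):
  # Mirrors calculate_yarn_apptimelineserver_cache_size() above.
  if host_mem < 4096:
    return 3
  elif host_mem < 8192:
    return 7
  return 10

def ats_heapsize(host_mem, cache_size):
  # Mirrors calculate_yarn_apptimelineserver_heapsize() above.
  if host_mem < 4096:
    return 1024
  return long(min(math.floor(host_mem / 2), long(cache_size) * 500 + 3072))

print ats_cache_size(2048), ats_heapsize(2048, 3)            # -> 3 1024
print ats_cache_size(4096), ats_heapsize(4096, 7)            # -> 7 2048
print ats_cache_size(50331648), ats_heapsize(50331648, 10)   # -> 10 8072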

http://git-wip-us.apache.org/repos/asf/ambari/blob/38f84bf1/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/configuration/yarn-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/configuration/yarn-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/configuration/yarn-env.xml
index d04c3c5..1b2ca68 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/configuration/yarn-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/configuration/yarn-env.xml
@@ -25,6 +25,24 @@
     <description>Set to false by default,  needs to be set to true in stacks that use Ranger Yarn Plugin</description>
     <on-ambari-upgrade add="true"/>
   </property>
+  <property>
+    <name>apptimelineserver_heapsize</name>
+    <value>1024</value>
+    <display-name>AppTimelineServer Java heap size</display-name>
+    <description>Max heapsize for AppTimelineServer using a numerical value in the scale of MB</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <unit>MB</unit>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.timeline-service.entity-group-fs-store.app-cache-size</name>
+      </property>
+    </depends-on>
+  </property>
   <!-- yarn-env.sh -->
   <property>
     <name>content</name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/38f84bf1/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
index 7881917..38f46d7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
@@ -16,6 +16,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 """
+import math
+
 import json
 import re
 from resource_management.libraries.functions import format
@@ -146,6 +148,7 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
   def recommendYARNConfigurations(self, configurations, clusterData, services, hosts):
     super(HDP26StackAdvisor, self).recommendYARNConfigurations(configurations, clusterData, services, hosts)
     putYarnSiteProperty = self.putProperty(configurations, "yarn-site", services)
+    putYarnEnvProperty = self.putProperty(configurations, "yarn-env", services)
 
     if "yarn-site" in services["configurations"] and \
                     "yarn.resourcemanager.scheduler.monitor.enable" in services["configurations"]["yarn-site"]["properties"]:
@@ -186,6 +189,83 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
     else:
       self.logger.info("Not setting Yarn Repo user for Ranger.")
 
+
+    yarn_timeline_app_cache_size = None
+    host_mem = None
+    for host in hosts["items"]:
+      host_mem = host["Hosts"]["total_mem"]
+      break
+    # Check if 'yarn.timeline-service.entity-group-fs-store.app-cache-size' in changed configs.
+    changed_configs_has_ats_cache_size = self.isConfigPropertiesChanged(
+      services, "yarn-site", ['yarn.timeline-service.entity-group-fs-store.app-cache-size'], False)
+    # Proceed if either: 1. 'yarn.timeline-service.entity-group-fs-store.app-cache-size' was detected in changed-configurations,
+    # OR 2. this is cluster initialization (services['changed-configurations'] is empty in that case)
+    if changed_configs_has_ats_cache_size:
+      yarn_timeline_app_cache_size = self.read_yarn_apptimelineserver_cache_size(services)
+    elif 0 == len(services['changed-configurations']):
+      # Fetch host memory from 1st host, to be used for ATS config calculations below.
+      if host_mem is not None:
+        yarn_timeline_app_cache_size = self.calculate_yarn_apptimelineserver_cache_size(host_mem)
+        putYarnSiteProperty('yarn.timeline-service.entity-group-fs-store.app-cache-size', yarn_timeline_app_cache_size)
+        self.logger.info("Updated YARN config 'yarn.timeline-service.entity-group-fs-store.app-cache-size' as : {0}, "
+                    "using 'host_mem' = {1}".format(yarn_timeline_app_cache_size, host_mem))
+      else:
+        self.logger.info("Couldn't update YARN config 'yarn.timeline-service.entity-group-fs-store.app-cache-size' as "
+                    "'host_mem' read = {0}".format(host_mem))
+
+    if yarn_timeline_app_cache_size is not None:
+      # Calculation for 'ats_heapsize' is in MB.
+      ats_heapsize = self.calculate_yarn_apptimelineserver_heapsize(host_mem, yarn_timeline_app_cache_size)
+      putYarnEnvProperty('apptimelineserver_heapsize', ats_heapsize) # Value in MB
+      self.logger.info("Updated YARN config 'apptimelineserver_heapsize' as : {0}, ".format(ats_heapsize))
+
+  """
+  Calculate YARN config 'apptimelineserver_heapsize' in MB.
+  """
+  def calculate_yarn_apptimelineserver_heapsize(self, host_mem, yarn_timeline_app_cache_size):
+    ats_heapsize = None
+    if host_mem < 4096:
+      ats_heapsize = 1024
+    else:
+      ats_heapsize = long(min(math.floor(host_mem/2), long(yarn_timeline_app_cache_size) * 500 + 3072))
+    return ats_heapsize
+
+  """
+  Calculates YARN config 'yarn.timeline-service.entity-group-fs-store.app-cache-size', based on the host's total memory.
+  """
+  def calculate_yarn_apptimelineserver_cache_size(self, host_mem):
+    yarn_timeline_app_cache_size = None
+    if host_mem < 4096:
+      yarn_timeline_app_cache_size = 3
+    elif host_mem >= 4096 and host_mem < 8192:
+      yarn_timeline_app_cache_size = 7
+    elif host_mem >= 8192:
+      yarn_timeline_app_cache_size = 10
+    self.logger.info("Calculated and returning 'yarn_timeline_app_cache_size' : {0}".format(yarn_timeline_app_cache_size))
+    return yarn_timeline_app_cache_size
+
+
+  """
+  Reads YARN config 'yarn.timeline-service.entity-group-fs-store.app-cache-size'.
+  """
+  def read_yarn_apptimelineserver_cache_size(self, services):
+    """
+    :type services dict
+    :rtype str
+    """
+    yarn_ats_app_cache_size = None
+    yarn_ats_app_cache_size_config = "yarn.timeline-service.entity-group-fs-store.app-cache-size"
+    yarn_site_in_services = self.getServicesSiteProperties(services, "yarn-site")
+
+    if yarn_site_in_services and yarn_ats_app_cache_size_config in yarn_site_in_services:
+      yarn_ats_app_cache_size = yarn_site_in_services[yarn_ats_app_cache_size_config]
+      self.logger.info("'yarn.scheduler.minimum-allocation-mb' read from services as : {0}".format(yarn_ats_app_cache_size))
+
+    if not yarn_ats_app_cache_size:
+      self.logger.error("'{0}' was not found in the services".format(yarn_ats_app_cache_size_config))
+
+    return yarn_ats_app_cache_size
+
   def getMetadataConnectionString(self, database_type):
       driverDict = {
           'mysql': 'jdbc:mysql://{0}:{2}/{1}?createDatabaseIfNotExist=true',

http://git-wip-us.apache.org/repos/asf/ambari/blob/38f84bf1/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
index 4250681..77a06fe 100644
--- a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
@@ -926,6 +926,8 @@ class TestHDP25StackAdvisor(TestCase):
         ]
       }
       ],
+      "changed-configurations": [
+      ],
       "configurations": {
         "capacity-scheduler": {
           "properties": {
@@ -4918,6 +4920,9 @@ class TestHDP25StackAdvisor(TestCase):
                                   "capacity-scheduler":{"properties":{
                                     "capacity-scheduler": "yarn.scheduler.capacity.root.queues=ndfqueue,leaf\n" +
                                                           "yarn.scheduler.capacity.root.ndfqueue.queues=ndfqueue1,ndfqueue2\n"}}}
+    services["changed-configurations"]= []
+
+
     hosts = self.prepareHosts([])
     result = self.stackAdvisor.validateConfigurations(services, hosts)
     expectedItems = [
@@ -4948,6 +4953,8 @@ class TestHDP25StackAdvisor(TestCase):
           "stack_versions": ["2.4", "2.3", "2.2", "2.1", "2.0.6"]
         }
       },
+      "changed-configurations": [
+      ],
       "configurations": configurations,
       "services": [],
       "ambari-server-properties": {}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/38f84bf1/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
index 5bfa1a9..2d7322d 100644
--- a/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
@@ -989,7 +989,26 @@ class TestHDP26StackAdvisor(TestCase):
       "yarnMinContainerSize": 256
     }
 
-    hosts = {}
+    hosts = {
+      "items": [
+      {
+        "Hosts": {
+          "cpu_count": 6,
+          "total_mem": 50331648,
+          "disk_info": [
+            {"mountpoint": "/"},
+            {"mountpoint": "/dev/shm"},
+            {"mountpoint": "/vagrant"},
+            {"mountpoint": "/"},
+            {"mountpoint": "/dev/shm"},
+            {"mountpoint": "/vagrant"}
+          ],
+          "public_host_name": "c6401.ambari.apache.org",
+          "host_name": "c6401.ambari.apache.org"
+        },
+      }
+      ]
+    }
 
     services = {
       "services":
@@ -1007,6 +1026,8 @@ class TestHDP26StackAdvisor(TestCase):
         "stack_name" : "HDP",
         "stack_version": "2.6"
       },
+      "changed-configurations": [
+      ],
       "configurations": configurations,
       "ambari-server-properties": {"ambari-server.user":"ambari_user"}
     }
@@ -1016,6 +1037,7 @@ class TestHDP26StackAdvisor(TestCase):
       'yarn-env': {
         'properties': {
           'min_user_id': '500',
+          'apptimelineserver_heapsize': '8072',
           'service_check.queue.name': 'default'
         }
       },
@@ -1131,11 +1153,16 @@ class TestHDP26StackAdvisor(TestCase):
           'yarn.scheduler.minimum-allocation-vcores': '1',
           'yarn.scheduler.maximum-allocation-vcores': '4',
           'yarn.nodemanager.resource.memory-mb': '768',
+          'yarn.nodemanager.local-dirs': '/hadoop/yarn/local,/dev/shm/hadoop/yarn/local,/vagrant/hadoop/yarn/local',
+          'yarn.nodemanager.log-dirs': '/hadoop/yarn/log,/dev/shm/hadoop/yarn/log,/vagrant/hadoop/yarn/log',
+          'yarn.timeline-service.entity-group-fs-store.app-cache-size': '10',
           'yarn.scheduler.minimum-allocation-mb': '256',
           'yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath': '',
           'yarn.nodemanager.resource.cpu-vcores': '4',
           'yarn.scheduler.maximum-allocation-mb': '768',
-          'yarn.nodemanager.linux-container-executor.group': 'hadoop'
+          'yarn.nodemanager.linux-container-executor.group': 'hadoop',
+          'yarn.timeline-service.leveldb-state-store.path': '/hadoop/yarn/timeline',
+          'yarn.timeline-service.leveldb-timeline-store.path': '/hadoop/yarn/timeline'
         },
         'property_attributes': {
           'yarn.authorization-provider': {
@@ -1267,6 +1294,8 @@ class TestHDP26StackAdvisor(TestCase):
         "components": []
       }
       ],
+      "changed-configurations": [
+      ],
       "configurations": configurations
     }
 
@@ -1282,7 +1311,8 @@ class TestHDP26StackAdvisor(TestCase):
         'properties': {
           'yarn_user': 'custom_yarn',
           'service_check.queue.name': 'default',
-          'min_user_id': '500'
+          'min_user_id': '500',
+          'apptimelineserver_heapsize': '2048'
         }
       },
       'ranger-yarn-plugin-properties': {
@@ -1304,19 +1334,429 @@ class TestHDP26StackAdvisor(TestCase):
           'yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath': '',
           'yarn.nodemanager.resource.cpu-vcores': '4',
           'yarn.scheduler.maximum-allocation-mb': '1280',
-          'yarn.nodemanager.linux-container-executor.group': 'hadoop'
+          'yarn.nodemanager.linux-container-executor.group': 'hadoop',
+          'yarn.nodemanager.local-dirs': '/hadoop/yarn/local,/dev/shm/hadoop/yarn/local,/vagrant/hadoop/yarn/local',
+          'yarn.nodemanager.log-dirs': '/hadoop/yarn/log,/dev/shm/hadoop/yarn/log,/vagrant/hadoop/yarn/log',
+          'yarn.timeline-service.entity-group-fs-store.app-cache-size': '7',
+          'yarn.timeline-service.leveldb-state-store.path': '/hadoop/yarn/timeline',
+          'yarn.timeline-service.leveldb-timeline-store.path': '/hadoop/yarn/timeline'
+
         }
       }
     }
 
-    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, None)
+    hosts = {
+      "items": [
+        {
+          "Hosts": {
+            "cpu_count": 6,
+            "total_mem": 4096,
+            "disk_info": [
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"},
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"}
+            ],
+            "public_host_name": "c6401.ambari.apache.org",
+            "host_name": "c6401.ambari.apache.org"
+          },
+        }
+      ]
+    }
+
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, hosts)
     self.assertEquals(configurations, expected)
+
     configurations['yarn-env']['properties']['yarn_user'] = 'yarn'
     expected['yarn-env']['properties']['yarn_user'] = 'yarn'
     expected['ranger-yarn-plugin-properties']['properties']['REPOSITORY_CONFIG_USERNAME'] = 'yarn'
-    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, None)
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, hosts)
+    self.assertEquals(configurations, expected)
+
+
+
+  def test_recommendYARNConfigurations_for_ats_heapsize_and_cache(self):
+    configurations = {
+      "yarn-env": {
+        "properties": {
+          "yarn_user" : "custom_yarn"
+        }
+      },
+      "ranger-yarn-plugin-properties": {
+        "properties": {
+          "ranger-yarn-plugin-enabled" : "Yes",
+          "REPOSITORY_CONFIG_USERNAME":"yarn"
+        }
+      }
+    }
+    services = {
+      "services" : [{
+        "StackServices": {
+          "service_name" : "YARN",
+          "service_version" : "2.7.3.2.6"
+        },
+        "components": []
+      }
+      ],
+      "changed-configurations": [
+      ],
+      "configurations": configurations
+    }
+
+
+    clusterData = {
+      "cpu": 4,
+      "containers" : 5,
+      "ramPerContainer": 256,
+      "yarnMinContainerSize": 256
+    }
+    expected = {
+      'yarn-env': {
+        'properties': {
+          'yarn_user': 'custom_yarn',
+          'service_check.queue.name': 'default',
+          'min_user_id': '500',
+          'apptimelineserver_heapsize': '1024'
+        }
+      },
+      'ranger-yarn-plugin-properties': {
+        'properties': {
+          'ranger-yarn-plugin-enabled': 'Yes',
+          'REPOSITORY_CONFIG_USERNAME': 'custom_yarn'
+        }
+      },
+      'yarn-site': {
+        'properties': {
+          'hadoop.registry.rm.enabled': 'false',
+          'yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes': '',
+          'yarn.authorization-provider': 'org.apache.ranger.authorization.yarn.authorizer.RangerYarnAuthorizer',
+          'yarn.acl.enable': 'true',
+          'yarn.scheduler.minimum-allocation-vcores': '1',
+          'yarn.scheduler.maximum-allocation-vcores': '4',
+          'yarn.nodemanager.resource.memory-mb': '1280',
+          'yarn.scheduler.minimum-allocation-mb': '256',
+          'yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath': '',
+          'yarn.nodemanager.resource.cpu-vcores': '4',
+          'yarn.scheduler.maximum-allocation-mb': '1280',
+          'yarn.nodemanager.linux-container-executor.group': 'hadoop',
+          'yarn.nodemanager.local-dirs': '/hadoop/yarn/local,/dev/shm/hadoop/yarn/local,/vagrant/hadoop/yarn/local',
+          'yarn.nodemanager.log-dirs': '/hadoop/yarn/log,/dev/shm/hadoop/yarn/log,/vagrant/hadoop/yarn/log',
+          'yarn.timeline-service.entity-group-fs-store.app-cache-size': '3',
+          'yarn.timeline-service.leveldb-state-store.path': '/hadoop/yarn/timeline',
+          'yarn.timeline-service.leveldb-timeline-store.path': '/hadoop/yarn/timeline'
+
+        }
+      }
+    }
+
+    hosts = {
+      "items": [
+        {
+          "Hosts": {
+            "cpu_count": 6,
+            "total_mem": 2048,
+            "disk_info": [
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"},
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"}
+            ],
+            "public_host_name": "c6401.ambari.apache.org",
+            "host_name": "c6401.ambari.apache.org"
+          },
+        }
+      ]
+    }
+
+
+
+
+    '''
+    Test 1 :
+    I/P:
+       - 'changed-configurations' is empty (doesn't have 'yarn.timeline-service.entity-group-fs-store.app-cache-size')
+       - 'host_mem' = 2048
+    O/P :
+       -  Config value recommended for:
+           - yarn.timeline-service.entity-group-fs-store.app-cache-size = 3
+           - apptimelineserver_heapsize = 1024
+    '''
+
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, hosts)
+    self.assertEquals(configurations, expected)
+
+
+
+    '''
+    Test 2 :
+    I/P:
+       - 'changed-configurations' is empty (doesn't have 'yarn.timeline-service.entity-group-fs-store.app-cache-size')
+       - 'host_mem' = 4096
+    O/P :
+       -  Config value recommended for:
+           - yarn.timeline-service.entity-group-fs-store.app-cache-size = 7
+           - apptimelineserver_heapsize = 2048
+    '''
+    hosts = {
+      "items": [
+        {
+          "Hosts": {
+            "cpu_count": 6,
+            "total_mem": 4096,
+            "disk_info": [
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"},
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"}
+            ],
+            "public_host_name": "c6401.ambari.apache.org",
+            "host_name": "c6401.ambari.apache.org"
+          },
+        }
+      ]
+    }
+
+    expected['yarn-env']['properties']['apptimelineserver_heapsize'] = '2048'
+    expected['yarn-site']['properties']['yarn.timeline-service.entity-group-fs-store.app-cache-size'] = '7'
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, hosts)
+    self.assertEquals(configurations, expected)
+
+
+
+    '''
+    Test 3 :
+    I/P:
+       - 'changed-configurations' is empty (doesn't have 'yarn.timeline-service.entity-group-fs-store.app-cache-size')
+       - 'host_mem' = 8192
+    O/P :
+       -  Config value recommended for:
+           - yarn.timeline-service.entity-group-fs-store.app-cache-size = 10
+           - apptimelineserver_heapsize = 4096
+    '''
+    hosts = {
+      "items": [
+        {
+          "Hosts": {
+            "cpu_count": 6,
+            "total_mem": 8192,
+            "disk_info": [
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"},
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"}
+            ],
+            "public_host_name": "c6401.ambari.apache.org",
+            "host_name": "c6401.ambari.apache.org"
+          },
+        }
+      ]
+    }
+
+    expected['yarn-env']['properties']['apptimelineserver_heapsize'] = '4096'
+    expected['yarn-site']['properties']['yarn.timeline-service.entity-group-fs-store.app-cache-size'] = '10'
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, hosts)
+    self.assertEquals(configurations, expected)
+
+
+
+    '''
+    Test 4 :
+    I/P:
+       - 'changed-configurations' has 'yarn.timeline-service.entity-group-fs-store.app-cache-size'
+       - 'host_mem' = 2048
+    O/P :
+       -  Config value recommended for:
+           - apptimelineserver_heapsize = 4096
+    '''
+
+    services["changed-configurations"] = [
+      {
+        u'old_value': u'10',
+        u'type': u'yarn-site',
+        u'name': u'yarn.timeline-service.entity-group-fs-store.app-cache-size'
+      }
+    ]
+
+    services["configurations"] = {
+      "yarn-env": {
+        "properties": {
+          "yarn_user" : "custom_yarn",
+        }
+      },
+      "yarn-site": {
+        "properties": {
+          "yarn.timeline-service.entity-group-fs-store.app-cache-size" : "7"
+        }
+      },
+      "ranger-yarn-plugin-properties": {
+        "properties": {
+          "ranger-yarn-plugin-enabled" : "Yes",
+          "REPOSITORY_CONFIG_USERNAME":"yarn"
+        }
+      }
+    }
+
+    hosts = {
+      "items": [
+        {
+          "Hosts": {
+            "cpu_count": 6,
+            "total_mem": 4096,
+            "disk_info": [
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"},
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"}
+            ],
+            "public_host_name": "c6401.ambari.apache.org",
+            "host_name": "c6401.ambari.apache.org"
+          },
+        }
+      ]
+    }
+
+
+
+    '''
+    Test 5 :
+    I/P:
+       - 'changed-configurations' has 'yarn.timeline-service.entity-group-fs-store.app-cache-size'
+       - 'host_mem' = 4096
+    O/P :
+       -  Config value recommended for:
+           - apptimelineserver_heapsize = 2048
+    '''
+
+    services["changed-configurations"] = [
+      {
+        u'old_value': u'10',
+        u'type': u'yarn-site',
+        u'name': u'yarn.timeline-service.entity-group-fs-store.app-cache-size'
+      }
+    ]
+
+    services["configurations"] = {
+      "yarn-env": {
+        "properties": {
+          "yarn_user" : "custom_yarn",
+        }
+      },
+      "yarn-site": {
+        "properties": {
+          "yarn.timeline-service.entity-group-fs-store.app-cache-size" : "7"
+        }
+      },
+      "ranger-yarn-plugin-properties": {
+        "properties": {
+          "ranger-yarn-plugin-enabled" : "Yes",
+          "REPOSITORY_CONFIG_USERNAME":"yarn"
+        }
+      }
+    }
+
+    hosts = {
+      "items": [
+        {
+          "Hosts": {
+            "cpu_count": 6,
+            "total_mem": 4096,
+            "disk_info": [
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"},
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"}
+            ],
+            "public_host_name": "c6401.ambari.apache.org",
+            "host_name": "c6401.ambari.apache.org"
+          },
+        }
+      ]
+    }
+
+
+    expected['yarn-env']['properties']['apptimelineserver_heapsize'] = '2048'
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, hosts)
     self.assertEquals(configurations, expected)
 
+
+
+    '''
+    Test 6 :
+    I/P:
+       - 'changed-configurations' has 'yarn.timeline-service.entity-group-fs-store.app-cache-size'
+       - 'host_mem' = 8196
+    O/P :
+       -  Config value recommended for:
+           - Shouldn't have yarn.timeline-service.entity-group-fs-store.app-cache-size
+           - apptimelineserver_heapsize = 4572
+    '''
+
+    services["changed-configurations"] = [
+      {
+        u'old_value': u'10',
+        u'type': u'yarn-site',
+        u'name': u'yarn.timeline-service.entity-group-fs-store.app-cache-size'
+      }
+    ]
+
+    services["configurations"] = {
+      "yarn-env": {
+        "properties": {
+          "yarn_user" : "custom_yarn",
+        }
+      },
+      "yarn-site": {
+        "properties": {
+          "yarn.timeline-service.entity-group-fs-store.app-cache-size" : "3"
+        }
+      },
+      "ranger-yarn-plugin-properties": {
+        "properties": {
+          "ranger-yarn-plugin-enabled" : "Yes",
+          "REPOSITORY_CONFIG_USERNAME":"yarn"
+        }
+      }
+    }
+
+    hosts = {
+      "items": [
+        {
+          "Hosts": {
+            "cpu_count": 6,
+            "total_mem": 16392,
+            "disk_info": [
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"},
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"}
+            ],
+            "public_host_name": "c6401.ambari.apache.org",
+            "host_name": "c6401.ambari.apache.org"
+          },
+        }
+      ]
+    }
+
+
+    expected['yarn-env']['properties']['apptimelineserver_heapsize'] = '4572'
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, hosts)
+    self.assertEquals(configurations, expected)
+
+
   def test_recommendKAFKAConfigurations(self):
     configurations = {
       "kafka-env": {


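Taken together, the assertions above pin down a small sizing policy: while 'yarn.timeline-service.entity-group-fs-store.app-cache-size' is absent from changed-configurations, both the cache size and the ATS heap are tiered by host memory; once the user has changed the cache size, the advisor keeps it and only derives the heap. A standalone sketch of that policy follows. The tier boundaries and the heap formula's constants are back-solved from the test inputs and expected outputs, not taken from the stack advisor source, and recommend_ats_settings is a hypothetical name.

def recommend_ats_settings(total_mem_mb, cache_size_changed, current_cache_size=None):
    """Return (app_cache_size, apptimelineserver_heapsize_mb) -- illustrative only."""
    if cache_size_changed:
        # The user pinned the cache size; respect it and only size the heap.
        cache_size = current_cache_size
    elif total_mem_mb < 4096:
        cache_size = 3
    elif total_mem_mb < 8192:
        cache_size = 7
    else:
        cache_size = 10

    # Heap: half the host memory, capped by a base plus a per-cached-app slice
    # (3072 and 500 are inferred from the assertions, not authoritative).
    heapsize = min(total_mem_mb // 2, 3072 + 500 * cache_size)
    return cache_size, heapsize

assert recommend_ats_settings(2048, False) == (3, 1024)      # Test 1
assert recommend_ats_settings(4096, False) == (7, 2048)      # Test 2
assert recommend_ats_settings(8192, False) == (10, 4096)     # Test 3
assert recommend_ats_settings(4096, True, 7) == (7, 2048)    # Test 5
assert recommend_ats_settings(16392, True, 3) == (3, 4572)   # Test 6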
[04/34] ambari git commit: AMBARI-20750. Remove multiprocessing-based StatusCommandsExecutor (Eugene Chekanskiy via smohanty)

Posted by nc...@apache.org.
AMBARI-20750. Remove multiprocessing-based StatusCommandsExecutor (Eugene Chekanskiy via smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/5ef0c99a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/5ef0c99a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/5ef0c99a

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 5ef0c99a9d477b63f4e7213d058c9ab2d3ac2feb
Parents: ef34cb4
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Wed Apr 12 12:35:15 2017 -0700
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Wed Apr 12 12:35:15 2017 -0700

----------------------------------------------------------------------
 .../ambari_agent/StatusCommandsExecutor.py      | 279 +------------------
 1 file changed, 2 insertions(+), 277 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/5ef0c99a/ambari-agent/src/main/python/ambari_agent/StatusCommandsExecutor.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/StatusCommandsExecutor.py b/ambari-agent/src/main/python/ambari_agent/StatusCommandsExecutor.py
index 142e7ca..f42e134 100644
--- a/ambari-agent/src/main/python/ambari_agent/StatusCommandsExecutor.py
+++ b/ambari-agent/src/main/python/ambari_agent/StatusCommandsExecutor.py
@@ -83,280 +83,5 @@ class SingleProcessStatusCommandsExecutor(StatusCommandsExecutor):
   def kill(self, reason=None, can_relaunch=True):
     pass
 
-class MultiProcessStatusCommandsExecutor(StatusCommandsExecutor):
-  def __init__(self, config, actionQueue):
-    self.config = config
-    self.actionQueue = actionQueue
-
-    self.can_relaunch = True
-
-    # used to prevent the queues from being used while a new one is created, so that threads
-    # do not mix old and new queues
-    self.usage_lock = threading.RLock()
-    # protects against simultaneous killing/creating from different threads.
-    self.kill_lock = threading.RLock()
-
-    self.status_command_timeout = int(self.config.get('agent', 'status_command_timeout', 5))
-    self.customServiceOrchestrator = self.actionQueue.customServiceOrchestrator
-
-    self.worker_process = None
-    self.mustDieEvent = multiprocessing.Event()
-    self.timedOutEvent = multiprocessing.Event()
-
-    # multiprocessing stuff that need to be cleaned every time
-    self.mp_result_queue = multiprocessing.Queue()
-    self.mp_result_logs = multiprocessing.Queue()
-    self.mp_task_queue = multiprocessing.Queue()
-
-  def _drain_queue(self, target_queue, max_time=5, max_empty_count=15, read_break=.001):
-    """
-    Read everything that is available in the queue. This relies on the unreliable multiprocessing.Queue methods
-    (qsize, empty), so it contains crude protection against blocking in this method for too long: it tries to fetch
-    all available items for no more than ``max_time`` seconds, and returns after ``max_empty_count`` calls of
-    ``target_queue.get(False)`` that raised ``Queue.Empty``. Note the ``read_break`` argument: with default values
-    this method can read ~4500 ``range(1,10000)`` objects in 5 seconds, so don't fill the queue too fast.
-
-    :param target_queue: queue to read from
-    :param max_time: maximum time to spend in this method call
-    :param max_empty_count: maximum allowed ``Queue.Empty`` in a row
-    :param read_break: time to wait before next read cycle iteration
-    :return: list of resulting objects
-    """
-    results = []
-    _empty = 0
-    _start = time.time()
-    with self.usage_lock:
-      try:
-        while (not target_queue.empty() or target_queue.qsize() > 0) and time.time() - _start < max_time and _empty < max_empty_count:
-          try:
-            results.append(target_queue.get(False))
-            _empty = 0
-            time.sleep(read_break) # sleep a little to get more accurate empty and qsize results
-          except Queue.Empty:
-            _empty += 1
-          except IOError:
-            pass
-          except UnicodeDecodeError:
-            pass
-      except IOError:
-        pass
-    return results
-
-  def _log_message(self, level, message, exception=None):
-    """
-    Put a log message on the logging queue. Must only be used for logging from the child process (in _worker_process_target).
-
-    :param level:
-    :param message:
-    :param exception:
-    :return:
-    """
-    result_message = "StatusCommandExecutor reporting at {0}: ".format(time.time()) + message
-    self.mp_result_logs.put((level, result_message, exception))
-
-  def _process_logs(self):
-    """
-    Get all logs available at this moment and print them to the logger.
-    """
-    for level, message, exception in self._drain_queue(self.mp_result_logs):
-      if level == logging.ERROR:
-        logger.debug(message, exc_info=exception)
-      if level == logging.WARN:
-        logger.warn(message)
-      if level == logging.INFO:
-        logger.info(message)
-
-  def _worker_process_target(self):
-    """
-    Internal method that runs in a separate process.
-    """
-    # clean up monkey-patching results in the child process, as they cause problems
-    import subprocess
-    reload(subprocess)
-    import multiprocessing
-    reload(multiprocessing)
-
-    bind_debug_signal_handlers()
-    self._log_message(logging.INFO, "StatusCommandsExecutor process started")
-
-    # region StatusCommandsExecutor process internals
-    internal_in_queue = Queue.Queue()
-    internal_out_queue = Queue.Queue()
-
-    def _internal_worker():
-      """
-      thread that actually executes status commands
-      """
-      while True:
-        _cmd = internal_in_queue.get()
-        internal_out_queue.put(self.actionQueue.execute_status_command_and_security_status(_cmd))
-
-    worker = threading.Thread(target=_internal_worker)
-    worker.daemon = True
-    worker.start()
-
-    def _internal_process_command(_command):
-      internal_in_queue.put(_command)
-      start_time = time.time()
-      result = None
-      while not self.mustDieEvent.is_set() and not result and time.time() - start_time < self.status_command_timeout:
-        try:
-          result = internal_out_queue.get(timeout=1)
-        except Queue.Empty:
-          pass
-
-      if result:
-        self.mp_result_queue.put(result)
-        return True
-      else:
-        # do not set timed out event twice
-        if not self.timedOutEvent.is_set():
-          self._set_timed_out(_command)
-        return False
-
-    # endregion
-
-    try:
-      while not self.mustDieEvent.is_set():
-        try:
-          command = self.mp_task_queue.get(False)
-        except Queue.Empty:
-          # no command, let's try again in the next loop iteration
-          time.sleep(.1)
-          continue
-
-        self._log_message(logging.DEBUG, "Running status command for {0}".format(command['componentName']))
-
-        if _internal_process_command(command):
-          self._log_message(logging.DEBUG, "Completed status command for {0}".format(command['componentName']))
-
-    except Exception as e:
-      self._log_message(logging.ERROR, "StatusCommandsExecutor process failed with exception:", e)
-      raise
-
-    self._log_message(logging.INFO, "StatusCommandsExecutor subprocess finished")
-
-  def _set_timed_out(self, command):
-    """
-    Set the timeout event and add a log entry for the given command.
-
-    :param command:
-    :return:
-    """
-    msg = "Command {0} for {1} is running for more than {2} seconds. Terminating it due to timeout.".format(
-        command['commandType'],
-        command['componentName'],
-        self.status_command_timeout
-    )
-    self._log_message(logging.WARN, msg)
-    self.timedOutEvent.set()
-
-  def put_commands(self, commands):
-    """
-    Put given commands to command executor.
-
-    :param commands: status commands to execute
-    :return:
-    """
-    with self.usage_lock:
-      for command in commands:
-        logger.info("Adding " + command['commandType'] + " for component " + \
-                    command['componentName'] + " of service " + \
-                    command['serviceName'] + " of cluster " + \
-                    command['clusterName'] + " to the queue.")
-        self.mp_task_queue.put(command)
-        logger.debug(pprint.pformat(command))
-
-  def process_results(self):
-    """
-    Process all the results from the SCE worker process.
-    """
-    self._process_logs()
-    results = self._drain_queue(self.mp_result_queue)
-    logger.debug("Drained %s status commands results, ~%s remains in queue", len(results), self.mp_result_queue.qsize())
-    for result in results:
-      try:
-        self.actionQueue.process_status_command_result(result)
-      except UnicodeDecodeError:
-        pass
-
-  @property
-  def need_relaunch(self):
-    """
-    Indicates whether the process needs to be relaunched because it timed out, is dead, or was never created.
-
-    :return: tuple (bool, str|None) with flag to relaunch and reason of relaunch
-    """
-    if not self.worker_process or not self.worker_process.is_alive():
-      return True, "WORKER_DEAD"
-    elif self.timedOutEvent.is_set():
-      return True, "COMMAND_TIMEOUT"
-    return False, None
-
-  def relaunch(self, reason=None):
-    """
-    Restart status command executor internal process.
-
-    :param reason: reason of restart
-    :return:
-    """
-    with self.kill_lock:
-      logger.info("Relaunching child process reason:" + str(reason))
-      if self.can_relaunch:
-        self.kill(reason)
-        self.worker_process = multiprocessing.Process(target=self._worker_process_target)
-        self.worker_process.start()
-        logger.info("Started process with pid {0}".format(self.worker_process.pid))
-      else:
-        logger.debug("Relaunch does not allowed, can not relaunch")
-
-  def kill(self, reason=None, can_relaunch=True):
-    """
-    Tries to stop the command executor's internal process gracefully for a short time, otherwise kills it. Closes all
-    queues to unblock threads that may be blocked on queue read or write operations. Must be called from threads other
-    than those calling the read or write methods (get_log_messages, get_results, put_commands).
-
-    :param can_relaunch: indicates if StatusCommandsExecutor can be relaunched after this kill
-    :param reason: reason of killing
-    :return:
-    """
-    with self.kill_lock:
-      self.can_relaunch = can_relaunch
-
-      if not self.can_relaunch:
-        logger.info("Killing without possibility to relaunch...")
-
-      # try graceful stop, otherwise hard-kill
-      if self.worker_process and self.worker_process.is_alive():
-        self.mustDieEvent.set()
-        self.worker_process.join(timeout=3)
-        if self.worker_process.is_alive():
-          os.kill(self.worker_process.pid, signal.SIGKILL)
-          logger.info("Child process killed by -9")
-        else:
-          # get log messages only if the process died gracefully, otherwise we risk blocking here forever; in most
-          # cases this call will do nothing, as all logs will have been processed in the ActionQueue loop
-          self._process_logs()
-          logger.info("Child process died gracefully")
-      else:
-        logger.info("Child process already dead")
-
-      # close queues and acquire usage lock
-      # closing both sides of pipes here, we need this hack in case of blocking on recv() call
-      self.mp_result_queue.close()
-      self.mp_result_queue._writer.close()
-      self.mp_result_logs.close()
-      self.mp_result_logs._writer.close()
-      self.mp_task_queue.close()
-      self.mp_task_queue._writer.close()
-
-      with self.usage_lock:
-        self.mp_result_queue.join_thread()
-        self.mp_result_queue = multiprocessing.Queue()
-        self.mp_task_queue.join_thread()
-        self.mp_task_queue = multiprocessing.Queue()
-        self.mp_result_logs.join_thread()
-        self.mp_result_logs = multiprocessing.Queue()
-        self.customServiceOrchestrator = self.actionQueue.customServiceOrchestrator
-        self.mustDieEvent.clear()
-        self.timedOutEvent.clear()
+# TODO make reliable MultiProcessStatusCommandsExecutor implementation
+MultiProcessStatusCommandsExecutor = SingleProcessStatusCommandsExecutor

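With this commit the multiprocessing implementation is gone and the agent always runs status commands in-process; the final alias keeps existing call sites working. The body of SingleProcessStatusCommandsExecutor is not part of this diff, so the sketch below only illustrates the shape such an executor takes once there is no child process to supervise: commands queue up, process_results() executes them inline, and the relaunch/kill machinery degenerates to no-ops. Names mirror the classes above, but the method bodies are illustrative assumptions.

import Queue  # Python 2, matching the agent code above

class SingleProcessExecutorSketch(object):
    def __init__(self, actionQueue):
        self.actionQueue = actionQueue
        self.commands = Queue.Queue()

    def put_commands(self, commands):
        for command in commands:
            self.commands.put(command)

    def process_results(self):
        # Execute queued status commands inline; with no worker process there
        # is nothing to time out, drain, or re-create.
        while not self.commands.empty():
            command = self.commands.get()
            self.actionQueue.execute_status_command_and_security_status(command)

    @property
    def need_relaunch(self):
        return False, None  # no child process, so never relaunch

    def relaunch(self, reason=None):
        pass

    def kill(self, reason=None, can_relaunch=True):
        pass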

[06/34] ambari git commit: AMBARI-20578 APPENDUM-2 Log Search Configuration API (mgergely)

Posted by nc...@apache.org.
AMBARI-20578 APPENDUM-2 Log Search Configuration API (mgergely)

Change-Id: I5db7d93c68148d1d65bddf3a1f085fb17dcccbf9


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/aca12b7a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/aca12b7a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/aca12b7a

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: aca12b7a3387211a51718026215a30877960035d
Parents: 68b7b56
Author: Miklos Gergely <mg...@hortonworks.com>
Authored: Thu Apr 13 01:28:16 2017 +0200
Committer: Miklos Gergely <mg...@hortonworks.com>
Committed: Thu Apr 13 01:28:25 2017 +0200

----------------------------------------------------------------------
 .../2.0.6/hooks/after-INSTALL/scripts/params.py |  2 +-
 .../hooks/after-INSTALL/test_after_install.py   | 25 +++++++++++++++++++-
 2 files changed, 25 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/aca12b7a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
index fbea258..9abd2fe 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
@@ -56,7 +56,7 @@ service_package_folder = config['commandParams']['service_package_folder']
 logsearch_service_name = service_name.lower().replace("_", "-")
 logsearch_config_file_name = 'input.config-' + logsearch_service_name + ".json"
 logsearch_config_file_path = agent_cache_dir + "/" + service_package_folder + "/templates/" + logsearch_config_file_name + ".j2"
-logsearch_config_file_exists = os.path.exists(logsearch_config_file_path)
+logsearch_config_file_exists = os.path.isfile(logsearch_config_file_path)
 
 # default hadoop params
 mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"

http://git-wip-us.apache.org/repos/asf/ambari/blob/aca12b7a/ambari-server/src/test/python/stacks/2.0.6/hooks/after-INSTALL/test_after_install.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/hooks/after-INSTALL/test_after_install.py b/ambari-server/src/test/python/stacks/2.0.6/hooks/after-INSTALL/test_after_install.py
index 1bfa173..19c785c 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/hooks/after-INSTALL/test_after_install.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/hooks/after-INSTALL/test_after_install.py
@@ -24,6 +24,7 @@ from stacks.utils.RMFTestCase import *
 from resource_management.libraries.functions import conf_select
 
 @patch("os.path.exists", new = MagicMock(return_value=True))
+@patch("os.path.isfile", new = MagicMock(return_value=False))
 class TestHookAfterInstall(RMFTestCase):
 
   def test_hook_default(self):
@@ -40,7 +41,11 @@ class TestHookAfterInstall(RMFTestCase):
                               configurations = self.getConfig()['configurations']['core-site'],
                               configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
                               only_if="ls /etc/hadoop/conf")
-
+    self.assertResourceCalled('Directory',
+                              '/etc/ambari-logsearch-logfeeder/conf',
+                              mode = 0755,
+                              cd_access = 'a',
+                              create_parents = True)
     self.assertNoMoreResources()
 
 
@@ -81,6 +86,12 @@ class TestHookAfterInstall(RMFTestCase):
       configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
       only_if="ls /usr/hdp/current/hadoop-client/conf")
 
+    self.assertResourceCalled('Directory',
+                              '/etc/ambari-logsearch-logfeeder/conf',
+                              mode = 0755,
+                              cd_access = 'a',
+                              create_parents = True)
+
     package_dirs = conf_select.get_package_dirs();
     for package, dir_defs in package_dirs.iteritems():
       for dir_def in dir_defs:
@@ -148,6 +159,12 @@ class TestHookAfterInstall(RMFTestCase):
       configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
       only_if="ls /usr/hdp/current/hadoop-client/conf")
 
+    self.assertResourceCalled('Directory',
+                              '/etc/ambari-logsearch-logfeeder/conf',
+                              mode = 0755,
+                              cd_access = 'a',
+                              create_parents = True)
+
     package_dirs = conf_select.get_package_dirs();
     for package, dir_defs in package_dirs.iteritems():
       for dir_def in dir_defs:
@@ -248,6 +265,12 @@ class TestHookAfterInstall(RMFTestCase):
       configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
       only_if="ls /usr/hdp/current/hadoop-client/conf")
 
+    self.assertResourceCalled('Directory',
+                              '/etc/ambari-logsearch-logfeeder/conf',
+                              mode = 0755,
+                              cd_access = 'a',
+                              create_parents = True)
+
     package_dirs = conf_select.get_package_dirs();
     for package, dir_defs in package_dirs.iteritems():
       for dir_def in dir_defs:


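The functional change here is the switch from os.path.exists() to os.path.isfile() when probing for the Log Search input config template (plus the patched isfile in the test). The distinction matters because exists() is also true for a directory at that path, which would let a stray directory be treated as a template. A minimal illustration (the file name below is just an example):

import os, tempfile

d = tempfile.mkdtemp()
assert os.path.exists(d) and not os.path.isfile(d)   # a directory "exists"...

f = os.path.join(d, "input.config-hdfs.json.j2")     # example template name
open(f, "w").close()
assert os.path.exists(f) and os.path.isfile(f)       # ...a regular file is both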
[12/34] ambari git commit: AMBARI-20736. Allow Potentially Long Running Restart Commands To Have Their Own Timeout (ncole)

Posted by nc...@apache.org.
AMBARI-20736. Allow Potentially Long Running Restart Commands To Have Their Own Timeout (ncole)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ac75f1da
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ac75f1da
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ac75f1da

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: ac75f1daccc2c1117e175f95cd9642e85b4fd366
Parents: 4f41968
Author: Nate Cole <nc...@hortonworks.com>
Authored: Tue Apr 11 14:36:43 2017 -0400
Committer: Nate Cole <nc...@hortonworks.com>
Committed: Thu Apr 13 08:53:50 2017 -0400

----------------------------------------------------------------------
 .../libraries/functions/decorator.py            | 23 +++++--
 .../AmbariCustomCommandExecutionHelper.java     | 12 +++-
 .../internal/UpgradeResourceProvider.java       |  8 ++-
 .../server/state/stack/upgrade/Grouping.java    |  2 +-
 .../state/stack/upgrade/StageWrapper.java       | 65 +++++++++++++++++++
 .../ambari/server/state/stack/upgrade/Task.java |  6 ++
 .../server/state/stack/upgrade/TaskWrapper.java | 25 +++++++-
 .../state/stack/upgrade/TaskWrapperBuilder.java |  5 +-
 .../2.1.0.2.0/package/scripts/hdfs_namenode.py  | 11 +++-
 .../2.1.0.2.0/package/scripts/params_linux.py   |  2 +
 .../stacks/HDP/2.3/upgrades/upgrade-2.3.xml     |  2 +-
 .../stacks/HDP/2.3/upgrades/upgrade-2.4.xml     |  2 +-
 .../stacks/HDP/2.3/upgrades/upgrade-2.5.xml     |  2 +-
 .../stacks/HDP/2.3/upgrades/upgrade-2.6.xml     |  2 +-
 .../stacks/HDP/2.4/upgrades/upgrade-2.4.xml     |  2 +-
 .../stacks/HDP/2.4/upgrades/upgrade-2.5.xml     |  2 +-
 .../stacks/HDP/2.4/upgrades/upgrade-2.6.xml     |  2 +-
 .../stacks/HDP/2.5/upgrades/upgrade-2.5.xml     |  2 +-
 .../stacks/HDP/2.5/upgrades/upgrade-2.6.xml     |  2 +-
 .../stacks/HDP/2.6/upgrades/upgrade-2.6.xml     |  2 +-
 .../src/main/resources/upgrade-pack.xsd         |  1 +
 .../internal/UpgradeResourceProviderTest.java   | 66 +++++++++++++++++++-
 .../stacks/HDP/2.1.1/upgrades/upgrade_test.xml  |  2 +-
 23 files changed, 218 insertions(+), 30 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/ac75f1da/ambari-common/src/main/python/resource_management/libraries/functions/decorator.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/decorator.py b/ambari-common/src/main/python/resource_management/libraries/functions/decorator.py
index 55cf335..b5b804d 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/decorator.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/decorator.py
@@ -26,13 +26,15 @@ __all__ = ['retry', 'safe_retry', ]
 from resource_management.core.logger import Logger
 
 
-def retry(times=3, sleep_time=1, max_sleep_time=8, backoff_factor=1, err_class=Exception):
+def retry(times=3, sleep_time=1, max_sleep_time=8, backoff_factor=1, err_class=Exception, timeout_func=None):
   """
   Retry decorator for improved robustness of functions.
-  :param times: Number of times to attempt to call the function.
+  :param times: Number of times to attempt to call the function.  Ignored when timeout_func is specified.
   :param sleep_time: Initial sleep time between attempts
   :param backoff_factor: After every failed attempt, multiply the previous sleep time by this factor.
   :param err_class: Exception class to handle
+  :param timeout_func: used when the 'times' argument should be computed from a time budget.  This function
+         should return an integer value indicating the total number of seconds to wait
   :return: Returns the output of the wrapped function.
   """
   def decorator(function):
@@ -42,6 +44,10 @@ def retry(times=3, sleep_time=1, max_sleep_time=8, backoff_factor=1, err_class=E
       _backoff_factor = backoff_factor
       _err_class = err_class
 
+      if timeout_func is not None:
+        timeout = timeout_func()
+        _times = timeout // sleep_time  # ensure we end up with an integer
+
       while _times > 1:
         _times -= 1
         try:
@@ -49,7 +55,8 @@ def retry(times=3, sleep_time=1, max_sleep_time=8, backoff_factor=1, err_class=E
         except _err_class, err:
           Logger.info("Will retry %d time(s), caught exception: %s. Sleeping for %d sec(s)" % (_times, str(err), _sleep_time))
           time.sleep(_sleep_time)
-        if(_sleep_time * _backoff_factor <= max_sleep_time):
+
+        if _sleep_time * _backoff_factor <= max_sleep_time:
           _sleep_time *= _backoff_factor
 
       return function(*args, **kwargs)
@@ -57,15 +64,17 @@ def retry(times=3, sleep_time=1, max_sleep_time=8, backoff_factor=1, err_class=E
   return decorator
 
 
-def safe_retry(times=3, sleep_time=1, max_sleep_time=8, backoff_factor=1, err_class=Exception, return_on_fail=None):
+def safe_retry(times=3, sleep_time=1, max_sleep_time=8, backoff_factor=1, err_class=Exception, return_on_fail=None, timeout_func=None):
   """
   Retry decorator for improved robustness of functions. Instead of error generation on the last try, will return
   return_on_fail value.
-  :param times: Number of times to attempt to call the function.
+  :param times: Number of times to attempt to call the function.  Ignored when timeout_func is specified.
   :param sleep_time: Initial sleep time between attempts
   :param backoff_factor: After every failed attempt, multiply the previous sleep time by this factor.
   :param err_class: Exception class to handle
   :param return_on_fail: value to return on the last try
+  :param timeout_func: used when the 'times' argument should be computed from a time budget.  This function
+         should return an integer value indicating the total number of seconds to wait
   :return: Returns the output of the wrapped function.
   """
   def decorator(function):
@@ -76,6 +85,10 @@ def safe_retry(times=3, sleep_time=1, max_sleep_time=8, backoff_factor=1, err_cl
       _err_class = err_class
       _return_on_fail = return_on_fail
 
+      if timeout_func is not None:
+        timeout = timeout_func()
+        _times = timeout // sleep_time  # ensure we end up with an integer
+
       while _times > 1:
         _times -= 1
         try:

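For reference, here is the changed retry decorator condensed into a standalone form (Logger calls and safe_retry's return_on_fail handling stripped). It shows the new contract: when timeout_func is supplied, the decorator converts a total time budget into an attempt count, which is how the NameNode restart below can honor a configurable timeout.

import time

def retry(times=3, sleep_time=1, max_sleep_time=8, backoff_factor=1,
          err_class=Exception, timeout_func=None):
    def decorator(function):
        def wrapper(*args, **kwargs):
            _times, _sleep_time = times, sleep_time
            if timeout_func is not None:
                _times = timeout_func() // sleep_time  # budget -> attempt count
            while _times > 1:
                _times -= 1
                try:
                    return function(*args, **kwargs)
                except err_class:
                    time.sleep(_sleep_time)
                if _sleep_time * backoff_factor <= max_sleep_time:
                    _sleep_time *= backoff_factor
            return function(*args, **kwargs)  # final attempt raises on failure
        return wrapper
    return decorator

@retry(sleep_time=5, backoff_factor=2, err_class=IOError,
       timeout_func=lambda: 900)  # a 900s budget yields 900 // 5 = 180 attempts
def poll_namenode():
    pass  # placeholder body for illustration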
http://git-wip-us.apache.org/repos/asf/ambari/blob/ac75f1da/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
index d5d7cf4..a493b94 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
@@ -445,7 +445,7 @@ public class AmbariCustomCommandExecutionHelper {
       }
 
       boolean isInstallCommand = commandName.equals(RoleCommand.INSTALL.toString());
-      String commandTimeout = configs.getDefaultAgentTaskTimeout(isInstallCommand);
+      int commandTimeout = Short.valueOf(configs.getDefaultAgentTaskTimeout(isInstallCommand)).intValue();
 
       if (serviceInfo.getSchemaVersion().equals(AmbariMetaInfo.SCHEMA_VERSION_2)) {
         // Service check command is not custom command
@@ -455,7 +455,7 @@ public class AmbariCustomCommandExecutionHelper {
           commandParams.put(SCRIPT, script.getScript());
           commandParams.put(SCRIPT_TYPE, script.getScriptType().toString());
           if (script.getTimeout() > 0) {
-            commandTimeout = String.valueOf(script.getTimeout());
+            commandTimeout = script.getTimeout();
           }
         } else {
           String message = String.format("Component %s has not command script " +
@@ -466,7 +466,13 @@ public class AmbariCustomCommandExecutionHelper {
         // We don't need package/repo information to perform service check
       }
 
-      commandParams.put(COMMAND_TIMEOUT, commandTimeout);
+      // !!! the action execution context timeout is the final say, but make sure it's at least 60 seconds
+      if (null != actionExecutionContext.getTimeout()) {
+        commandTimeout = actionExecutionContext.getTimeout().intValue();
+        commandTimeout = Math.max(60, commandTimeout);
+      }
+
+      commandParams.put(COMMAND_TIMEOUT, "" + commandTimeout);
       commandParams.put(SERVICE_PACKAGE_FOLDER, serviceInfo.getServicePackageFolder());
       commandParams.put(HOOKS_FOLDER, stackInfo.getStackHooksFolder());
 

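Restated for clarity, the rule this hunk introduces is: an explicit timeout on the ActionExecutionContext (now populated from the upgrade pack, see UpgradeResourceProvider below) overrides the computed command timeout, but is floored at 60 seconds. An illustrative one-function Python equivalent:

def effective_command_timeout(context_timeout, computed_timeout):
    # The context timeout is the final say, but never below 60 seconds.
    if context_timeout is not None:
        return max(60, context_timeout)
    return computed_timeout

assert effective_command_timeout(10, 300) == 60
assert effective_command_timeout(1200, 300) == 1200
assert effective_command_timeout(None, 300) == 300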
http://git-wip-us.apache.org/repos/asf/ambari/blob/ac75f1da/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
index 709ca93..511c8fb 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
@@ -1314,6 +1314,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
       String serviceName = wrapper.getTasks().get(0).getService();
       ServiceInfo serviceInfo = ambariMetaInfo.getService(stackId.getStackName(),
           stackId.getStackVersion(), serviceName);
+
       params.put(SERVICE_PACKAGE_FOLDER, serviceInfo.getServicePackageFolder());
       params.put(HOOKS_FOLDER, stackInfo.getStackHooksFolder());
     }
@@ -1324,7 +1325,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     // hosts in maintenance mode are excluded from the upgrade
     actionContext.setMaintenanceModeHostExcluded(true);
 
-    actionContext.setTimeout(Short.valueOf(s_configuration.getDefaultAgentTaskTimeout(false)));
+    actionContext.setTimeout(wrapper.getMaxTimeout(s_configuration));
     actionContext.setRetryAllowed(allowRetry);
     actionContext.setAutoSkipFailures(context.isComponentFailureAutoSkipped());
 
@@ -1404,7 +1405,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
 
     ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
         function, filters, commandParams);
-    actionContext.setTimeout(Short.valueOf(s_configuration.getDefaultAgentTaskTimeout(false)));
+    actionContext.setTimeout(wrapper.getMaxTimeout(s_configuration));
     actionContext.setRetryAllowed(allowRetry);
     actionContext.setAutoSkipFailures(context.isComponentFailureAutoSkipped());
 
@@ -1440,6 +1441,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     }
 
     s_commandExecutionHelper.get().addExecutionCommandsToStage(actionContext, stage, requestParams);
+
     request.addStages(Collections.singletonList(stage));
   }
 
@@ -1464,7 +1466,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
         "SERVICE_CHECK", filters, commandParams);
 
-    actionContext.setTimeout(Short.valueOf(s_configuration.getDefaultAgentTaskTimeout(false)));
+    actionContext.setTimeout(wrapper.getMaxTimeout(s_configuration));
     actionContext.setRetryAllowed(allowRetry);
     actionContext.setAutoSkipFailures(context.isServiceCheckFailureAutoSkipped());
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/ac75f1da/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Grouping.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Grouping.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Grouping.java
index cd17a70..99ed0aa 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Grouping.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Grouping.java
@@ -129,6 +129,7 @@ public class Grouping {
       for (TaskBucket bucket : buckets) {
         // The TaskWrappers take into account if a task is meant to run on all, any, or master.
         // A TaskWrapper may contain multiple tasks, but typically only one, and they all run on the same set of hosts.
+        // Generate a task wrapper for every task in the bucket
         List<TaskWrapper> preTasks = TaskWrapperBuilder.getTaskList(service, pc.name, hostsType, bucket.tasks, params);
         List<List<TaskWrapper>> organizedTasks = organizeTaskWrappersBySyncRules(preTasks);
         for (List<TaskWrapper> tasks : organizedTasks) {
@@ -219,7 +220,6 @@ public class Grouping {
         int batchNum = 0;
         for (Set<String> hostSubset : hostSets) {
           batchNum++;
-          TaskWrapper expandedTW = new TaskWrapper(tw.getService(), tw.getComponent(), hostSubset, tw.getParams(), tw.getTasks());
 
           String stageText = getStageText(verb, ctx.getComponentDisplay(service, pc.name), hostSubset, batchNum, numBatchesNeeded);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/ac75f1da/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/StageWrapper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/StageWrapper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/StageWrapper.java
index aac8935..81f4e0b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/StageWrapper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/StageWrapper.java
@@ -25,6 +25,12 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.lang.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import com.google.common.base.Objects;
 import com.google.gson.Gson;
 
@@ -33,6 +39,8 @@ import com.google.gson.Gson;
  */
 public class StageWrapper {
 
+  private static final Logger LOG = LoggerFactory.getLogger(StageWrapper.class);
+
   private static Gson gson = new Gson();
   private String text;
   private Type type;
@@ -163,4 +171,61 @@ public class StageWrapper {
         .add("text",text)
         .omitNullValues().toString();
   }
+
+  /**
+   * Gets the maximum timeout for any task that this {@code StageWrapper} encapsulates.  TaskWrappers
+   * are homogeneous across the stage, but timeouts are defined in Upgrade Packs
+   * at the task level, so each one should be checked individually.
+   *
+   * <p>
+   * WARNING:  This method relies on incorrect assumptions about {@link StageWrapper}s and the {@link TaskWrapper}s
+   * that are contained in them.  Orchestration is currently forcing a StageWrapper to have only one TaskWrapper,
+   * even though the code would allow many.
+   *
+   * In addition, a TaskWrapper should have a one-to-one reference with the Task it contains.  That will be
+   * fixed in a future release.
+   * </p>
+   *
+   * @param configuration the configuration instance.  StageWrappers are not injectable, so pass
+   *                      this in.
+   * @return the maximum timeout, or the default agent execution timeout if none are found.  Never {@code null}.
+   */
+  public Short getMaxTimeout(Configuration configuration) {
+
+    Set<String> timeoutKeys = new HashSet<>();
+
+    // !!! FIXME a TaskWrapper should have only one task.
+    for (TaskWrapper wrapper : tasks) {
+      timeoutKeys.addAll(wrapper.getTimeoutKeys());
+    }
+
+    Short defaultTimeout = Short.valueOf(configuration.getDefaultAgentTaskTimeout(false));
+
+    if (CollectionUtils.isEmpty(timeoutKeys)) {
+      return defaultTimeout;
+    }
+
+    Short timeout = null;
+
+    for (String key : timeoutKeys) {
+      String configValue = configuration.getProperty(key);
+
+      if (StringUtils.isNotBlank(configValue)) {
+        try {
+          Short configTimeout = Short.valueOf(configValue);
+
+          if (null == timeout || configTimeout > timeout) {
+            timeout = configTimeout;
+          }
+
+        } catch (Exception e) {
+          LOG.warn("Could not parse {}/{} to a timeout value", key, configValue);
+        }
+      } else {
+        LOG.warn("Configuration {} not found to compute timeout", key);
+      }
+    }
+
+    return null == timeout ? defaultTimeout : timeout;
+  }
 }

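getMaxTimeout() is what feeds the 60-second clamp above: it collects every timeout-config key declared by the stage's tasks, resolves each key against server configuration, and keeps the largest parseable value, falling back to the default agent task timeout when nothing resolves. A Python paraphrase of the same walk (names adapted; get_property and timeout_keys are stand-ins, not the server's API):

def get_max_timeout(get_property, task_wrappers, default_timeout):
    """get_property maps a config key to a string value or None."""
    timeout_keys = set()
    for wrapper in task_wrappers:       # FIXME upstream: one task per wrapper
        timeout_keys.update(wrapper.timeout_keys)

    timeout = None
    for key in timeout_keys:
        value = get_property(key)
        if not value:
            continue                    # the Java code warns: config not found
        try:
            candidate = int(value)
        except ValueError:
            continue                    # ...or: could not parse to a timeout
        if timeout is None or candidate > timeout:
            timeout = candidate
    return default_timeout if timeout is None else timeout

class _Wrapper(object):  # minimal stand-in for TaskWrapper
    def __init__(self, keys):
        self.timeout_keys = set(keys)

props = {'upgrade.parameter.nn-restart.timeout': '3600'}
assert get_max_timeout(props.get, [_Wrapper(['upgrade.parameter.nn-restart.timeout'])], 900) == 3600
assert get_max_timeout(props.get, [_Wrapper([])], 900) == 900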
http://git-wip-us.apache.org/repos/asf/ambari/blob/ac75f1da/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Task.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Task.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Task.java
index 5c43c2b..5c7cb6c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Task.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Task.java
@@ -42,6 +42,12 @@ public abstract class Task {
   public boolean isSequential = false;
 
   /**
+   * The config property to check for timeout.
+   */
+  @XmlAttribute(name="timeout-config")
+  public String timeoutConfig = null;
+
+  /**
    * @return the type of the task
    */
   public abstract Type getType();

http://git-wip-us.apache.org/repos/asf/ambari/blob/ac75f1da/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapper.java
index 11e27cf..dfa6159 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapper.java
@@ -19,10 +19,13 @@ package org.apache.ambari.server.state.stack.upgrade;
 
 import java.util.Arrays;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.commons.lang.StringUtils;
+
 import com.google.common.base.Objects;
 
 /**
@@ -34,7 +37,9 @@ public class TaskWrapper {
   private String component;
   private Set<String> hosts; // all the hosts that all the tasks must run
   private Map<String, String> params;
+  /* FIXME a TaskWrapper really should be wrapping ONLY ONE task */
   private List<Task> tasks; // all the tasks defined for the hostcomponent
+  private Set<String> timeoutKeys = new HashSet<>();
 
   /**
    * @param s the service name for the tasks
@@ -42,10 +47,11 @@ public class TaskWrapper {
    * @param hosts the set of hosts that the tasks are for
    * @param tasks an array of tasks as a convenience
    */
-  public TaskWrapper(String s, String c, Set<String> hosts, Task... tasks) {
-    this(s, c, hosts, null, Arrays.asList(tasks));
+  public TaskWrapper(String s, String c, Set<String> hosts, Task task) {
+    this(s, c, hosts, null, task);
   }
 
+
   /**
    * @param s the service name for the tasks
    * @param c the component name for the tasks
@@ -71,6 +77,13 @@ public class TaskWrapper {
     this.hosts = hosts;
     this.params = (params == null) ? new HashMap<String, String>() : params;
     this.tasks = tasks;
+
+    // !!! FIXME there should only be one task
+    for (Task task : tasks) {
+      if (StringUtils.isNotBlank(task.timeoutConfig)) {
+        timeoutKeys.add(task.timeoutConfig);
+      }
+    }
   }
 
   /**
@@ -133,4 +146,12 @@ public class TaskWrapper {
     return false;
   }
 
+
+  /**
+   * @return the timeout keys for all the tasks in this wrapper.
+   */
+  public Set<String> getTimeoutKeys() {
+    return timeoutKeys;
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/ac75f1da/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapperBuilder.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapperBuilder.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapperBuilder.java
index a75fe00..2212b5a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapperBuilder.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapperBuilder.java
@@ -40,13 +40,16 @@ public class TaskWrapperBuilder {
   private static Logger LOG = LoggerFactory.getLogger(TaskWrapperBuilder.class);
 
   /**
-   * Creates a collection of tasks based on the set of hosts they are allowed to run on
+   * Creates a collection of task wrappers based on the set of hosts they are allowed to run on
    * by analyzing the "hosts" attribute of any ExecuteTask objects.
+   *
    * @param service the service name for the tasks
    * @param component the component name for the tasks
    * @param hostsType the collection of sets along with their status
    * @param tasks collection of tasks
    * @param params additional parameters
+   *
+   * @return the task wrappers, one for each task that is passed with {@code tasks}
    */
   public static List<TaskWrapper> getTaskList(String service, String component, HostsType hostsType, List<Task> tasks, Map<String, String> params) {
     // Ok if Ambari Server is not part of the cluster hosts since this is only used in the calculation of how many batches

http://git-wip-us.apache.org/repos/asf/ambari/blob/ac75f1da/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
index 0489792..aa34dc0 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
@@ -533,7 +533,16 @@ def is_namenode_bootstrapped(params):
   return marked
 
 
-@retry(times=125, sleep_time=5, backoff_factor=2, err_class=Fail)
+def find_timeout():
+  import params
+
+  if isinstance(params.command_timeout, (int, long)):
+    return params.command_timeout
+
+  return int(params.command_timeout)
+
+
+@retry(sleep_time=5, backoff_factor=2, err_class=Fail, timeout_func=find_timeout)
 def is_this_namenode_active():
   """
   Gets whether the current NameNode is Active. This function will wait until the NameNode is

http://git-wip-us.apache.org/repos/asf/ambari/blob/ac75f1da/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
index f0566d7..e88dbdd 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
@@ -70,6 +70,8 @@ version = default("/commandParams/version", None)
 # are started using different commands.
 desired_namenode_role = default("/commandParams/desired_namenode_role", None)
 
+command_timeout = default("/commandParams/command_timeout", 900)
+
 # get the correct version to use for checking stack features
 version_for_stack_feature_checks = get_stack_feature_version(config)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/ac75f1da/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml
index 1340b22..97904bf 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml
@@ -548,7 +548,7 @@
     <service name="HDFS">
       <component name="NAMENODE">
         <upgrade>
-          <task xsi:type="restart-task"/>
+          <task xsi:type="restart-task" timeout-config="upgrade.parameter.nn-restart.timeout"/>
         </upgrade>
       </component>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/ac75f1da/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml
index 40afc4f..3757121 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml
@@ -579,7 +579,7 @@
         <pre-downgrade /> <!--  no-op to prevent config changes on downgrade -->
 
         <upgrade>
-          <task xsi:type="restart-task" />
+          <task xsi:type="restart-task" timeout-config="upgrade.parameter.nn-restart.timeout"/>
         </upgrade>
       </component>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/ac75f1da/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml
index e0882d8..f7fd175 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml
@@ -678,7 +678,7 @@
         <pre-downgrade /> <!--  no-op to prevent config changes on downgrade -->
 
         <upgrade>
-          <task xsi:type="restart-task" />
+          <task xsi:type="restart-task" timeout-config="upgrade.parameter.nn-restart.timeout"/>
         </upgrade>
       </component>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/ac75f1da/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
index 0f4efdc..78fe831 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
@@ -687,7 +687,7 @@
         <pre-downgrade /> <!--  no-op to prevent config changes on downgrade -->
 
         <upgrade>
-          <task xsi:type="restart-task" />
+          <task xsi:type="restart-task" timeout-config="upgrade.parameter.nn-restart.timeout"/>
         </upgrade>
       </component>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/ac75f1da/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.4.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.4.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.4.xml
index d5e9a5b..fba7093 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.4.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.4.xml
@@ -524,7 +524,7 @@
     <service name="HDFS">
       <component name="NAMENODE">
         <upgrade>
-          <task xsi:type="restart-task" />
+          <task xsi:type="restart-task" timeout-config="upgrade.parameter.nn-restart.timeout"/>
         </upgrade>
       </component>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/ac75f1da/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml
index 350395c..68efed2 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml
@@ -678,7 +678,7 @@
         <pre-downgrade /> <!--  no-op to prevent config changes on downgrade -->
 
         <upgrade>
-          <task xsi:type="restart-task" />
+          <task xsi:type="restart-task" timeout-config="upgrade.parameter.nn-restart.timeout"/>
         </upgrade>
       </component>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/ac75f1da/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
index 9ac3d52..2ed7962 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
@@ -692,7 +692,7 @@
         <pre-downgrade /> <!--  no-op to prevent config changes on downgrade -->
 
         <upgrade>
-          <task xsi:type="restart-task" />
+          <task xsi:type="restart-task" timeout-config="upgrade.parameter.nn-restart.timeout"/>
         </upgrade>
       </component>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/ac75f1da/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.5.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.5.xml
index 04a06e8..1af96dd 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.5.xml
@@ -574,7 +574,7 @@
     <service name="HDFS">
       <component name="NAMENODE">
         <upgrade>
-          <task xsi:type="restart-task" />
+          <task xsi:type="restart-task" timeout-config="upgrade.parameter.nn-restart.timeout"/>
         </upgrade>
       </component>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/ac75f1da/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
index 879fe0f..53d4579 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
@@ -619,7 +619,7 @@
         </pre-upgrade>
         <pre-downgrade />
         <upgrade>
-          <task xsi:type="restart-task" />
+          <task xsi:type="restart-task" timeout-config="upgrade.parameter.nn-restart.timeout"/>
         </upgrade>
       </component>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/ac75f1da/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
index fd72e4d..5b8f53b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
@@ -610,7 +610,7 @@
         </pre-upgrade>
         <pre-downgrade/> <!--  no-op to prevent config changes on downgrade -->
         <upgrade>
-          <task xsi:type="restart-task" />
+          <task xsi:type="restart-task" timeout-config="upgrade.parameter.nn-restart.timeout"/>
         </upgrade>
       </component>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/ac75f1da/ambari-server/src/main/resources/upgrade-pack.xsd
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade-pack.xsd b/ambari-server/src/main/resources/upgrade-pack.xsd
index 1f11aa1..aa7ddd8 100644
--- a/ambari-server/src/main/resources/upgrade-pack.xsd
+++ b/ambari-server/src/main/resources/upgrade-pack.xsd
@@ -276,6 +276,7 @@
       <xs:element name="summary" minOccurs="0" />
     </xs:sequence>
     <xs:attribute name="sequential" use="optional" type="xs:boolean" />
+    <xs:attribute name="timeout-config" use="optional" type="xs:string" />
   </xs:complexType>
   
   <xs:complexType name="restart-task">

http://git-wip-us.apache.org/repos/asf/ambari/blob/ac75f1da/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
index 999b7a7..e587f28 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
@@ -42,9 +42,11 @@ import org.apache.ambari.server.H2DatabaseCleaner;
 import org.apache.ambari.server.Role;
 import org.apache.ambari.server.RoleCommand;
 import org.apache.ambari.server.actionmanager.ActionManager;
+import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper;
 import org.apache.ambari.server.actionmanager.HostRoleCommand;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.actionmanager.Stage;
+import org.apache.ambari.server.agent.ExecutionCommand.KeyNames;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.audit.AuditLogger;
 import org.apache.ambari.server.configuration.Configuration;
@@ -160,9 +162,10 @@ public class UpgradeResourceProviderTest {
 
     EasyMock.replay(configHelper);
 
+    InMemoryDefaultTestModule module = new InMemoryDefaultTestModule();
+
     // create an injector which will inject the mocks
-    injector = Guice.createInjector(Modules.override(
-        new InMemoryDefaultTestModule()).with(new MockModule()));
+    injector = Guice.createInjector(Modules.override(module).with(new MockModule()));
 
     H2DatabaseCleaner.resetSequences(injector);
     injector.getInstance(GuiceJpaInitializer.class);
@@ -250,9 +253,12 @@ public class UpgradeResourceProviderTest {
     sch = component.addServiceComponentHost("h1");
     sch.setVersion("2.1.1.0");
 
+    Configuration configuration = injector.getInstance(Configuration.class);
+    configuration.setProperty("upgrade.parameter.zk-server.timeout", "824");
+
     topologyManager = injector.getInstance(TopologyManager.class);
     StageUtils.setTopologyManager(topologyManager);
-    StageUtils.setConfiguration(injector.getInstance(Configuration.class));
+    StageUtils.setConfiguration(configuration);
     ActionManager.setTopologyManager(topologyManager);
     EasyMock.replay(injector.getInstance(AuditLogger.class));
   }
@@ -1650,6 +1656,60 @@ public class UpgradeResourceProviderTest {
         HostRoleStatus.IN_PROGRESS_STATUSES);
   }
 
+  @Test
+  public void testTimeouts() throws Exception {
+    Cluster cluster = clusters.getCluster("c1");
+
+    StackEntity stackEntity = stackDAO.find("HDP", "2.1.1");
+    RepositoryVersionEntity repoVersionEntity = new RepositoryVersionEntity();
+    repoVersionEntity.setDisplayName("My New Version 3");
+    repoVersionEntity.setOperatingSystems("");
+    repoVersionEntity.setStack(stackEntity);
+    repoVersionEntity.setVersion("2.2.2.3");
+    repoVersionDao.create(repoVersionEntity);
+
+    Map<String, Object> requestProps = new HashMap<>();
+    requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1");
+    requestProps.put(UpgradeResourceProvider.UPGRADE_VERSION, "2.2.2.3");
+    requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_test");
+    requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, "true");
+    requestProps.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.UPGRADE.name());
+
+    ResourceProvider upgradeResourceProvider = createProvider(amc);
+
+    Request request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), null);
+    RequestStatus status = upgradeResourceProvider.createResources(request);
+
+
+    Set<Resource> createdResources = status.getAssociatedResources();
+    assertEquals(1, createdResources.size());
+    Resource res = createdResources.iterator().next();
+    Long id = (Long) res.getPropertyValue("Upgrade/request_id");
+    assertNotNull(id);
+    assertEquals(Long.valueOf(1), id);
+
+
+    ActionManager am = injector.getInstance(ActionManager.class);
+
+    List<HostRoleCommand> commands = am.getRequestTasks(id);
+
+    boolean found = false;
+
+    for (HostRoleCommand command : commands) {
+      ExecutionCommandWrapper wrapper = command.getExecutionCommandWrapper();
+
+      if (command.getRole().equals(Role.ZOOKEEPER_SERVER) && command.getRoleCommand().equals(RoleCommand.CUSTOM_COMMAND)) {
+        Map<String, String> commandParams = wrapper.getExecutionCommand().getCommandParams();
+        assertTrue(commandParams.containsKey(KeyNames.COMMAND_TIMEOUT));
+        assertEquals("824",commandParams.get(KeyNames.COMMAND_TIMEOUT));
+        found = true;
+      }
+    }
+
+    assertTrue("ZooKeeper timeout override was found", found);
+
+  }
+
   /**
    *
    */

http://git-wip-us.apache.org/repos/asf/ambari/blob/ac75f1da/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test.xml b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test.xml
index 8d506bf..037e39a 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test.xml
@@ -146,7 +146,7 @@
         </pre-upgrade>
         <pre-downgrade copy-upgrade="true" />
         <upgrade>
-          <task xsi:type="restart-task" />
+          <task xsi:type="restart-task" timeout-config="upgrade.parameter.zk-server.timeout"/>
         </upgrade>
         <post-upgrade>
           <task xsi:type="configure" id="hdp_2_1_1_zookeeper_new_config_type" />


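As background for the change above: the new timeout-config attribute names an ambari.properties key whose value, when present, overrides the default command timeout placed into commandParams. A minimal Python sketch of that resolution, with illustrative names (the real lookup lives in UpgradeResourceProvider on the Ambari server):

# Minimal sketch of resolving timeout-config into commandParams/command_timeout.
# Names and structure are illustrative, not the exact Ambari implementation.
DEFAULT_COMMAND_TIMEOUT = 900  # matches the params_linux.py default above

def resolve_command_timeout(server_properties, timeout_config=None):
    if timeout_config and timeout_config in server_properties:
        return int(server_properties[timeout_config])
    return DEFAULT_COMMAND_TIMEOUT

# Mirrors testTimeouts: upgrade.parameter.zk-server.timeout=824 wins over the default.
props = {"upgrade.parameter.zk-server.timeout": "824"}
assert resolve_command_timeout(props, "upgrade.parameter.zk-server.timeout") == 824
assert resolve_command_timeout(props) == DEFAULT_COMMAND_TIMEOUT
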
[02/34] ambari git commit: AMBARI-20578 ADDENDUM Log Search Configuration API (mgergely)

Posted by nc...@apache.org.
AMBARI-20578 ADDENDUM Log Search Configuration API (mgergely)

Change-Id: I3b8af535da9f9ce43dffff661de74ce1b2f6925c


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/64e88e05
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/64e88e05
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/64e88e05

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 64e88e05d264e4d5a12122b48b724aca48c87d8f
Parents: 310c554
Author: Miklos Gergely <mg...@hortonworks.com>
Authored: Wed Apr 12 17:40:39 2017 +0200
Committer: Miklos Gergely <mg...@hortonworks.com>
Committed: Wed Apr 12 17:41:01 2017 +0200

----------------------------------------------------------------------
 .../org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java | 5 +++++
 1 file changed, 5 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/64e88e05/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java
index 5aef50a..fec041c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java
@@ -23,6 +23,7 @@ import static org.easymock.EasyMock.capture;
 import static org.easymock.EasyMock.createMockBuilder;
 import static org.easymock.EasyMock.eq;
 import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.expectLastCall;
 import static org.easymock.EasyMock.newCapture;
 import static org.easymock.EasyMock.replay;
 import static org.easymock.EasyMock.reset;
@@ -131,11 +132,13 @@ public class UpgradeCatalog300Test {
     Method addNewConfigurationsFromXml = AbstractUpgradeCatalog.class.getDeclaredMethod("addNewConfigurationsFromXml");
     Method showHcatDeletedUserMessage = UpgradeCatalog300.class.getDeclaredMethod("showHcatDeletedUserMessage");
     Method setStatusOfStagesAndRequests = UpgradeCatalog300.class.getDeclaredMethod("setStatusOfStagesAndRequests");
+    Method updateLogSearchConfigs = UpgradeCatalog300.class.getDeclaredMethod("updateLogSearchConfigs");
 
    UpgradeCatalog300 upgradeCatalog300 = createMockBuilder(UpgradeCatalog300.class)
             .addMockedMethod(showHcatDeletedUserMessage)
             .addMockedMethod(addNewConfigurationsFromXml)
             .addMockedMethod(setStatusOfStagesAndRequests)
+            .addMockedMethod(updateLogSearchConfigs)
             .createMock();
 
 
@@ -143,6 +146,8 @@ public class UpgradeCatalog300Test {
     upgradeCatalog300.showHcatDeletedUserMessage();
     upgradeCatalog300.setStatusOfStagesAndRequests();
 
+    upgradeCatalog300.updateLogSearchConfigs();
+    expectLastCall().once();
 
     replay(upgradeCatalog300);
 


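The test follows the usual EasyMock pattern: mock updateLogSearchConfigs on the catalog and require exactly one invocation. A rough Python analog of that expectation using unittest.mock, for readers less familiar with EasyMock (the class body below is a stand-in, not the real catalog):

from unittest import mock

class UpgradeCatalog300(object):
    def executeDMLUpdates(self):
        self.updateLogSearchConfigs()  # the call under test

    def updateLogSearchConfigs(self):
        raise NotImplementedError  # real work is replaced by the mock

catalog = UpgradeCatalog300()
with mock.patch.object(catalog, "updateLogSearchConfigs") as mocked:
    catalog.executeDMLUpdates()
mocked.assert_called_once()  # analogous to expectLastCall().once()
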
[19/34] ambari git commit: AMBARI-20697 Refactor stack advisor logic in HDP 2.3 for the Ranger Kafka plugin (Bharat Viswanadham via dili)

Posted by nc...@apache.org.
AMBARI-20697 Refactor stack advisor logic in HDP 2.3 for the Ranger Kafka plugin (Bharat Viswanadham via dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/cfde36c0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/cfde36c0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/cfde36c0

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: cfde36c077de31dda0802f983602f680afcbce29
Parents: 14c1ffd
Author: Di Li <di...@apache.org>
Authored: Thu Apr 13 17:00:56 2017 -0400
Committer: Di Li <di...@apache.org>
Committed: Thu Apr 13 17:00:56 2017 -0400

----------------------------------------------------------------------
 .../main/resources/stacks/HDP/2.3/services/stack_advisor.py    | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/cfde36c0/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
index 67532c5..eb7389d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
@@ -349,10 +349,10 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
       ranger_kafka_plugin_enabled = services["configurations"]["ranger-env"]["properties"]["ranger-kafka-plugin-enabled"]
       putKafkaRangerPluginProperty("ranger-kafka-plugin-enabled", ranger_kafka_plugin_enabled)
 
-    # Determine if the Ranger/Kafka Plugin is enabled
-    ranger_plugin_enabled = "RANGER" in servicesList
+
+    ranger_plugin_enabled = False
     # Only if the RANGER service is installed....
-    if ranger_plugin_enabled:
+    if "RANGER" in servicesList:
       # If ranger-kafka-plugin-properties/ranger-kafka-plugin-enabled,
       # determine if the Ranger/Kafka plug-in is enabled or not
       if 'ranger-kafka-plugin-properties' in configurations and \


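Reduced to a sketch, the corrected control flow is: the plugin flag starts out False and is only flipped when RANGER is actually installed and the plugin property says so (configuration keys as in the diff; surrounding stack-advisor plumbing omitted):

def is_ranger_kafka_plugin_enabled(services_list, configurations):
    # Enabled only when RANGER is installed AND the plugin property says "Yes".
    if "RANGER" not in services_list:
        return False
    props = configurations.get("ranger-kafka-plugin-properties", {}).get("properties", {})
    return props.get("ranger-kafka-plugin-enabled", "No").lower() == "yes"

assert not is_ranger_kafka_plugin_enabled(["KAFKA"], {})
assert is_ranger_kafka_plugin_enabled(
    ["KAFKA", "RANGER"],
    {"ranger-kafka-plugin-properties":
        {"properties": {"ranger-kafka-plugin-enabled": "Yes"}}})
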
[29/34] ambari git commit: AMBARI-20761 Update zookeeper.connect description (Bharat Viswanadham via dili)

Posted by nc...@apache.org.
AMBARI-20761 Update zookeeper.connect description (Bharat Viswanadham via dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/dd3fdc22
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/dd3fdc22
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/dd3fdc22

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: dd3fdc220d9ad3b2b723a201ce7aa5d767850a58
Parents: 103e49a
Author: Di Li <di...@apache.org>
Authored: Mon Apr 17 11:27:32 2017 -0400
Committer: Di Li <di...@apache.org>
Committed: Mon Apr 17 11:27:32 2017 -0400

----------------------------------------------------------------------
 .../common-services/KAFKA/0.8.1/configuration/kafka-broker.xml    | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/dd3fdc22/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
index 96b7750..e270b84 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
@@ -50,8 +50,7 @@
       Zookeeper also allows you to add a "chroot" path which will make all kafka data for this cluster appear under a particular path.
       This is a way to setup multiple Kafka clusters or other applications on the same zookeeper cluster. To do this give a connection
      string in the form hostname1:port1,hostname2:port2,hostname3:port3/chroot/path which would put all this cluster's data under the
-      path /chroot/path. Note that you must create this path yourself prior to starting the broker and consumers must use the
-      same connection string.
+      path /chroot/path. Note that consumers must use the same connection string.
     </description>
     <on-ambari-upgrade add="false"/>
   </property>


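The connection-string format the description documents is hostname:port pairs joined by commas, with a single optional chroot suffix at the very end. A small sketch of composing one (hostnames are placeholders):

def zookeeper_connect(hosts, port=2181, chroot=None):
    # Join host:port pairs; the chroot path is appended once, at the end.
    value = ",".join("{0}:{1}".format(h, port) for h in hosts)
    return value + chroot if chroot else value

print(zookeeper_connect(["zk1.example.com", "zk2.example.com", "zk3.example.com"],
                        chroot="/kafka/cluster1"))
# zk1.example.com:2181,zk2.example.com:2181,zk3.example.com:2181/kafka/cluster1
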
[23/34] ambari git commit: AMBARI-20628. Ambari doesn't set properties correctly (magyari_sandor)

Posted by nc...@apache.org.
AMBARI-20628. Ambari doesn't set properties correctly (magyari_sandor)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/bf637950
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/bf637950
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/bf637950

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: bf637950901f2b0f213c1cc149f5e49b8a8d0968
Parents: c57300a
Author: Sandor Magyari <sm...@hortonworks.com>
Authored: Thu Apr 13 17:04:14 2017 +0200
Committer: Sandor Magyari <sm...@hortonworks.com>
Committed: Fri Apr 14 10:20:09 2017 +0200

----------------------------------------------------------------------
 .../StackAdvisorBlueprintProcessor.java         | 55 ++++----------------
 .../topology/ClusterConfigurationRequest.java   |  8 +--
 .../StackAdvisorBlueprintProcessorTest.java     |  2 -
 3 files changed, 16 insertions(+), 49 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/bf637950/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessor.java
index 0abcc14..d306e25 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessor.java
@@ -32,10 +32,8 @@ import org.apache.ambari.server.controller.internal.ConfigurationTopologyExcepti
 import org.apache.ambari.server.controller.internal.Stack;
 import org.apache.ambari.server.state.ValueAttributesInfo;
 import org.apache.ambari.server.topology.AdvisedConfiguration;
-import org.apache.ambari.server.topology.Blueprint;
 import org.apache.ambari.server.topology.ClusterTopology;
 import org.apache.ambari.server.topology.ConfigRecommendationStrategy;
-import org.apache.ambari.server.topology.Configuration;
 import org.apache.ambari.server.topology.HostGroup;
 import org.apache.ambari.server.topology.HostGroupInfo;
 import org.slf4j.Logger;
@@ -75,13 +73,13 @@ public class StackAdvisorBlueprintProcessor {
   /**
    * Recommend configurations by the stack advisor, then store the results in cluster topology.
    * @param clusterTopology cluster topology instance
-   * @param existingConfigurations Existing configurations of cluster
+   * @param userProvidedConfigurations User configurations of cluster provided in Blueprint + Cluster template
    */
-  public void adviseConfiguration(ClusterTopology clusterTopology, Map<String, Map<String, String>> existingConfigurations) throws ConfigurationTopologyException {
+  public void adviseConfiguration(ClusterTopology clusterTopology, Map<String, Map<String, String>> userProvidedConfigurations) throws ConfigurationTopologyException {
     StackAdvisorRequest request = createStackAdvisorRequest(clusterTopology, StackAdvisorRequestType.CONFIGURATIONS);
     try {
       RecommendationResponse response = stackAdvisorHelper.recommend(request);
-      addAdvisedConfigurationsToTopology(response, clusterTopology, existingConfigurations);
+      addAdvisedConfigurationsToTopology(response, clusterTopology, userProvidedConfigurations);
     } catch (StackAdvisorException e) {
       throw new ConfigurationTopologyException(RECOMMENDATION_FAILED, e);
     } catch (IllegalArgumentException e) {
@@ -94,7 +92,7 @@ public class StackAdvisorBlueprintProcessor {
     Map<String, Set<String>> hgComponentsMap = gatherHostGroupComponents(clusterTopology);
     Map<String, Set<String>> hgHostsMap = gatherHostGroupBindings(clusterTopology);
     Map<String, Set<String>> componentHostsMap = gatherComponentsHostsMap(hgComponentsMap,
-      hgHostsMap);
+            hgHostsMap);
     return StackAdvisorRequest.StackAdvisorRequestBuilder
       .forStack(stack.getName(), stack.getVersion())
       .forServices(new ArrayList<>(clusterTopology.getBlueprint().getServices()))
@@ -167,7 +165,7 @@ public class StackAdvisorBlueprintProcessor {
   }
 
   private void addAdvisedConfigurationsToTopology(RecommendationResponse response,
-                                                  ClusterTopology topology, Map<String, Map<String, String>> existingConfigurations) {
+                                                  ClusterTopology topology, Map<String, Map<String, String>> userProvidedConfigurations) {
     Preconditions.checkArgument(response.getRecommendations() != null,
       "Recommendation response is empty.");
     Preconditions.checkArgument(response.getRecommendations().getBlueprint() != null,
@@ -175,67 +173,36 @@ public class StackAdvisorBlueprintProcessor {
     Preconditions.checkArgument(response.getRecommendations().getBlueprint().getConfigurations() != null,
       "Configurations are missing from the recommendation blueprint response.");
 
-    Map<String, Map<String, String>> userProvidedProperties = getUserProvidedProperties(topology, existingConfigurations);
     Map<String, BlueprintConfigurations> recommendedConfigurations =
       response.getRecommendations().getBlueprint().getConfigurations();
     for (Map.Entry<String, BlueprintConfigurations> configEntry : recommendedConfigurations.entrySet()) {
       String configType = configEntry.getKey();
       BlueprintConfigurations blueprintConfig = filterBlueprintConfig(configType, configEntry.getValue(),
-        userProvidedProperties, topology);
+              userProvidedConfigurations, topology);
       topology.getAdvisedConfigurations().put(configType, new AdvisedConfiguration(
         blueprintConfig.getProperties(), blueprintConfig.getPropertyAttributes()));
     }
   }
 
   /**
-   * Gather user defined properties. (keep that only which is not included in the stack defaults or it overrides the stack default value)
-   */
-  private Map<String, Map<String, String>> getUserProvidedProperties(ClusterTopology topology, Map<String, Map<String, String>> existingConfigurations) {
-    Map<String, Map<String, String>> userProvidedProperties = Maps.newHashMap();
-    Blueprint blueprint = topology.getBlueprint();
-    Configuration stackDefaults = blueprint.getStack().getConfiguration(blueprint.getServices());
-    Map<String, Map<String, String>> stackDefaultProps = stackDefaults.getProperties();
-
-    for (Map.Entry<String, Map<String, String>> configGroup : existingConfigurations.entrySet()) {
-      String configType = configGroup.getKey();
-      Map<String, String> configsToAdd = Maps.newHashMap();
-      for (Map.Entry<String, String> configProp : configGroup.getValue().entrySet()) {
-        if (stackDefaultProps.containsKey(configType) && stackDefaultProps.get(configType).containsKey(configProp.getKey())) {
-          String originalValue = stackDefaultProps.get(configType).get(configProp.getKey());
-          if (originalValue != null && !originalValue.equals(configProp.getValue())) {
-            configsToAdd.put(configProp.getKey(), configProp.getValue());
-          }
-        } else {
-          configsToAdd.put(configProp.getKey(), configProp.getValue());
-        }
-      }
-      if (!configsToAdd.isEmpty()) {
-        userProvidedProperties.put(configGroup.getKey(), configsToAdd);
-      }
-    }
-
-    return userProvidedProperties;
-  }
-
-  /**
-   * Remove user defined properties from stack advisor output in case of ONLY_STACK_DEFAULTS_APPLY or
+   * Remove user defined properties from Stack Advisor output in case of ONLY_STACK_DEFAULTS_APPLY or
    * ALWAYS_APPLY_DONT_OVERRIDE_CUSTOM_VALUES.
    */
   private BlueprintConfigurations filterBlueprintConfig(String configType, BlueprintConfigurations config,
-                                                        Map<String, Map<String, String>> userProvidedProperties,
+                                                        Map<String, Map<String, String>> userProvidedConfigurations,
                                                         ClusterTopology topology) {
     if (topology.getConfigRecommendationStrategy() == ConfigRecommendationStrategy.ONLY_STACK_DEFAULTS_APPLY ||
       topology.getConfigRecommendationStrategy() == ConfigRecommendationStrategy
         .ALWAYS_APPLY_DONT_OVERRIDE_CUSTOM_VALUES) {
-      if (userProvidedProperties.containsKey(configType)) {
+      if (userProvidedConfigurations.containsKey(configType)) {
         BlueprintConfigurations newConfig = new BlueprintConfigurations();
         Map<String, String> filteredProps = Maps.filterKeys(config.getProperties(),
-          Predicates.not(Predicates.in(userProvidedProperties.get(configType).keySet())));
+          Predicates.not(Predicates.in(userProvidedConfigurations.get(configType).keySet())));
         newConfig.setProperties(Maps.newHashMap(filteredProps));
 
         if (config.getPropertyAttributes() != null) {
           Map<String, ValueAttributesInfo> filteredAttributes = Maps.filterKeys(config.getPropertyAttributes(),
-            Predicates.not(Predicates.in(userProvidedProperties.get(configType).keySet())));
+            Predicates.not(Predicates.in(userProvidedConfigurations.get(configType).keySet())));
           newConfig.setPropertyAttributes(Maps.newHashMap(filteredAttributes));
         }
         return newConfig;

http://git-wip-us.apache.org/repos/asf/ambari/blob/bf637950/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java
index 5913f4b..0e7d70b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java
@@ -137,17 +137,19 @@ public class ClusterConfigurationRequest {
     // this will update the topo cluster config and all host group configs in the cluster topology
     Set<String> updatedConfigTypes = new HashSet<>();
 
-    Configuration clusterConfiguration = clusterTopology.getConfiguration();
-    Map<String, Map<String, String>> existingConfigurations = clusterConfiguration.getFullProperties();
+    Map<String, Map<String, String>> userProvidedConfigurations = clusterTopology.getConfiguration().getFullProperties(1);
 
     try {
       if (configureSecurity) {
+        Configuration clusterConfiguration = clusterTopology.getConfiguration();
+        Map<String, Map<String, String>> existingConfigurations = clusterConfiguration.getFullProperties();
         updatedConfigTypes.addAll(configureKerberos(clusterConfiguration, existingConfigurations));
       }
 
       // obtain recommended configurations before config updates
       if (!ConfigRecommendationStrategy.NEVER_APPLY.equals(this.clusterTopology.getConfigRecommendationStrategy())) {
-        stackAdvisorBlueprintProcessor.adviseConfiguration(this.clusterTopology, existingConfigurations);
+        // get merged properties from Blueprint & cluster template (this doesn't contain stack default values)
+        stackAdvisorBlueprintProcessor.adviseConfiguration(this.clusterTopology, userProvidedConfigurations);
       }
 
       updatedConfigTypes.addAll(configurationProcessor.doUpdateForClusterCreate());

http://git-wip-us.apache.org/repos/asf/ambari/blob/bf637950/ambari-server/src/test/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessorTest.java
index 49f070a..3c5f8ed 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessorTest.java
@@ -165,9 +165,7 @@ public class StackAdvisorBlueprintProcessorTest {
     underTest.adviseConfiguration(clusterTopology, props);
     // THEN
     assertTrue(advisedConfigurations.get("core-site").getProperties().containsKey("dummyKey1"));
-    assertTrue(advisedConfigurations.get("core-site").getProperties().containsKey("dummyKey3"));
     assertTrue(advisedConfigurations.get("core-site").getPropertyValueAttributes().containsKey("dummyKey2"));
-    assertTrue(advisedConfigurations.get("core-site").getPropertyValueAttributes().containsKey("dummyKey3"));
     assertEquals("dummyValue", advisedConfigurations.get("core-site").getProperties().get("dummyKey1"));
     assertEquals(Boolean.toString(true), advisedConfigurations.get("core-site")
       .getPropertyValueAttributes().get("dummyKey2").getDelete());


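The core of the filtering above (Guava's Maps.filterKeys with a not-in predicate) drops any recommended property whose key the user already provided for that config type. The same operation as a minimal Python sketch, matching the adjusted test expectations:

def filter_recommendations(recommended, user_provided):
    # Per config type, keep only recommended properties the user did not set.
    filtered = {}
    for config_type, props in recommended.items():
        user_keys = set(user_provided.get(config_type, {}))
        filtered[config_type] = {k: v for k, v in props.items() if k not in user_keys}
    return filtered

recommended = {"core-site": {"dummyKey1": "advised", "dummyKey3": "advised"}}
user_provided = {"core-site": {"dummyKey3": "user value"}}
assert filter_recommendations(recommended, user_provided) == \
    {"core-site": {"dummyKey1": "advised"}}
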
[32/34] ambari git commit: AMBARI-18865. Kafka still showing log.cleanup.interval.mins (Bharat Viswanadham via alejandro)

Posted by nc...@apache.org.
AMBARI-18865. Kafka still showing log.cleanup.interval.mins (Bharat Viswanadham via alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6806d38b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6806d38b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6806d38b

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 6806d38b8526f5229a6ed4a56a4492f2b2ab27c2
Parents: 1941eed
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Mon Apr 17 10:51:40 2017 -0700
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Mon Apr 17 10:51:40 2017 -0700

----------------------------------------------------------------------
 .../KAFKA/0.8.1/configuration/kafka-broker.xml              | 9 +++++++++
 1 file changed, 9 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/6806d38b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
index e270b84..f2b4a76 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
@@ -157,6 +157,15 @@
     <value>10</value>
     <description>The frequency in minutes that the log cleaner checks whether any log segment is eligible for deletion to meet the retention policies.
     </description>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>log.retention.check.interval.ms</name>
+    <value>600000</value>
+    <description>
+      The frequency in milliseconds that the log cleaner checks whether any log segment is eligible for deletion to meet the retention policies.
+    </description>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>


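The replacement property keeps the same ten-minute default, just expressed in milliseconds; a one-line check of the conversion:

# log.cleanup.interval.mins = 10 maps to log.retention.check.interval.ms = 600000
assert 10 * 60 * 1000 == 600000
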
[10/34] ambari git commit: AMBARI-20719. Need to show decision conditions for the decision node in the Flow Graph tab (M Madhan Mohan Reddy via venkatasairam.lanka)

Posted by nc...@apache.org.
AMBARI-20719. Need to show decision conditions for the decision node in the Flow Graph tab (M Madhan Mohan Reddy via venkatasairam.lanka)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a5dc75ec
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a5dc75ec
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a5dc75ec

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: a5dc75ecee205c5cb8a1600c0e00232426256209
Parents: 3e01abd
Author: Venkata Sairam <ve...@gmail.com>
Authored: Thu Apr 13 16:58:13 2017 +0530
Committer: Venkata Sairam <ve...@gmail.com>
Committed: Thu Apr 13 16:58:13 2017 +0530

----------------------------------------------------------------------
 .../resources/ui/app/components/job-details.js  | 30 +++++++++++++++++++-
 .../components/workflow-job-details.hbs         |  6 ++++
 2 files changed, 35 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a5dc75ec/contrib/views/wfmanager/src/main/resources/ui/app/components/job-details.js
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/components/job-details.js b/contrib/views/wfmanager/src/main/resources/ui/app/components/job-details.js
index e74c873..70ba41c 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/components/job-details.js
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/components/job-details.js
@@ -327,7 +327,18 @@ export default Ember.Component.extend({
                   return "none";
                 }
               },
-              'target-arrow-color': 'data(borderColor)'
+              'target-arrow-color': 'data(borderColor)',
+              'color': '#262626',
+              'font-size': 12,
+              label: function(target) {
+                if (!target.data().transition || !target.data().transition.condition) {
+                  return "";
+                }else if (target.data().transition.condition.length>5){
+                  return target.data().transition.condition.slice(0, 5)+"...";
+                }else{
+                  return target.data().transition.condition;
+                }
+              }
             }
           }
         ],
@@ -354,6 +365,23 @@ export default Ember.Component.extend({
         var node = event.cyTarget;
         this.showActionNodeDetail(node, xmlString);
       }.bind(this));
+
+      cy.on('mousemove', 'edge', function(event) {
+        this.get("context").$(".overlay-transition-content, .decision-condition-label").hide();
+        if (event.cyTarget.data().transition && event.cyTarget.data().transition.condition) {
+          this.get("context").$(".decision-condition-body").html(event.cyTarget.data().transition.condition);
+          this.get("context").$(".overlay-transition-content").css({
+            top: event.originalEvent.offsetY + 10,
+            left: event.originalEvent.offsetX + 15
+          });
+          this.get("context").$(".overlay-transition-content, .decision-condition-label").show();
+        }
+      }.bind(this));
+
+      cy.on('mouseout', 'edge',function(event) {
+        this.get("context").$(".overlay-transition-content").hide();
+      }.bind(this));
+
       this.set("model.inProgress", false);
     },
     importSampleWorkflow (){

http://git-wip-us.apache.org/repos/asf/ambari/blob/a5dc75ec/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/workflow-job-details.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/workflow-job-details.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/workflow-job-details.hbs
index 0c5257e..9c940a2 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/workflow-job-details.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/workflow-job-details.hbs
@@ -203,6 +203,12 @@
               <div class="cy-note"><div class="pull-right">Click on node to get details</div></div>
             {{/if}}
             <div id="cy" class="cy-panel"></div>
+            <div class="overlay-transition-content">
+              <div class="decision-condition-label">
+                <div class="decision-condition-header">Condition</div>
+                <div class="decision-condition-body"></div>
+              </div>
+            </div>
           </div>
           <div class="col-xs-4">
             {{#if model.nodeName}}


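The edge-label rule added above is: no label when the transition has no condition, otherwise at most five characters followed by an ellipsis, with the full condition shown in the hover overlay. Restated as a small sketch:

def edge_label(condition, limit=5):
    # Empty when absent; truncate long conditions, leaving details to the overlay.
    if not condition:
        return ""
    return condition[:limit] + "..." if len(condition) > limit else condition

assert edge_label(None) == ""
assert edge_label("true") == "true"
assert edge_label("${fs:exists(wf:conf('path'))}") == "${fs:..."
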
[25/34] ambari git commit: AMBARI-20756. Update docker volumes for Ambari logsearch integration test module (oleewere)

Posted by nc...@apache.org.
AMBARI-20756. Update docker volumes for Ambari logsearch integration test module (oleewere)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/86c30589
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/86c30589
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/86c30589

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 86c305897b054a50a36e63a5c98ea5d2c47890be
Parents: e9cf9dd
Author: oleewere <ol...@gmail.com>
Authored: Thu Apr 13 13:57:25 2017 +0200
Committer: oleewere <ol...@gmail.com>
Committed: Fri Apr 14 13:05:12 2017 +0200

----------------------------------------------------------------------
 .../ambari/logsearch/steps/LogSearchDockerSteps.java  | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/86c30589/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/steps/LogSearchDockerSteps.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/steps/LogSearchDockerSteps.java b/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/steps/LogSearchDockerSteps.java
index 5f8f9bf..91e0b10 100644
--- a/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/steps/LogSearchDockerSteps.java
+++ b/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/steps/LogSearchDockerSteps.java
@@ -81,14 +81,16 @@ public class LogSearchDockerSteps {
       Volume testConfigVolume = new Volume("/root/test-config");
       Volume ambariVolume = new Volume("/root/ambari");
       Volume logfeederClassesVolume = new Volume("/root/ambari/ambari-logsearch/ambari-logsearch-logfeeder/target/package/classes");
-      Volume logsearchClassesVolume = new Volume("/root/ambari/ambari-logsearch/ambari-logsearch-portal/target/package/classes");
-      Volume logsearchWebappVolume = new Volume("/root/ambari/ambari-logsearch/ambari-logsearch-portal/target/package/classes/webapps/app");
+      Volume logsearchClassesVolume = new Volume("/root/ambari/ambari-logsearch/ambari-logsearch-server/target/package/classes");
+      Volume logsearchWebappVolume = new Volume("/root/ambari/ambari-logsearch/ambari-logsearch-server/target/package/classes/webapps/app");
+      Volume logsearchWebappLibsVolume = new Volume("/root/ambari/ambari-logsearch/ambari-logsearch-server/target/package/classes/webapps/app/libs/bower");
       Bind testLogsBind = new Bind(ambariFolder +"/ambari-logsearch/docker/test-logs", testLogsVolume);
       Bind testConfigBind = new Bind(ambariFolder +"/ambari-logsearch/docker/test-config", testConfigVolume);
       Bind ambariRootBind = new Bind(ambariFolder, ambariVolume);
       Bind logfeederClassesBind = new Bind(ambariFolder + "/ambari-logsearch/ambari-logsearch-logfeeder/target/classes", logfeederClassesVolume);
-      Bind logsearchClassesBind = new Bind(ambariFolder + "/ambari-logsearch/ambari-logsearch-portal/target/classes", logsearchClassesVolume);
-      Bind logsearchWebappBind = new Bind(ambariFolder + "/ambari-logsearch/ambari-logsearch-portal/src/main/webapp", logsearchWebappVolume);
+      Bind logsearchClassesBind = new Bind(ambariFolder + "/ambari-logsearch/ambari-logsearch-server/target/classes", logsearchClassesVolume);
+      Bind logsearchWebappBind = new Bind(ambariFolder + "/ambari-logsearch/ambari-logsearch-web/src/main/webapp", logsearchWebappVolume);
+      Bind logsearchWebappLibsBind = new Bind(ambariFolder + "/ambari-logsearch/ambari-logsearch-web/target/libs", logsearchWebappLibsVolume);
 
       // port bindings
       Ports ports = new Ports();
@@ -102,8 +104,8 @@ public class LogSearchDockerSteps {
       CreateContainerResponse createResponse = dockerClient.createContainerCmd("ambari-logsearch:v1.0")
         .withHostName("logsearch.apache.org")
         .withName("logsearch")
-        .withVolumes(testLogsVolume, testConfigVolume, ambariVolume, logfeederClassesVolume, logsearchClassesVolume, logsearchWebappVolume)
-        .withBinds(testLogsBind, testConfigBind, ambariRootBind, logfeederClassesBind, logsearchClassesBind, logsearchWebappBind)
+        .withVolumes(testLogsVolume, testConfigVolume, ambariVolume, logfeederClassesVolume, logsearchClassesVolume, logsearchWebappVolume, logsearchWebappLibsVolume)
+        .withBinds(testLogsBind, testConfigBind, ambariRootBind, logfeederClassesBind, logsearchClassesBind, logsearchWebappBind, logsearchWebappLibsBind)
         .withExposedPorts(
           new ExposedPort(StoryDataRegistry.INSTANCE.getLogsearchPort()),
           new ExposedPort(5005),


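Each Bind above pairs a host path under the local Ambari checkout with a Volume mount point inside the container. For comparison, the same pairing with the Python Docker SDK would look roughly like this (paths abbreviated, ambari_folder is a placeholder):

import docker

ambari_folder = "/root/src/ambari"  # placeholder for the local checkout
client = docker.from_env()
container = client.containers.create(
    "ambari-logsearch:v1.0",
    name="logsearch",
    hostname="logsearch.apache.org",
    volumes={
        # host path -> container mount point
        ambari_folder + "/ambari-logsearch/ambari-logsearch-server/target/classes": {
            "bind": "/root/ambari/ambari-logsearch/ambari-logsearch-server/target/package/classes",
            "mode": "rw"},
        ambari_folder + "/ambari-logsearch/ambari-logsearch-web/src/main/webapp": {
            "bind": "/root/ambari/ambari-logsearch/ambari-logsearch-server/target/package/classes/webapps/app",
            "mode": "rw"},
    },
)
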
[13/34] ambari git commit: AMBARI-20682. Wait For DataNodes To Shut Down During a Rolling Upgrade (dlysnichenko)

Posted by nc...@apache.org.
AMBARI-20682. Wait For DataNodes To Shut Down During a Rolling Upgrade (dlysnichenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/273dfcac
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/273dfcac
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/273dfcac

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 273dfcac0296ebfaebd21484ea887acfe2a02067
Parents: ac75f1d
Author: Lisnichenko Dmitro <dl...@hortonworks.com>
Authored: Thu Apr 13 17:36:19 2017 +0300
Committer: Lisnichenko Dmitro <dl...@hortonworks.com>
Committed: Thu Apr 13 17:36:19 2017 +0300

----------------------------------------------------------------------
 .../libraries/script/script.py                  | 61 +++++++++++++++---
 .../HIVE/package/scripts/mysql_service.py       |  5 ++
 .../HDFS/2.1.0.2.0/package/scripts/datanode.py  | 45 ++++++++++++-
 .../package/scripts/datanode_upgrade.py         | 38 +----------
 .../HIVE/package/scripts/mysql_service.py       |  5 +-
 .../HIVE/package/scripts/postgresql_service.py  |  5 +-
 .../python/stacks/2.0.6/HDFS/test_datanode.py   | 66 ++++++++++++++++----
 7 files changed, 164 insertions(+), 61 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/273dfcac/ambari-common/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index 5fa9ec4..bad09d2 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -28,6 +28,7 @@ import logging
 import platform
 import inspect
 import tarfile
+import time
 from optparse import OptionParser
 import resource_management
 from ambari_commons import OSCheck, OSConst
@@ -308,21 +309,36 @@ class Script(object):
       method = self.choose_method_to_execute(self.command_name)
       with Environment(self.basedir, tmp_dir=Script.tmp_dir) as env:
         env.config.download_path = Script.tmp_dir
-        
-        if self.command_name == "start" and not self.is_hook():
-          self.pre_start()
+
+        if not self.is_hook():
+          self.execute_prefix_function(self.command_name, 'pre', env)
         
         method(env)
 
-        if self.command_name == "start" and not self.is_hook():
-          self.post_start()
+        if not self.is_hook():
+          self.execute_prefix_function(self.command_name, 'post', env)
+
     except Fail as ex:
       ex.pre_raise()
       raise
     finally:
       if self.should_expose_component_version(self.command_name):
         self.save_component_version_to_structured_out()
-        
+
+  def execute_prefix_function(self, command_name, afix, env):
+    """
+    Execute an action afix (prefix or suffix) method based on command_name and the afix type.
+    Example: command_name=start, afix=pre results in executing self.pre_start(env), if it exists.
+    """
+    self_methods = dir(self)
+    method_name = "{0}_{1}".format(afix, command_name)
+    if not method_name in self_methods:
+      Logger.logger.debug("Action afix '{0}' not present".format(method_name))
+      return
+    Logger.logger.debug("Execute action afix: {0}".format(method_name))
+    method = getattr(self, method_name)
+    method(env)
+
   def is_hook(self):
     from resource_management.libraries.script.hook import Hook
     return (Hook in self.__class__.__bases__)
@@ -335,8 +351,11 @@ class Script(object):
 
   def get_pid_files(self):
     return []
-        
-  def pre_start(self):
+
+  def pre_start(self, env=None):
+    """
+    Executed before any start method. Posts contents of relevant *.out files to command execution log.
+    """
     if self.log_out_files:
       log_folder = self.get_log_folder()
       user = self.get_user()
@@ -366,6 +385,32 @@ class Script(object):
 
     Logger.info("Component has started with pid(s): {0}".format(', '.join(pids)))
 
+  def post_stop(self, env):
+    """
+    Executed after every stop method completes. Waits until the component has actually
+    stopped (the check is performed using the component's status() method).
+    """
+    self_methods = dir(self)
+
+    if 'status' not in self_methods:
+      return
+    status_method = getattr(self, 'status')
+    component_is_stopped = False
+    counter = 0
+    while not component_is_stopped :
+      try:
+        if counter % 100 == 0:
+          Logger.logger.info("Waiting for actual component stop")
+        status_method(env)
+        time.sleep(0.1)
+        counter += 1
+      except ComponentIsNotRunning, e:
+        Logger.logger.debug("'status' reports ComponentIsNotRunning")
+        component_is_stopped = True
+      except ClientComponentHasNoStatus, e:
+        Logger.logger.debug("Client component has no status")
+        component_is_stopped = True
+
   def choose_method_to_execute(self, command_name):
     """
     Returns a callable object that should be executed for a given command.

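The new dispatch generalizes the old hard-coded pre_start/post_start calls: it composes "<afix>_<command>" and invokes the method on self when it exists. Stripped of Ambari plumbing, the pattern is:

class Script(object):
    def execute_prefix_function(self, command_name, afix, env=None):
        # Compose e.g. "pre_start" or "post_stop" and call it if defined.
        method = getattr(self, "{0}_{1}".format(afix, command_name), None)
        if method is not None:
            method(env)

class DataNode(Script):
    def pre_start(self, env=None):
        print("pre_start ran")
    def post_stop(self, env=None):
        print("post_stop ran")

d = DataNode()
d.execute_prefix_function("start", "pre")   # pre_start ran
d.execute_prefix_function("start", "post")  # no post_start -> silently skipped
d.execute_prefix_function("stop", "post")   # post_stop ran
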
http://git-wip-us.apache.org/repos/asf/ambari/blob/273dfcac/ambari-funtest/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/mysql_service.py
----------------------------------------------------------------------
diff --git a/ambari-funtest/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/mysql_service.py b/ambari-funtest/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/mysql_service.py
index 4716343..cf1d30e 100644
--- a/ambari-funtest/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/mysql_service.py
+++ b/ambari-funtest/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/mysql_service.py
@@ -31,6 +31,11 @@ def mysql_service(daemon_name=None, action='start'):
   elif action == 'status':
     cmd = format('service {daemon_name} status')
     logoutput = False
+    try:
+      Execute(cmd)
+      return
+    except:
+      raise ComponentIsNotRunning()
   else:
     cmd = None
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/273dfcac/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
index 924eea4..cd52885 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
@@ -21,10 +21,13 @@ import datanode_upgrade
 from ambari_commons.constants import UPGRADE_TYPE_ROLLING
 
 from hdfs_datanode import datanode
+from resource_management import Script, Fail, shell, Logger
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions import conf_select, stack_select
-from resource_management.libraries.functions.constants import StackFeature
 from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.decorator import retry
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, FILE_TYPE_XML
 from resource_management.core.logger import Logger
@@ -32,6 +35,7 @@ from hdfs import hdfs
 from ambari_commons.os_family_impl import OsFamilyImpl
 from ambari_commons import OSConst
 from utils import get_hdfs_binary
+from utils import get_dfsadmin_base_command
 
 class DataNode(Script):
 
@@ -75,12 +79,51 @@ class DataNode(Script):
         datanode(action="stop")
     else:
       datanode(action="stop")
+    # verify that the datanode is down
+    self.check_datanode_shutdown(hdfs_binary)
 
   def status(self, env):
     import status_params
     env.set_params(status_params)
     datanode(action = "status")
 
+  @retry(times=24, sleep_time=5, err_class=Fail)
+  def check_datanode_shutdown(self, hdfs_binary):
+    """
+    Checks that a DataNode is down by running "hdfs dfsadmin getDatanodeInfo"
+    several times, pausing in between runs. Once the DataNode stops responding
+    this method will return, otherwise it will raise a Fail(...) and retry
+    automatically.
+    The stack defaults for retrying for HDFS are also way too slow for this
+    command; they are set to wait about 45 seconds between client retries. As
+    a result, a single execution of dfsadmin will take 45 seconds to retry and
+    the DataNode may be marked as dead, causing problems with HBase.
+    https://issues.apache.org/jira/browse/HDFS-8510 tracks reducing the
+    times for ipc.client.connect.retry.interval. In the meantime, override them
+    here, but only for RU.
+    :param hdfs_binary: name/path of the HDFS binary to use
+    :return:
+    """
+    import params
+
+    # override stock retry timeouts since after 30 seconds, the datanode is
+    # marked as dead and can affect HBase during RU
+    dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
+    command = format('{dfsadmin_base_command} -D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo {dfs_dn_ipc_address}')
+
+    is_datanode_deregistered = False
+    try:
+      shell.checked_call(command, user=params.hdfs_user, tries=1)
+    except:
+      is_datanode_deregistered = True
+
+    if not is_datanode_deregistered:
+      Logger.info("DataNode has not yet deregistered from the NameNode...")
+      raise Fail('DataNode has not yet deregistered from the NameNode...')
+
+    Logger.info("DataNode has successfully shutdown.")
+    return True
+
 
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class DataNodeDefault(DataNode):

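check_datanode_shutdown leans on Ambari's @retry decorator to turn each Fail into another polling attempt. A minimal standalone sketch of that decorator shape (not the Ambari implementation, which lives in resource_management.libraries.functions.decorator):

import time

def retry(times, sleep_time, err_class):
    # Re-invoke the wrapped function until it stops raising err_class
    # or the attempts run out; the final error propagates.
    def decorator(fn):
        def wrapper(*args, **kwargs):
            for attempt in range(times):
                try:
                    return fn(*args, **kwargs)
                except err_class:
                    if attempt == times - 1:
                        raise
                    time.sleep(sleep_time)
        return wrapper
    return decorator

class Fail(Exception):
    pass

attempts = {"n": 0}

@retry(times=24, sleep_time=0, err_class=Fail)
def check_shutdown():
    attempts["n"] += 1
    if attempts["n"] < 3:
        raise Fail("DataNode has not yet deregistered from the NameNode...")
    return True

assert check_shutdown() is True
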
http://git-wip-us.apache.org/repos/asf/ambari/blob/273dfcac/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode_upgrade.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode_upgrade.py
index b55237d..c1b0296 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode_upgrade.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode_upgrade.py
@@ -48,10 +48,7 @@ def pre_rolling_upgrade_shutdown(hdfs_binary):
   command = format('{dfsadmin_base_command} -shutdownDatanode {dfs_dn_ipc_address} upgrade')
 
   code, output = shell.call(command, user=params.hdfs_user)
-  if code == 0:
-    # verify that the datanode is down
-    _check_datanode_shutdown(hdfs_binary)
-  else:
+  if code != 0:
     # Due to bug HDFS-7533, DataNode may not always shutdown during stack upgrade, and it is necessary to kill it.
     if output is not None and re.search("Shutdown already in progress", output):
       Logger.error("Due to a known issue in DataNode, the command {0} did not work, so will need to shutdown the datanode forcefully.".format(command))
@@ -84,39 +81,6 @@ def is_datanode_process_running():
   except ComponentIsNotRunning:
     return False
 
-@retry(times=24, sleep_time=5, err_class=Fail)
-def _check_datanode_shutdown(hdfs_binary):
-  """
-  Checks that a DataNode is down by running "hdfs dfsadmin getDatanodeInfo"
-  several times, pausing in between runs. Once the DataNode stops responding
-  this method will return, otherwise it will raise a Fail(...) and retry
-  automatically.
-  The stack defaults for retrying for HDFS are also way too slow for this
-  command; they are set to wait about 45 seconds between client retries. As
-  a result, a single execution of dfsadmin will take 45 seconds to retry and
-  the DataNode may be marked as dead, causing problems with HBase.
-  https://issues.apache.org/jira/browse/HDFS-8510 tracks reducing the
-  times for ipc.client.connect.retry.interval. In the meantime, override them
-  here, but only for RU.
-  :param hdfs_binary: name/path of the HDFS binary to use
-  :return:
-  """
-  import params
-
-  # override stock retry timeouts since after 30 seconds, the datanode is
-  # marked as dead and can affect HBase during RU
-  dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
-  command = format('{dfsadmin_base_command} -D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo {dfs_dn_ipc_address}')
-
-  try:
-    Execute(command, user=params.hdfs_user, tries=1)
-  except:
-    Logger.info("DataNode has successfully shutdown for upgrade.")
-    return
-
-  Logger.info("DataNode has not shutdown.")
-  raise Fail('DataNode has not shutdown.')
-
 
 @retry(times=30, sleep_time=30, err_class=Fail) # keep trying for 15 mins
 def _check_datanode_startup(hdfs_binary):
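
The branch rewrite above leans on the contract between Ambari's two shell helpers: shell.call returns an (exit_code, output) tuple and never raises on a non-zero exit, while shell.checked_call (used by the new check_datanode_shutdown) raises on failure instead. A simplified sketch of that contract, stated as an assumption about resource_management.core.shell rather than its actual code:

import subprocess

def call(cmd, **kwargs):
  # Returns (exit_code, combined_output); the caller inspects the code.
  p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                       stderr=subprocess.STDOUT)
  out, _ = p.communicate()
  return p.returncode, out

def checked_call(cmd, **kwargs):
  # Same command, but a non-zero exit becomes an exception (Fail in Ambari).
  code, out = call(cmd)
  if code != 0:
    raise Exception("Execution of %r returned %d. %s" % (cmd, code, out))
  return code, out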

http://git-wip-us.apache.org/repos/asf/ambari/blob/273dfcac/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/mysql_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/mysql_service.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/mysql_service.py
index 11bbdd8..a4f3bbb 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/mysql_service.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/mysql_service.py
@@ -26,7 +26,10 @@ def mysql_service(daemon_name=None, action='start'):
   cmd = format('service {daemon_name} {action}')
 
   if action == 'status':
-    Execute(status_cmd)
+    try:
+      Execute(status_cmd)
+    except:
+      raise ComponentIsNotRunning()
   elif action == 'stop':
     Execute(cmd,
             logoutput = True,

http://git-wip-us.apache.org/repos/asf/ambari/blob/273dfcac/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/postgresql_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/postgresql_service.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/postgresql_service.py
index cc7b4cc..41fe107 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/postgresql_service.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/postgresql_service.py
@@ -26,7 +26,10 @@ def postgresql_service(postgresql_daemon_name=None, action='start'):
   cmd = format('service {postgresql_daemon_name} {action}')
 
   if action == 'status':
-    Execute(status_cmd)
+    try:
+      Execute(status_cmd)
+    except:
+      raise ComponentIsNotRunning()
   elif action == 'stop':
     Execute(cmd,
             logoutput = True,
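
Both the MySQL and PostgreSQL scripts now follow the same status idiom: Ambari expects a component's status() handler to raise ComponentIsNotRunning when the daemon is down, so a failing "service ... status" (Execute raises on a non-zero exit) is translated into that exception rather than surfacing as a generic error. Condensed, the idiom looks like this (a sketch; the bare except mirrors the committed code):

from resource_management.core.resources.system import Execute
from resource_management.core.exceptions import ComponentIsNotRunning

def daemon_status(daemon_name):
  status_cmd = 'service %s status' % daemon_name
  try:
    Execute(status_cmd)  # raises when the init script exits non-zero
  except Exception:
    # Non-zero status means "stopped" under Ambari's status contract.
    raise ComponentIsNotRunning()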

http://git-wip-us.apache.org/repos/asf/ambari/blob/273dfcac/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
index 1c3c5b7..2cd35ab 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
@@ -22,6 +22,7 @@ import json
 from mock.mock import MagicMock, patch
 from resource_management.libraries.script.script import Script
 from resource_management.core import shell
+import itertools
 from resource_management.core.exceptions import Fail
 import resource_management.libraries.functions.mounted_dirs_helper
 
@@ -76,13 +77,21 @@ class TestDatanode(RMFTestCase):
     )
     self.assertNoMoreResources()
 
+  @patch('time.sleep')
   @patch("os.path.exists", new = MagicMock(return_value=False))
-  def test_stop_default(self):
+  @patch("resource_management.core.shell.checked_call")
+  def test_stop_default(self, checked_call_mock, time_mock):
+    def side_effect(arg):
+      if '-D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo' in arg :
+        raise Fail()
+      return
+    checked_call_mock.side_effect = side_effect
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                        classname = "DataNode",
                        command = "stop",
                        config_file = "default.json",
                        stack_version = self.STACK_VERSION,
+                       checked_call_mocks = side_effect,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode'",
@@ -221,13 +230,21 @@ class TestDatanode(RMFTestCase):
     )
     self.assertNoMoreResources()
 
+  @patch('time.sleep')
   @patch("os.path.exists", new = MagicMock(return_value=False))
-  def test_stop_secured(self):
+  @patch("resource_management.core.shell.checked_call")
+  def test_stop_secured(self, checked_call_mock, time_mock):
+    def side_effect(arg):
+      if '-D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo' in arg :
+        raise Fail()
+      return
+    checked_call_mock.side_effect = side_effect
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                        classname = "DataNode",
                        command = "stop",
                        config_file = "secured.json",
                        stack_version = self.STACK_VERSION,
+                       checked_call_mocks = side_effect,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode',
@@ -237,9 +254,15 @@ class TestDatanode(RMFTestCase):
     self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid', action = ['delete'])
     self.assertNoMoreResources()
 
-
+  @patch('time.sleep')
   @patch("os.path.exists", new = MagicMock(return_value=False))
-  def test_stop_secured_HDP22_root(self):
+  @patch("resource_management.core.shell.checked_call")
+  def test_stop_secured_HDP22_root(self, checked_call_mock, time_mock):
+    def side_effect(arg):
+      if '-D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo' in arg :
+        raise Fail()
+      return
+    checked_call_mock.side_effect = side_effect
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/secured.json"
     with open(config_file, "r") as f:
       secured_json = json.load(f)
@@ -251,6 +274,7 @@ class TestDatanode(RMFTestCase):
                        command = "stop",
                        config_dict = secured_json,
                        stack_version = self.STACK_VERSION,
+                       checked_call_mocks = side_effect,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /usr/hdp/current/hadoop-client/conf stop datanode',
@@ -260,8 +284,15 @@ class TestDatanode(RMFTestCase):
     self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid', action = ['delete'])
     self.assertNoMoreResources()
 
+  @patch('time.sleep')
   @patch("os.path.exists", new = MagicMock(return_value=False))
-  def test_stop_secured_HDP22_non_root_https_only(self):
+  @patch("resource_management.core.shell.checked_call")
+  def test_stop_secured_HDP22_non_root_https_only(self, checked_call_mock, time_mock):
+    def side_effect(arg):
+      if '-D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo' in arg :
+        raise Fail()
+      return
+    checked_call_mock.side_effect = side_effect
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/secured.json"
     with open(config_file, "r") as f:
       secured_json = json.load(f)
@@ -276,6 +307,7 @@ class TestDatanode(RMFTestCase):
                        command = "stop",
                        config_dict = secured_json,
                        stack_version = self.STACK_VERSION,
+                       checked_call_mocks = side_effect,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /usr/hdp/current/hadoop-client/conf stop datanode'",
@@ -564,7 +596,7 @@ class TestDatanode(RMFTestCase):
 
   @patch("resource_management.core.shell.call")
   @patch('time.sleep')
-  def test_stop_during_upgrade(self, time_mock, call_mock):
+  def test_stop_during_upgrade_not_shutdown(self, time_mock, call_mock):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     call_mock_side_effects = [(0, ""), ]
     call_mock.side_effects = call_mock_side_effects
@@ -573,7 +605,7 @@ class TestDatanode(RMFTestCase):
 
     version = '2.2.1.0-3242'
     json_content['commandParams']['version'] = version
-
+    mocks_dict={}
     try:
       self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
         classname = "DataNode",
@@ -582,19 +614,23 @@ class TestDatanode(RMFTestCase):
         stack_version = self.STACK_VERSION,
         target = RMFTestCase.TARGET_COMMON_SERVICES,
         call_mocks = call_mock_side_effects,
+        checked_call_mocks=itertools.cycle([(0, "OK.")]),
+        mocks_dict = mocks_dict,
         command_args=["rolling"])
 
       raise Fail("Expected a fail since datanode didn't report a shutdown")
     except Exception, err:
-      expected_message = "DataNode has not shutdown."
+      expected_message = "DataNode has not yet deregistered from the NameNode..."
       if str(err.message) != expected_message:
         self.fail("Expected this exception to be thrown. " + expected_message + ". Got this instead, " + str(err.message))
 
-    self.assertResourceCalled("Execute", "hdfs dfsadmin -fs hdfs://c6401.ambari.apache.org:8020 -D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo 0.0.0.0:8010", tries=1, user="hdfs")
+    self.assertEquals(
+      ('hdfs dfsadmin -fs hdfs://c6401.ambari.apache.org:8020 -D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo 0.0.0.0:8010'),
+      mocks_dict['checked_call'].call_args_list[0][0][0])
 
   @patch("resource_management.core.shell.call")
   @patch('time.sleep')
-  def test_stop_during_upgrade(self, time_mock, call_mock):
+  def test_stop_during_upgrade_not_shutdown_ha(self, time_mock, call_mock):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/ha_default.json"
     call_mock_side_effects = [(0, ""), ]
     call_mock.side_effects = call_mock_side_effects
@@ -603,7 +639,7 @@ class TestDatanode(RMFTestCase):
 
     version = '2.2.1.0-3242'
     json_content['commandParams']['version'] = version
-
+    mocks_dict={}
     try:
       self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                          classname = "DataNode",
@@ -612,15 +648,19 @@ class TestDatanode(RMFTestCase):
                          stack_version = self.STACK_VERSION,
                          target = RMFTestCase.TARGET_COMMON_SERVICES,
                          call_mocks = call_mock_side_effects,
+                         checked_call_mocks=itertools.cycle([(0, "OK.")]),
+                         mocks_dict = mocks_dict,
                          command_args=["rolling"])
 
       raise Fail("Expected a fail since datanode didn't report a shutdown")
     except Exception, err:
-      expected_message = "DataNode has not shutdown."
+      expected_message = "DataNode has not yet deregistered from the NameNode..."
       if str(err.message) != expected_message:
         self.fail("Expected this exception to be thrown. " + expected_message + ". Got this instead, " + str(err.message))
 
-    self.assertResourceCalled("Execute", "hdfs dfsadmin -fs hdfs://ns1 -D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo 0.0.0.0:8010", tries=1, user="hdfs")
+    self.assertEquals(
+      ('hdfs dfsadmin -fs hdfs://ns1 -D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo 0.0.0.0:8010'),
+      mocks_dict['checked_call'].call_args_list[0][0][0])
 
   @patch("resource_management.libraries.functions.security_commons.build_expectations")
   @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")


[09/34] ambari git commit: AMBARI-20751. Fix misleading solr-client output messages (oleewere)

Posted by nc...@apache.org.
AMBARI-20751. Fix misleading solr-client output messages (oleewere)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/3e01abd1
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/3e01abd1
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/3e01abd1

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 3e01abd19f8a3d9538ab38d35786c3b6fae33fc5
Parents: 52203c3
Author: oleewere <ol...@gmail.com>
Authored: Thu Apr 13 12:41:41 2017 +0200
Committer: oleewere <ol...@gmail.com>
Committed: Thu Apr 13 12:41:41 2017 +0200

----------------------------------------------------------------------
 .../java/org/apache/ambari/infra/solr/AmbariSolrCloudClient.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/3e01abd1/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudClient.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudClient.java b/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudClient.java
index d5d971c..9479679 100644
--- a/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudClient.java
+++ b/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudClient.java
@@ -110,7 +110,7 @@ public class AmbariSolrCloudClient {
     List<String> collections = listCollections();
     if (!collections.contains(getCollection())) {
       String collection = new CreateCollectionCommand(getRetryTimes(), getInterval()).run(this);
-      LOG.info("Collection '{}' created.", collection);
+      LOG.info("Collection '{}' creation request sent.", collection);
     } else {
       LOG.info("Collection '{}' already exits.", getCollection());
       if (this.isSplitting()) {
@@ -234,7 +234,7 @@ public class AmbariSolrCloudClient {
       for (String shardName : shardList) {
         if (!existingShards.contains(shardName)) {
           new CreateShardCommand(shardName, getRetryTimes(), getInterval()).run(this);
-          LOG.info("New shard added to collection '{}': {}", getCollection(), shardName);
+          LOG.info("Adding new shard to collection request sent ('{}': {})", getCollection(), shardName);
           existingShards.add(shardName);
         }
       }


[30/34] ambari git commit: AMBARI-20754 get_value_from_jmx constantly prints exception message in retry mechanism, which brings bad user experience (Yuanbo Liu via dili)

Posted by nc...@apache.org.
AMBARI-20754 get_value_from_jmx constantly prints exception message in retry mechanism, which brings bad user experience (Yuanbo Liu via dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/22b114de
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/22b114de
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/22b114de

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 22b114defc43858798f6f20211c0d04b440ee7ce
Parents: dd3fdc2
Author: Di Li <di...@apache.org>
Authored: Mon Apr 17 11:36:19 2017 -0400
Committer: Di Li <di...@apache.org>
Committed: Mon Apr 17 11:36:19 2017 -0400

----------------------------------------------------------------------
 .../python/resource_management/libraries/functions/jmx.py     | 7 ++++---
 .../libraries/functions/namenode_ha_utils.py                  | 6 +++---
 2 files changed, 7 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/22b114de/ambari-common/src/main/python/resource_management/libraries/functions/jmx.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/jmx.py b/ambari-common/src/main/python/resource_management/libraries/functions/jmx.py
index 9a4ff5f..dbd0092 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/jmx.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/jmx.py
@@ -23,7 +23,7 @@ from resource_management.core import shell
 from resource_management.core.logger import Logger
 from resource_management.libraries.functions.get_user_call_output import get_user_call_output
 
-def get_value_from_jmx(qry, property, security_enabled, run_user, is_https_enabled):
+def get_value_from_jmx(qry, property, security_enabled, run_user, is_https_enabled, last_retry=True):
   try:
     if security_enabled:
       cmd = ['curl', '--negotiate', '-u', ':', '-s']
@@ -41,5 +41,6 @@ def get_value_from_jmx(qry, property, security_enabled, run_user, is_https_enabl
       data_dict = json.loads(data)
       return data_dict["beans"][0][property]
   except:
-    Logger.logger.exception("Getting jmx metrics from NN failed. URL: " + str(qry))
-    return None
\ No newline at end of file
+    if last_retry:
+      Logger.logger.exception("Getting jmx metrics from NN failed. URL: " + str(qry))
+    return None

http://git-wip-us.apache.org/repos/asf/ambari/blob/22b114de/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py b/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py
index 665a8e4..8a2ff25 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py
@@ -51,7 +51,7 @@ def get_namenode_states(hdfs_site, security_enabled, run_user, times=10, sleep_t
   @retry(times=times, sleep_time=sleep_time, backoff_factor=backoff_factor, err_class=Fail)
   def doRetries(hdfs_site, security_enabled, run_user):
     doRetries.attempt += 1
-    active_namenodes, standby_namenodes, unknown_namenodes = get_namenode_states_noretries(hdfs_site, security_enabled, run_user)
+    active_namenodes, standby_namenodes, unknown_namenodes = get_namenode_states_noretries(hdfs_site, security_enabled, run_user, doRetries.attempt == times)
     Logger.info(
       "NameNode HA states: active_namenodes = {0}, standby_namenodes = {1}, unknown_namenodes = {2}".format(
         active_namenodes, standby_namenodes, unknown_namenodes))
@@ -65,7 +65,7 @@ def get_namenode_states(hdfs_site, security_enabled, run_user, times=10, sleep_t
   doRetries.attempt = 0
   return doRetries(hdfs_site, security_enabled, run_user)
 
-def get_namenode_states_noretries(hdfs_site, security_enabled, run_user):
+def get_namenode_states_noretries(hdfs_site, security_enabled, run_user, last_retry=True):
   """
   return format [('nn1', 'hdfs://hostname1:port1'), ('nn2', 'hdfs://hostname2:port2')] , [....], [....]
   """
@@ -102,7 +102,7 @@ def get_namenode_states_noretries(hdfs_site, security_enabled, run_user):
 
       jmx_uri = JMX_URI_FRAGMENT.format(protocol, value)
       
-      state = get_value_from_jmx(jmx_uri, 'tag.HAState', security_enabled, run_user, is_https_enabled)
+      state = get_value_from_jmx(jmx_uri, 'tag.HAState', security_enabled, run_user, is_https_enabled, last_retry)
       # If JMX parsing failed
       if not state:
         check_service_cmd = "hdfs haadmin -ns {0} -getServiceState {1}".format(get_nameservice(hdfs_site), nn_unique_id)
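
The shape of the fix: thread a last_retry flag from the retry loop (doRetries.attempt == times) down into get_value_from_jmx, so the stack trace is logged once, on the final attempt, instead of on every poll. Reduced to its essentials (a sketch using plain urllib2/logging in place of Ambari's helpers):

import logging
import urllib2

LOG = logging.getLogger(__name__)

def get_value(qry, last_retry=True):
  try:
    return urllib2.urlopen(qry).read()
  except Exception:
    # Intermediate failures stay quiet; only the final one is logged.
    if last_retry:
      LOG.exception("Getting jmx metrics failed. URL: " + str(qry))
    return None

def get_with_retries(qry, times=10):
  for attempt in range(1, times + 1):
    value = get_value(qry, last_retry=(attempt == times))
    if value is not None:
      return value
  return None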


[05/34] ambari git commit: AMBARI-18423 - Support creating/editing alert dispatch targets for script-based alert dispatchers by web wizard instead of command line (Lei Yao via rzang)

Posted by nc...@apache.org.
AMBARI-18423 - Support creating/editing alert dispatch targets for script-based alert dispatchers by web wizard instead of command line (Lei Yao via rzang)

Change-Id: I417451c88495be07bb68a940a14d37d14b74267f


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/68b7b564
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/68b7b564
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/68b7b564

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 68b7b56479dbc22d16e96570e27616a59c96a98a
Parents: 5ef0c99
Author: Richard Zang <rz...@apache.org>
Authored: Wed Apr 12 14:32:53 2017 -0700
Committer: Richard Zang <rz...@apache.org>
Committed: Wed Apr 12 14:32:53 2017 -0700

----------------------------------------------------------------------
 .../manage_alert_notifications_controller.js    |  28 +++-
 ambari-web/app/messages.js                      |   1 +
 .../main/alerts/create_alert_notification.hbs   |  12 ++
 ...anage_alert_notifications_controller_test.js | 165 ++++++++++++++++++-
 4 files changed, 200 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/68b7b564/ambari-web/app/controllers/main/alerts/manage_alert_notifications_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/alerts/manage_alert_notifications_controller.js b/ambari-web/app/controllers/main/alerts/manage_alert_notifications_controller.js
index 73c19c6..df15513 100644
--- a/ambari-web/app/controllers/main/alerts/manage_alert_notifications_controller.js
+++ b/ambari-web/app/controllers/main/alerts/manage_alert_notifications_controller.js
@@ -149,6 +149,11 @@ App.ManageAlertNotificationsController = Em.Controller.extend({
       value: '',
       defaultValue: ''
     },
+    scriptDispatchProperty: {
+      label: Em.I18n.t('alerts.actions.manage_alert_notifications_popup.scriptDispatchProperty'),
+      value: '',
+      defaultValue: ''
+    },
     customProperties: Em.A([])
   }),
 
@@ -167,7 +172,7 @@ App.ManageAlertNotificationsController = Em.Controller.extend({
    * used in Type combobox
    * @type {Array}
    */
-  methods: ['EMAIL', 'SNMP', 'Custom SNMP'],
+  methods: ['EMAIL', 'SNMP', 'Custom SNMP', 'Alert Script'],
 
   /**
    * List of available value for Severity Filter
@@ -283,7 +288,8 @@ App.ManageAlertNotificationsController = Em.Controller.extend({
     'mail.smtp.from',
     'mail.smtp.host',
     'mail.smtp.port',
-    'mail.smtp.starttls.enable'
+    'mail.smtp.starttls.enable',
+    'ambari.dispatch-property.script'
   ],
 
   validationMap: {
@@ -332,7 +338,8 @@ App.ManageAlertNotificationsController = Em.Controller.extend({
         errorKey: 'hostError',
         validator: 'hostsValidation'
       }
-    ]
+    ],
+    AlertScript:[]
   },
 
   /**
@@ -427,6 +434,7 @@ App.ManageAlertNotificationsController = Em.Controller.extend({
     inputFields.set('severityFilter.value', selectedAlertNotification.get('alertStates'));
     inputFields.set('global.value', selectedAlertNotification.get('global'));
     inputFields.set('allGroups.value', selectedAlertNotification.get('global') ? 'all' : 'custom');
+    inputFields.set('scriptDispatchProperty.value', properties['ambari.dispatch-property.script'] || '');
     // not allow to edit global field
     inputFields.set('global.disabled', true);
     inputFields.set('description.value', selectedAlertNotification.get('description'));
@@ -478,6 +486,8 @@ App.ManageAlertNotificationsController = Em.Controller.extend({
 
         isCustomSNMPMethodSelected: Em.computed.equal('controller.inputFields.method.value', 'Custom SNMP'),
 
+        isAlertScriptMethodSelected: Em.computed.equal('controller.inputFields.method.value', 'Alert Script'),
+
         methodObserver: function () {
           var currentMethod = this.get('controller.inputFields.method.value'),
             validationMap = self.get('validationMap');
@@ -557,7 +567,7 @@ App.ManageAlertNotificationsController = Em.Controller.extend({
         hostsValidation: function() {
           var inputValue = this.get('controller.inputFields.host.value').trim(),
             hostError = false;
-          if (!this.get('isEmailMethodSelected')) {
+          if (!this.get('isEmailMethodSelected') && !this.get('isAlertScriptMethodSelected')) {
             var array = inputValue.split(',');
             hostError = array.some(function(hostname) {
               return hostname && !validator.isHostname(hostname.trim());
@@ -757,7 +767,7 @@ App.ManageAlertNotificationsController = Em.Controller.extend({
       properties['ambari.dispatch.snmp.community'] = inputFields.get('community.value');
       properties['ambari.dispatch.recipients'] = inputFields.get('host.value').replace(/\s/g, '').split(',');
       properties['ambari.dispatch.snmp.port'] = inputFields.get('port.value');
-    } else {
+    } else if(inputFields.get('method.value') === 'Custom SNMP') {
       properties['ambari.dispatch.snmp.version'] = inputFields.get('version.value');
       properties['ambari.dispatch.snmp.oids.trap'] = inputFields.get('OIDs.value');
       properties['ambari.dispatch.snmp.oids.subject'] = inputFields.get('OIDs.value');
@@ -765,6 +775,10 @@ App.ManageAlertNotificationsController = Em.Controller.extend({
       properties['ambari.dispatch.snmp.community'] = inputFields.get('community.value');
       properties['ambari.dispatch.recipients'] = inputFields.get('host.value').replace(/\s/g, '').split(',');
       properties['ambari.dispatch.snmp.port'] = inputFields.get('port.value');
+    } else if (inputFields.get('method.value') === 'Alert Script') {
+      var scriptDispatchProperty = inputFields.get('scriptDispatchProperty.value').trim();
+      if (scriptDispatchProperty !== '')
+        properties['ambari.dispatch-property.script'] = scriptDispatchProperty;
     }
     inputFields.get('customProperties').forEach(function (customProperty) {
       properties[customProperty.name] = customProperty.value;
@@ -791,6 +805,8 @@ App.ManageAlertNotificationsController = Em.Controller.extend({
       notificationType = "SNMP";
     } else if(notificationType === "SNMP") {
       notificationType = "AMBARI_SNMP";
+    } else if(notificationType === "Alert Script"){
+      notificationType = "ALERT_SCRIPT";
     }
     return notificationType;
   },
@@ -801,6 +817,8 @@ App.ManageAlertNotificationsController = Em.Controller.extend({
       notificationTypeText = "Custom SNMP";
     } else if(notificationType === "AMBARI_SNMP") {
       notificationTypeText = "SNMP";
+    } else if(notificationType === "ALERT_SCRIPT"){
+      notificationTypeText = "Alert Script";
     }
     return notificationTypeText;
   },
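
The two conversion helpers above are inverse lookups between the UI label and the server-side enum: 'Custom SNMP' -> SNMP, 'SNMP' -> AMBARI_SNMP, and now 'Alert Script' -> ALERT_SCRIPT, with EMAIL passing through untouched. The same logic expressed as a lookup table (a language-neutral sketch in Python; the JavaScript above is the actual implementation):

LABEL_TO_TYPE = {
  'Custom SNMP': 'SNMP',
  'SNMP': 'AMBARI_SNMP',
  'Alert Script': 'ALERT_SCRIPT',
}
TYPE_TO_LABEL = dict((v, k) for k, v in LABEL_TO_TYPE.items())

def to_server_type(label):
  # EMAIL (and anything unmapped) falls through unchanged.
  return LABEL_TO_TYPE.get(label, label)

def to_ui_label(server_type):
  return TYPE_TO_LABEL.get(server_type, server_type)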

http://git-wip-us.apache.org/repos/asf/ambari/blob/68b7b564/ambari-web/app/messages.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index a2edf06..4513775 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -2547,6 +2547,7 @@ Em.I18n.translations = {
   'alerts.actions.manage_alert_notifications_popup.confirmDeleteBody':'Are you sure you want to delete {0} notification?',
   'alerts.actions.manage_alert_notifications_popup.error.name.empty': 'Notification name is required',
   'alerts.actions.manage_alert_notifications_popup.error.name.existed': 'Notification name already exists',
+  'alerts.actions.manage_alert_notifications_popup.scriptDispatchProperty':'Script Dispatch Property',
 
   'hosts.host.add':'Add New Hosts',
   'hosts.table.noHosts':'No hosts to display',

http://git-wip-us.apache.org/repos/asf/ambari/blob/68b7b564/ambari-web/app/templates/main/alerts/create_alert_notification.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/alerts/create_alert_notification.hbs b/ambari-web/app/templates/main/alerts/create_alert_notification.hbs
index 5b40bca..7ec5b1e 100644
--- a/ambari-web/app/templates/main/alerts/create_alert_notification.hbs
+++ b/ambari-web/app/templates/main/alerts/create_alert_notification.hbs
@@ -315,6 +315,18 @@
     {{/if}}
     {{! alert-notification Custom SNMP end }}
 
+    {{! alert-notification Alert Script }}
+    {{#if view.isAlertScriptMethodSelected}}
+    <div class="form-group">
+      <label class="control-label col-md-2">{{controller.inputFields.scriptDispatchProperty.label}}</label>
+
+      <div class="col-md-10">
+         {{view Em.TextField valueBinding="controller.inputFields.scriptDispatchProperty.value" class="form-control"}}
+      </div>
+    </div>
+    {{/if}}
+    {{! alert-notification Alert Script end}}
+
     {{! alert-notification custom properties }}
     {{#each customProperty in controller.inputFields.customProperties}}
       <div class="form-group">

http://git-wip-us.apache.org/repos/asf/ambari/blob/68b7b564/ambari-web/test/controllers/main/alerts/manage_alert_notifications_controller_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/alerts/manage_alert_notifications_controller_test.js b/ambari-web/test/controllers/main/alerts/manage_alert_notifications_controller_test.js
index 31da561..0d58afa 100644
--- a/ambari-web/test/controllers/main/alerts/manage_alert_notifications_controller_test.js
+++ b/ambari-web/test/controllers/main/alerts/manage_alert_notifications_controller_test.js
@@ -336,6 +336,9 @@ describe('App.ManageAlertNotificationsController', function () {
         port: {
           value: ''
         },
+        scriptDispatchProperty:{
+          value: ''
+        },
         customProperties: [
           {name: 'customName', value: 'customValue1', defaultValue: 'customValue1'},
           {name: 'customName2', value: 'customValue1', defaultValue: 'customValue1'}
@@ -401,6 +404,9 @@ describe('App.ManageAlertNotificationsController', function () {
           value: 'test1@test.test, test2@test.test'
         },
         port: {},
+        scriptDispatchProperty:{
+          value: ''
+        },
         customProperties: [
           {name: 'customName', value: 'customValue', defaultValue: 'customValue'}
         ]
@@ -495,6 +501,9 @@ describe('App.ManageAlertNotificationsController', function () {
         port: {
           value: ''
         },
+        scriptDispatchProperty:{
+          value: ''
+        },
         customProperties: [
           {name: 'customName', value: 'customValue1', defaultValue: 'customValue1'},
           {name: 'customName2', value: 'customValue1', defaultValue: 'customValue1'}
@@ -556,6 +565,9 @@ describe('App.ManageAlertNotificationsController', function () {
         port: {
           value: 161
         },
+        scriptDispatchProperty:{
+          value: ''
+        },
         customProperties: [
           {name: 'customName', value: 'customValue', defaultValue: 'customValue'}
         ]
@@ -649,6 +661,9 @@ describe('App.ManageAlertNotificationsController', function () {
         port: {
           value: ''
         },
+        scriptDispatchProperty:{
+          value: ''
+        },
         customProperties: [
           {name: 'customName', value: 'customValue1', defaultValue: 'customValue1'},
           {name: 'customName2', value: 'customValue1', defaultValue: 'customValue1'}
@@ -708,12 +723,160 @@ describe('App.ManageAlertNotificationsController', function () {
         port: {
           value: 161
         },
+        scriptDispatchProperty:{
+          value: ''
+        },
         customProperties: [
           {name: 'customName', value: 'customValue', defaultValue: 'customValue'}
         ]
       }));
 
-    })
+    });
+
+    it("should map properties from selectedAlertNotification to inputFields - ALERT_SCRIPT", function () {
+
+          controller.set('selectedAlertNotification', Em.Object.create({
+            name: 'test_alert_script',
+            global: true,
+            description: 'test_description',
+            groups: ['test1', 'test2'],
+            type: 'ALERT_SCRIPT',
+            alertStates: ['OK', 'UNKNOWN'],
+            properties: {
+              'ambari.dispatch-property.script': "com.mycompany.dispatch.syslog.script",
+              'customName': 'customValue'
+            }
+          }));
+
+          controller.set('inputFields', Em.Object.create({
+            name: {
+              value: ''
+            },
+            groups: {
+              value: []
+            },
+            global: {
+              value: false
+            },
+            allGroups: {
+              value: false
+            },
+            method: {
+              value: ''
+            },
+            email: {
+              value: ''
+            },
+            severityFilter: {
+              value: []
+            },
+            description: {
+              value: ''
+            },
+            SMTPServer: {
+              value: ''
+            },
+            SMTPPort: {
+              value: ''
+            },
+            SMTPUseAuthentication: {
+              value: ''
+            },
+            SMTPUsername: {
+              value: ''
+            },
+            SMTPPassword: {
+              value: ''
+            },
+            retypeSMTPPassword: {
+              value: ''
+            },
+            SMTPSTARTTLS: {
+              value: ''
+            },
+            emailFrom: {
+              value: ''
+            },
+            version: {
+              value: ''
+            },
+            OIDs: {
+              value: ''
+            },
+            community: {
+              value: ''
+            },
+            host: {
+              value: ''
+            },
+            port: {
+              value: ''
+            },
+            scriptDispatchProperty: {
+              value: ''
+            },
+            customProperties: [
+              {name: 'customName', value: 'customValue1', defaultValue: 'customValue1'},
+              {name: 'customName2', value: 'customValue1', defaultValue: 'customValue1'}
+            ]
+          }));
+
+          controller.fillEditCreateInputs();
+
+          expect(JSON.stringify(controller.get('inputFields'))).to.equal(JSON.stringify({
+            name: {
+              value: 'test_alert_script'
+            },
+            groups: {
+              value: ['test1', 'test2']
+            },
+            global: {
+              value: true,
+              disabled: true
+            },
+            allGroups: {
+              value: 'all'
+            },
+            method: {
+              value: 'Alert Script'
+            },
+            email: {
+              value: ''
+            },
+            severityFilter: {
+              value: ['OK', 'UNKNOWN']
+            },
+            description: {
+              value: 'test_description'
+            },
+            SMTPServer: {},
+            SMTPPort: {},
+            SMTPUseAuthentication: {
+              value: true
+            },
+            SMTPUsername: {},
+            SMTPPassword: {},
+            retypeSMTPPassword: {},
+            SMTPSTARTTLS: {
+              value: true
+            },
+            emailFrom: {},
+            version: {},
+            OIDs: {},
+            community: {},
+            host: {
+              value: ''
+            },
+            port: {},
+            scriptDispatchProperty: {
+               value: 'com.mycompany.dispatch.syslog.script'
+            },
+            customProperties: [
+              {name: 'customName', value: 'customValue', defaultValue: 'customValue'}
+            ]
+          }));
+
+        });
   });
 
   describe("#showCreateEditPopup()", function () {


[16/34] ambari git commit: AMBARI-20757 - Selecting ignore service checks results in blank dialog box when hitting an error

Posted by nc...@apache.org.
AMBARI-20757 - Selecting ignore service checks results in blank dialog box when hitting an error


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7e46412d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7e46412d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7e46412d

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 7e46412dc694fc78713be079e56d7347a6f2f60c
Parents: 269ac0a
Author: Tim Thorpe <tt...@apache.org>
Authored: Thu Apr 13 11:30:41 2017 -0700
Committer: Tim Thorpe <tt...@apache.org>
Committed: Thu Apr 13 11:30:41 2017 -0700

----------------------------------------------------------------------
 ambari-web/app/messages.js                                |  1 +
 .../main/admin/stack_upgrade/stack_upgrade_wizard.hbs     | 10 +++++++---
 2 files changed, 8 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/7e46412d/ambari-web/app/messages.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index 4513775..8f8d981 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -1892,6 +1892,7 @@ Em.I18n.translations = {
   'admin.stackUpgrade.dialog.details.hide': "hide details",
   'admin.stackUpgrade.dialog.notActive': "Waiting to execute the next task...",
   'admin.stackUpgrade.dialog.prepareUpgrade.header': "Preparing the Upgrade...",
+  'admin.stackUpgrade.dialog.skipped.failures':'Some steps failed and were automatically skipped. Please resolve each failure before continuing with the upgrade.',
   'services.service.start':'Start',
   'services.service.stop':'Stop',
   'services.service.metrics':'Metrics',

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e46412d/ambari-web/app/templates/main/admin/stack_upgrade/stack_upgrade_wizard.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/admin/stack_upgrade/stack_upgrade_wizard.hbs b/ambari-web/app/templates/main/admin/stack_upgrade/stack_upgrade_wizard.hbs
index f6be5d1..b2e8991 100644
--- a/ambari-web/app/templates/main/admin/stack_upgrade/stack_upgrade_wizard.hbs
+++ b/ambari-web/app/templates/main/admin/stack_upgrade/stack_upgrade_wizard.hbs
@@ -101,9 +101,13 @@
         {{#if view.plainManualItem}}
           <div class="panel panel-default details-box">
             <p class="manual-steps-title"><strong>{{t admin.stackUpgrade.dialog.manual}}</strong></p>
-            {{#each message in view.manualItem.messages}}
-              <p class="manual-steps-content">{{message}}</p>
-            {{/each}}
+            {{#if view.manualItem.messages.length}}
+              {{#each message in view.manualItem.messages}}
+                <p class="manual-steps-content">{{message}}</p>
+              {{/each}}
+            {{else}}
+              <p class="manual-steps-content">{{t admin.stackUpgrade.dialog.skipped.failures}}</p>
+            {{/if}}
             <label class="message">
               {{view App.CheckboxView checkedBinding="view.isManualDone" labelTranslate="admin.stackUpgrade.dialog.manualDone"}}
             </label>


[26/34] ambari git commit: AMBARI-20755 topology configuration type validation on blueprint deployments

Posted by nc...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/103e49a8/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequestTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequestTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequestTest.java
index 73a80f6..248332c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequestTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequestTest.java
@@ -50,8 +50,6 @@ import org.apache.ambari.server.topology.Configuration;
 import org.apache.ambari.server.topology.HostGroupInfo;
 import org.apache.ambari.server.topology.InvalidTopologyTemplateException;
 import org.apache.ambari.server.topology.TopologyRequest;
-import org.apache.ambari.server.topology.TopologyValidator;
-import org.apache.ambari.server.topology.validators.RequiredPasswordValidator;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -112,7 +110,6 @@ public class ProvisionClusterRequestTest {
     assertSame(blueprint, provisionClusterRequest.getBlueprint());
     Map<String, HostGroupInfo> hostGroupInfo = provisionClusterRequest.getHostGroupInfo();
     assertEquals(1, hostGroupInfo.size());
-    assertEquals(3, provisionClusterRequest.getTopologyValidators().size());
 
     // group1
     // host info
@@ -164,7 +161,6 @@ public class ProvisionClusterRequestTest {
     assertSame(blueprint, provisionClusterRequest.getBlueprint());
     Map<String, HostGroupInfo> hostGroupInfo = provisionClusterRequest.getHostGroupInfo();
     assertEquals(1, hostGroupInfo.size());
-    assertEquals(3, provisionClusterRequest.getTopologyValidators().size());
 
     // group2
     HostGroupInfo group2Info = hostGroupInfo.get("group2");
@@ -216,7 +212,6 @@ public class ProvisionClusterRequestTest {
     assertSame(blueprint, provisionClusterRequest.getBlueprint());
     Map<String, HostGroupInfo> hostGroupInfo = provisionClusterRequest.getHostGroupInfo();
     assertEquals(2, hostGroupInfo.size());
-    assertEquals(3, provisionClusterRequest.getTopologyValidators().size());
 
     // group1
     // host info
@@ -367,33 +362,6 @@ public class ProvisionClusterRequestTest {
     new ProvisionClusterRequest(properties, null);
   }
 
-  @Test
-  public void testGetValidators_noDefaultPassword() throws Exception {
-    Map<String, Object> properties = createBlueprintRequestProperties(CLUSTER_NAME, BLUEPRINT_NAME);
-    //properties.put("default_password", "pwd");
-    TopologyRequest request = new ProvisionClusterRequest(properties, null);
-    List<TopologyValidator> validators = request.getTopologyValidators();
-
-    assertEquals(3, validators.size());
-    TopologyValidator pwdValidator = validators.get(0);
-
-    TopologyValidator noDefaultPwdValidator = new RequiredPasswordValidator(null);
-    assertEquals(pwdValidator, noDefaultPwdValidator);
-  }
-
-  @Test
-  public void testGetValidators_defaultPassword() throws Exception {
-    Map<String, Object> properties = createBlueprintRequestProperties(CLUSTER_NAME, BLUEPRINT_NAME);
-    properties.put("default_password", "pwd");
-    TopologyRequest request = new ProvisionClusterRequest(properties, null);
-    List<TopologyValidator> validators = request.getTopologyValidators();
-
-    assertEquals(3, validators.size());
-    TopologyValidator pwdValidator = validators.get(0);
-
-    TopologyValidator defaultPwdValidator = new RequiredPasswordValidator("pwd");
-    assertEquals(pwdValidator, defaultPwdValidator);
-  }
 
   @Test(expected = InvalidTopologyTemplateException.class)
   public void testInvalidPredicateProperty() throws Exception {

http://git-wip-us.apache.org/repos/asf/ambari/blob/103e49a8/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ScaleClusterRequestTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ScaleClusterRequestTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ScaleClusterRequestTest.java
index 48d1351..01cc48f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ScaleClusterRequestTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ScaleClusterRequestTest.java
@@ -116,7 +116,6 @@ public class ScaleClusterRequestTest {
     assertSame(blueprint, scaleClusterRequest.getBlueprint());
     Map<String, HostGroupInfo> hostGroupInfo = scaleClusterRequest.getHostGroupInfo();
     assertEquals(1, hostGroupInfo.size());
-    assertEquals(0, scaleClusterRequest.getTopologyValidators().size());
 
     // group1
     // host info
@@ -147,7 +146,6 @@ public class ScaleClusterRequestTest {
     assertSame(blueprint, scaleClusterRequest.getBlueprint());
     Map<String, HostGroupInfo> hostGroupInfo = scaleClusterRequest.getHostGroupInfo();
     assertEquals(1, hostGroupInfo.size());
-    assertEquals(0, scaleClusterRequest.getTopologyValidators().size());
 
     // group1
     // host info
@@ -176,7 +174,6 @@ public class ScaleClusterRequestTest {
     assertSame(blueprint, scaleClusterRequest.getBlueprint());
     Map<String, HostGroupInfo> hostGroupInfo = scaleClusterRequest.getHostGroupInfo();
     assertEquals(1, hostGroupInfo.size());
-    assertEquals(0, scaleClusterRequest.getTopologyValidators().size());
 
     // group2
     // host info
@@ -203,7 +200,6 @@ public class ScaleClusterRequestTest {
     assertSame(blueprint, scaleClusterRequest.getBlueprint());
     Map<String, HostGroupInfo> hostGroupInfo = scaleClusterRequest.getHostGroupInfo();
     assertEquals(1, hostGroupInfo.size());
-    assertEquals(0, scaleClusterRequest.getTopologyValidators().size());
 
     // group2
     // host info
@@ -226,7 +222,6 @@ public class ScaleClusterRequestTest {
     assertSame(blueprint, scaleClusterRequest.getBlueprint());
     Map<String, HostGroupInfo> hostGroupInfo = scaleClusterRequest.getHostGroupInfo();
     assertEquals(1, hostGroupInfo.size());
-    assertEquals(0, scaleClusterRequest.getTopologyValidators().size());
 
     // group3
     // host info
@@ -253,7 +248,6 @@ public class ScaleClusterRequestTest {
     assertSame(blueprint, scaleClusterRequest.getBlueprint());
     Map<String, HostGroupInfo> hostGroupInfo = scaleClusterRequest.getHostGroupInfo();
     assertEquals(3, hostGroupInfo.size());
-    assertEquals(0, scaleClusterRequest.getTopologyValidators().size());
 
     // group
     // host info

http://git-wip-us.apache.org/repos/asf/ambari/blob/103e49a8/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterDeployWithStartOnlyTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterDeployWithStartOnlyTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterDeployWithStartOnlyTest.java
index a691cbc..6bcd6bc 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterDeployWithStartOnlyTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterDeployWithStartOnlyTest.java
@@ -22,16 +22,12 @@ import static org.easymock.EasyMock.anyLong;
 import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.anyString;
 import static org.easymock.EasyMock.capture;
-import static org.easymock.EasyMock.createMockBuilder;
 import static org.easymock.EasyMock.eq;
 import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.expectLastCall;
 import static org.easymock.EasyMock.isA;
 import static org.easymock.EasyMock.isNull;
 import static org.easymock.EasyMock.newCapture;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.reset;
-import static org.easymock.EasyMock.verify;
 import static org.junit.Assert.assertEquals;
 
 import java.lang.reflect.Field;
@@ -58,7 +54,6 @@ import org.apache.ambari.server.controller.internal.ProvisionAction;
 import org.apache.ambari.server.controller.internal.ProvisionClusterRequest;
 import org.apache.ambari.server.controller.internal.Stack;
 import org.apache.ambari.server.controller.spi.ClusterController;
-import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.ResourceProvider;
 import org.apache.ambari.server.orm.entities.TopologyLogicalRequestEntity;
 import org.apache.ambari.server.security.encryption.CredentialStoreService;
@@ -67,6 +62,7 @@ import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.ComponentInfo;
 import org.apache.ambari.server.state.SecurityType;
 import org.apache.ambari.server.topology.tasks.ConfigureClusterTaskFactory;
+import org.apache.ambari.server.topology.validators.TopologyValidatorService;
 import org.easymock.Capture;
 import org.easymock.EasyMockRule;
 import org.easymock.EasyMockSupport;
@@ -84,7 +80,7 @@ import org.powermock.modules.junit4.PowerMockRunner;
 
 @RunWith(PowerMockRunner.class)
 @PrepareForTest(AmbariServer.class)
-public class ClusterDeployWithStartOnlyTest {
+public class ClusterDeployWithStartOnlyTest extends EasyMockSupport {
   private static final String CLUSTER_NAME = "test-cluster";
   private static final long CLUSTER_ID = 1;
   private static final String BLUEPRINT_NAME = "test-bp";
@@ -106,7 +102,6 @@ public class ClusterDeployWithStartOnlyTest {
   @Mock(type = MockType.NICE)
   private ProvisionClusterRequest request;
   private PersistedTopologyRequest persistedTopologyRequest;
-//  @Mock(type = MockType.STRICT)
   private LogicalRequestFactory logicalRequestFactory;
   @Mock(type = MockType.DEFAULT)
   private LogicalRequest logicalRequest;
@@ -161,6 +156,10 @@ public class ClusterDeployWithStartOnlyTest {
   @Mock(type = MockType.STRICT)
   private Future mockFuture;
 
+  @Mock
+  private TopologyValidatorService topologyValidatorServiceMock;
+
+
   private final Configuration stackConfig = new Configuration(new HashMap<String, Map<String, String>>(),
     new HashMap<String, Map<String, Map<String, String>>>());
   private final Configuration bpConfiguration = new Configuration(new HashMap<String, Map<String, String>>(),
@@ -290,7 +289,6 @@ public class ClusterDeployWithStartOnlyTest {
     expect(request.getDescription()).andReturn("Provision Cluster Test").anyTimes();
     expect(request.getConfiguration()).andReturn(topoConfiguration).anyTimes();
     expect(request.getHostGroupInfo()).andReturn(groupInfoMap).anyTimes();
-    expect(request.getTopologyValidators()).andReturn(topologyValidators).anyTimes();
     expect(request.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.NEVER_APPLY);
     expect(request.getProvisionAction()).andReturn(ProvisionAction.START_ONLY).anyTimes();
     expect(request.getSecurityConfiguration()).andReturn(null).anyTimes();
@@ -391,7 +389,6 @@ public class ClusterDeployWithStartOnlyTest {
     ambariContext.persistInstallStateForUI(CLUSTER_NAME, STACK_NAME, STACK_VERSION);
     expectLastCall().once();
 
-    expect(clusterController.ensureResourceProvider(anyObject(Resource.Type.class))).andReturn(resourceProvider);
     expect(executor.submit(anyObject(AsyncCallableService.class))).andReturn(mockFuture).times(2);
 
     persistedTopologyRequest = new PersistedTopologyRequest(1, request);
@@ -401,12 +398,9 @@ public class ClusterDeployWithStartOnlyTest {
     persistedState.persistLogicalRequest((LogicalRequest) anyObject(), anyLong());
     expectLastCall().once();
 
-    replay(blueprint, stack, request, group1, group2, ambariContext, logicalRequestFactory, logicalRequest,
-      configurationRequest, configurationRequest2, configurationRequest3, requestStatusResponse, executor,
-      persistedState, securityConfigurationFactory, credentialStoreService, clusterController, resourceProvider,
-      mockFuture, managementController, clusters, cluster, hostRoleCommandInstallComponent3,
-      hostRoleCommandInstallComponent4, hostRoleCommandStartComponent1, hostRoleCommandStartComponent2,
-      serviceComponentInfo, clientComponentInfo);
+    topologyValidatorServiceMock.validateTopologyConfiguration(anyObject(ClusterTopology.class));
+
+    replayAll();
 
     Class clazz = TopologyManager.class;
 
@@ -419,17 +413,8 @@ public class ClusterDeployWithStartOnlyTest {
 
   @After
   public void tearDown() {
-    verify(blueprint, stack, request, group1, group2, ambariContext, logicalRequestFactory,
-      logicalRequest, configurationRequest, configurationRequest2, configurationRequest3,
-      requestStatusResponse, executor, persistedState, mockFuture,
-      managementController, clusters, cluster, hostRoleCommandInstallComponent3, hostRoleCommandInstallComponent4,
-      hostRoleCommandStartComponent1, hostRoleCommandStartComponent2);
-
-    reset(blueprint, stack, request, group1, group2, ambariContext, logicalRequestFactory,
-      logicalRequest, configurationRequest, configurationRequest2, configurationRequest3,
-      requestStatusResponse, executor, persistedState, mockFuture,
-      managementController, clusters, cluster, hostRoleCommandInstallComponent3, hostRoleCommandInstallComponent4,
-      hostRoleCommandStartComponent1, hostRoleCommandStartComponent2);
+    verifyAll();
+    resetAll();
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/ambari/blob/103e49a8/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartOnComponentLevelTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartOnComponentLevelTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartOnComponentLevelTest.java
index 98ba592..0631b03 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartOnComponentLevelTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartOnComponentLevelTest.java
@@ -24,16 +24,12 @@ import static org.easymock.EasyMock.anyLong;
 import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.anyString;
 import static org.easymock.EasyMock.capture;
-import static org.easymock.EasyMock.createMockBuilder;
 import static org.easymock.EasyMock.eq;
 import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.expectLastCall;
 import static org.easymock.EasyMock.isA;
 import static org.easymock.EasyMock.isNull;
 import static org.easymock.EasyMock.newCapture;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.reset;
-import static org.easymock.EasyMock.verify;
 
 import java.lang.reflect.Field;
 import java.util.ArrayList;
@@ -59,7 +55,6 @@ import org.apache.ambari.server.controller.internal.ProvisionAction;
 import org.apache.ambari.server.controller.internal.ProvisionClusterRequest;
 import org.apache.ambari.server.controller.internal.Stack;
 import org.apache.ambari.server.controller.spi.ClusterController;
-import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.ResourceProvider;
 import org.apache.ambari.server.orm.entities.TopologyLogicalRequestEntity;
 import org.apache.ambari.server.security.encryption.CredentialStoreService;
@@ -68,6 +63,7 @@ import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.ComponentInfo;
 import org.apache.ambari.server.state.SecurityType;
 import org.apache.ambari.server.topology.tasks.ConfigureClusterTaskFactory;
+import org.apache.ambari.server.topology.validators.TopologyValidatorService;
 import org.easymock.Capture;
 import org.easymock.EasyMockRule;
 import org.easymock.EasyMockSupport;
@@ -85,7 +81,7 @@ import org.powermock.modules.junit4.PowerMockRunner;
 
 @RunWith(PowerMockRunner.class)
 @PrepareForTest(AmbariServer.class)
-public class ClusterInstallWithoutStartOnComponentLevelTest {
+public class ClusterInstallWithoutStartOnComponentLevelTest extends EasyMockSupport {
   private static final String CLUSTER_NAME = "test-cluster";
   private static final long CLUSTER_ID = 1;
   private static final String BLUEPRINT_NAME = "test-bp";
@@ -107,7 +103,7 @@ public class ClusterInstallWithoutStartOnComponentLevelTest {
   @Mock(type = MockType.NICE)
   private ProvisionClusterRequest request;
   private PersistedTopologyRequest persistedTopologyRequest;
-//  @Mock(type = MockType.STRICT)
+  //  @Mock(type = MockType.STRICT)
   private LogicalRequestFactory logicalRequestFactory;
   @Mock(type = MockType.DEFAULT)
   private LogicalRequest logicalRequest;
@@ -157,6 +153,9 @@ public class ClusterInstallWithoutStartOnComponentLevelTest {
   @Mock(type = MockType.STRICT)
   private Future mockFuture;
 
+  @Mock
+  private TopologyValidatorService topologyValidatorServiceMock;
+
   private final Configuration stackConfig = new Configuration(new HashMap<String, Map<String, String>>(),
     new HashMap<String, Map<String, Map<String, String>>>());
   private final Configuration bpConfiguration = new Configuration(new HashMap<String, Map<String, String>>(),
@@ -286,7 +285,6 @@ public class ClusterInstallWithoutStartOnComponentLevelTest {
     expect(request.getDescription()).andReturn("Provision Cluster Test").anyTimes();
     expect(request.getConfiguration()).andReturn(topoConfiguration).anyTimes();
     expect(request.getHostGroupInfo()).andReturn(groupInfoMap).anyTimes();
-    expect(request.getTopologyValidators()).andReturn(topologyValidators).anyTimes();
     expect(request.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.NEVER_APPLY);
     expect(request.getProvisionAction()).andReturn(INSTALL_AND_START).anyTimes();
     expect(request.getSecurityConfiguration()).andReturn(null).anyTimes();
@@ -368,7 +366,6 @@ public class ClusterInstallWithoutStartOnComponentLevelTest {
     ambariContext.persistInstallStateForUI(CLUSTER_NAME, STACK_NAME, STACK_VERSION);
     expectLastCall().once();
 
-    expect(clusterController.ensureResourceProvider(anyObject(Resource.Type.class))).andReturn(resourceProvider);
     expect(executor.submit(anyObject(AsyncCallableService.class))).andReturn(mockFuture).times(2);
 
     persistedTopologyRequest = new PersistedTopologyRequest(1, request);
@@ -378,10 +375,9 @@ public class ClusterInstallWithoutStartOnComponentLevelTest {
     persistedState.persistLogicalRequest((LogicalRequest) anyObject(), anyLong());
     expectLastCall().once();
 
-    replay(blueprint, stack, request, group1, group2, ambariContext, logicalRequestFactory, logicalRequest,
-      configurationRequest, configurationRequest2, configurationRequest3, requestStatusResponse, executor,
-      persistedState, securityConfigurationFactory, credentialStoreService, clusterController, resourceProvider,
-      mockFuture, managementController, clusters, cluster, hostRoleCommand, serviceComponentInfo, clientComponentInfo);
+    topologyValidatorServiceMock.validateTopologyConfiguration(anyObject(ClusterTopology.class));
+
+    replayAll();
 
     Class clazz = TopologyManager.class;
 
@@ -394,15 +390,8 @@ public class ClusterInstallWithoutStartOnComponentLevelTest {
 
   @After
   public void tearDown() {
-    verify(blueprint, stack, request, group1, group2, ambariContext, logicalRequestFactory,
-      logicalRequest, configurationRequest, configurationRequest2, configurationRequest3,
-      requestStatusResponse, executor, persistedState, mockFuture,
-      managementController, clusters, cluster, hostRoleCommand);
-
-    reset(blueprint, stack, request, group1, group2, ambariContext, logicalRequestFactory,
-      logicalRequest, configurationRequest, configurationRequest2, configurationRequest3,
-      requestStatusResponse, executor, persistedState, mockFuture,
-      managementController, clusters, cluster, hostRoleCommand);
+    verifyAll();
+    resetAll();
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/ambari/blob/103e49a8/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartTest.java
index fc7ac27..c074d78 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartTest.java
@@ -24,16 +24,12 @@ import static org.easymock.EasyMock.anyLong;
 import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.anyString;
 import static org.easymock.EasyMock.capture;
-import static org.easymock.EasyMock.createMockBuilder;
 import static org.easymock.EasyMock.eq;
 import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.expectLastCall;
 import static org.easymock.EasyMock.isA;
 import static org.easymock.EasyMock.isNull;
 import static org.easymock.EasyMock.newCapture;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.reset;
-import static org.easymock.EasyMock.verify;
 
 import java.lang.reflect.Field;
 import java.util.ArrayList;
@@ -59,7 +55,6 @@ import org.apache.ambari.server.controller.internal.ProvisionAction;
 import org.apache.ambari.server.controller.internal.ProvisionClusterRequest;
 import org.apache.ambari.server.controller.internal.Stack;
 import org.apache.ambari.server.controller.spi.ClusterController;
-import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.ResourceProvider;
 import org.apache.ambari.server.orm.entities.TopologyLogicalRequestEntity;
 import org.apache.ambari.server.security.encryption.CredentialStoreService;
@@ -68,6 +63,7 @@ import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.ComponentInfo;
 import org.apache.ambari.server.state.SecurityType;
 import org.apache.ambari.server.topology.tasks.ConfigureClusterTaskFactory;
+import org.apache.ambari.server.topology.validators.TopologyValidatorService;
 import org.easymock.Capture;
 import org.easymock.EasyMockRule;
 import org.easymock.EasyMockSupport;
@@ -85,7 +81,7 @@ import org.powermock.modules.junit4.PowerMockRunner;
 
 @RunWith(PowerMockRunner.class)
 @PrepareForTest(AmbariServer.class)
-public class ClusterInstallWithoutStartTest {
+public class ClusterInstallWithoutStartTest extends EasyMockSupport {
   private static final String CLUSTER_NAME = "test-cluster";
   private static final long CLUSTER_ID = 1;
   private static final String BLUEPRINT_NAME = "test-bp";
@@ -106,8 +102,9 @@ public class ClusterInstallWithoutStartTest {
 
   @Mock(type = MockType.NICE)
   private ProvisionClusterRequest request;
+
   private PersistedTopologyRequest persistedTopologyRequest;
-//  @Mock(type = MockType.STRICT)
+  //  @Mock(type = MockType.STRICT)
   private LogicalRequestFactory logicalRequestFactory;
   @Mock(type = MockType.DEFAULT)
   private LogicalRequest logicalRequest;
@@ -157,6 +154,9 @@ public class ClusterInstallWithoutStartTest {
   @Mock(type = MockType.STRICT)
   private Future mockFuture;
 
+  @Mock
+  private TopologyValidatorService topologyValidatorServiceMock;
+
   private final Configuration stackConfig = new Configuration(new HashMap<String, Map<String, String>>(),
     new HashMap<String, Map<String, Map<String, String>>>());
   private final Configuration bpConfiguration = new Configuration(new HashMap<String, Map<String, String>>(),
@@ -286,7 +286,7 @@ public class ClusterInstallWithoutStartTest {
     expect(request.getDescription()).andReturn("Provision Cluster Test").anyTimes();
     expect(request.getConfiguration()).andReturn(topoConfiguration).anyTimes();
     expect(request.getHostGroupInfo()).andReturn(groupInfoMap).anyTimes();
-    expect(request.getTopologyValidators()).andReturn(topologyValidators).anyTimes();
+
     expect(request.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.NEVER_APPLY);
     expect(request.getProvisionAction()).andReturn(INSTALL_ONLY).anyTimes();
     expect(request.getSecurityConfiguration()).andReturn(null).anyTimes();
@@ -335,7 +335,7 @@ public class ClusterInstallWithoutStartTest {
 
     expect(ambariContext.getPersistedTopologyState()).andReturn(persistedState).anyTimes();
     //todo: don't ignore param
-    ambariContext.createAmbariResources(isA(ClusterTopology.class), eq(CLUSTER_NAME), (SecurityType) isNull(), (String)isNull());
+    ambariContext.createAmbariResources(isA(ClusterTopology.class), eq(CLUSTER_NAME), (SecurityType) isNull(), (String) isNull());
     expectLastCall().once();
     expect(ambariContext.getNextRequestId()).andReturn(1L).once();
     expect(ambariContext.isClusterKerberosEnabled(CLUSTER_ID)).andReturn(false).anyTimes();
@@ -361,7 +361,6 @@ public class ClusterInstallWithoutStartTest {
     ambariContext.persistInstallStateForUI(CLUSTER_NAME, STACK_NAME, STACK_VERSION);
     expectLastCall().once();
 
-    expect(clusterController.ensureResourceProvider(anyObject(Resource.Type.class))).andReturn(resourceProvider);
     expect(executor.submit(anyObject(AsyncCallableService.class))).andReturn(mockFuture).times(2);
 
     persistedTopologyRequest = new PersistedTopologyRequest(1, request);
@@ -371,10 +370,9 @@ public class ClusterInstallWithoutStartTest {
     persistedState.persistLogicalRequest((LogicalRequest) anyObject(), anyLong());
     expectLastCall().once();
 
-    replay(blueprint, stack, request, group1, group2, ambariContext, logicalRequestFactory, logicalRequest,
-      configurationRequest, configurationRequest2, configurationRequest3, requestStatusResponse, executor,
-      persistedState, securityConfigurationFactory, credentialStoreService, clusterController, resourceProvider,
-      mockFuture, managementController, clusters, cluster, hostRoleCommand, serviceComponentInfo, clientComponentInfo);
+    topologyValidatorServiceMock.validateTopologyConfiguration(anyObject(ClusterTopology.class));
+
+    replayAll();
 
     Class clazz = TopologyManager.class;
 
@@ -387,15 +385,8 @@ public class ClusterInstallWithoutStartTest {
 
   @After
   public void tearDown() {
-    verify(blueprint, stack, request, group1, group2, ambariContext, logicalRequestFactory,
-      logicalRequest, configurationRequest, configurationRequest2, configurationRequest3,
-      requestStatusResponse, executor, persistedState, mockFuture,
-      managementController, clusters, cluster, hostRoleCommand);
-
-    reset(blueprint, stack, request, group1, group2, ambariContext, logicalRequestFactory,
-      logicalRequest, configurationRequest, configurationRequest2, configurationRequest3,
-      requestStatusResponse, executor, persistedState, mockFuture,
-      managementController, clusters, cluster, hostRoleCommand);
+    verifyAll();
+    resetAll();
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/ambari/blob/103e49a8/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterTopologyImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterTopologyImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterTopologyImplTest.java
index 3ea17b4..606303e 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterTopologyImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterTopologyImplTest.java
@@ -19,20 +19,15 @@
 package org.apache.ambari.server.topology;
 
 import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
-import static org.easymock.EasyMock.notNull;
 import static org.powermock.api.easymock.PowerMock.createNiceMock;
-import static org.powermock.api.easymock.PowerMock.createStrictMock;
 import static org.powermock.api.easymock.PowerMock.replay;
 import static org.powermock.api.easymock.PowerMock.reset;
 import static org.powermock.api.easymock.PowerMock.verify;
 
-import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
@@ -56,7 +51,7 @@ public class ClusterTopologyImplTest {
   private static final HostGroup group4 = createNiceMock(HostGroup.class);
   private final Map<String, HostGroupInfo> hostGroupInfoMap = new HashMap<>();
   private final Map<String, HostGroup> hostGroupMap = new HashMap<>();
-  private final List<TopologyValidator> topologyValidators = new ArrayList<>();
+
   private Configuration configuration;
   private Configuration bpconfiguration;
 
@@ -64,9 +59,9 @@ public class ClusterTopologyImplTest {
   public void setUp() throws Exception {
 
     configuration = new Configuration(new HashMap<String, Map<String, String>>(),
-        new HashMap<String, Map<String, Map<String, String>>>());
+      new HashMap<String, Map<String, Map<String, String>>>());
     bpconfiguration = new Configuration(new HashMap<String, Map<String, String>>(),
-            new HashMap<String, Map<String, Map<String, String>>>());
+      new HashMap<String, Map<String, Map<String, String>>>());
 
     HostGroupInfo group1Info = new HostGroupInfo("group1");
     HostGroupInfo group2Info = new HostGroupInfo("group2");
@@ -148,7 +143,7 @@ public class ClusterTopologyImplTest {
     verify(blueprint, group1, group2, group3, group4);
     reset(blueprint, group1, group2, group3, group4);
 
-    topologyValidators.clear();
+
     hostGroupInfoMap.clear();
     hostGroupMap.clear();
   }
@@ -157,36 +152,7 @@ public class ClusterTopologyImplTest {
     replay(blueprint, group1, group2, group3, group4);
   }
 
-  @Test(expected = InvalidTopologyException.class)
-  public void testCreate_validatorFails() throws Exception {
-    TestTopologyRequest request = new TestTopologyRequest(TopologyRequest.Type.PROVISION);
-
-    TopologyValidator validator = createStrictMock(TopologyValidator.class);
-    topologyValidators.add(validator);
-
-    validator.validate((ClusterTopology) notNull());
-    expectLastCall().andThrow(new InvalidTopologyException("test"));
-
-    replayAll();
-    replay(validator);
-    // should throw exception due to validation failure
-    new ClusterTopologyImpl(null, request);
-  }
-
-  @Test
-     public void testCreate_validatorSuccess() throws Exception {
-    TestTopologyRequest request = new TestTopologyRequest(TopologyRequest.Type.PROVISION);
-
-    TopologyValidator validator = createStrictMock(TopologyValidator.class);
-    topologyValidators.add(validator);
 
-    validator.validate((ClusterTopology) notNull());
-
-    replayAll();
-    replay(validator);
-
-    new ClusterTopologyImpl(null, request);
-  }
 
   @Test(expected = InvalidTopologyException.class)
   public void testCreate_duplicateHosts() throws Exception {
@@ -204,16 +170,11 @@ public class ClusterTopologyImplTest {
   public void test_GetHostAssigmentForComponents() throws Exception {
     TestTopologyRequest request = new TestTopologyRequest(TopologyRequest.Type.PROVISION);
 
-    TopologyValidator validator = createStrictMock(TopologyValidator.class);
-    topologyValidators.add(validator);
-
-    validator.validate((ClusterTopology) notNull());
-
     replayAll();
-    replay(validator);
 
     new ClusterTopologyImpl(null, request).getHostAssignmentsForComponent("component1");
   }
+
   @Test(expected = InvalidTopologyException.class)
   public void testCreate_NNHAInvaid() throws Exception {
     bpconfiguration.setProperty("hdfs-site", "dfs.nameservices", "val");
@@ -224,6 +185,7 @@ public class ClusterTopologyImplTest {
     new ClusterTopologyImpl(null, request);
     hostGroupInfoMap.get("group4").addHost("host5");
   }
+
   @Test(expected = IllegalArgumentException.class)
   public void testCreate_NNHAHostNameNotCorrectForStandby() throws Exception {
     expect(group4.getName()).andReturn("group4");
@@ -234,6 +196,7 @@ public class ClusterTopologyImplTest {
     replayAll();
     new ClusterTopologyImpl(null, request);
   }
+
   @Test(expected = IllegalArgumentException.class)
   public void testCreate_NNHAHostNameNotCorrectForActive() throws Exception {
     expect(group4.getName()).andReturn("group4");
@@ -244,6 +207,7 @@ public class ClusterTopologyImplTest {
     replayAll();
     new ClusterTopologyImpl(null, request);
   }
+
   @Test(expected = IllegalArgumentException.class)
   public void testCreate_NNHAHostNameNotCorrectForStandbyWithActiveAsVariable() throws Exception {
     expect(group4.getName()).andReturn("group4");
@@ -292,11 +256,6 @@ public class ClusterTopologyImplTest {
     }
 
     @Override
-    public List<TopologyValidator> getTopologyValidators() {
-      return topologyValidators;
-    }
-
-    @Override
     public String getDescription() {
       return "Test Request";
     }

http://git-wip-us.apache.org/repos/asf/ambari/blob/103e49a8/ambari-server/src/test/java/org/apache/ambari/server/topology/RequiredPasswordValidatorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/RequiredPasswordValidatorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/RequiredPasswordValidatorTest.java
index 4c88247..efceef3 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/RequiredPasswordValidatorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/RequiredPasswordValidatorTest.java
@@ -20,10 +20,6 @@ package org.apache.ambari.server.topology;
 
 import static junit.framework.Assert.assertEquals;
 import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.reset;
-import static org.powermock.api.easymock.PowerMock.createNiceMock;
-import static org.powermock.api.easymock.PowerMock.verify;
 
 import java.util.Arrays;
 import java.util.Collection;
@@ -35,20 +31,37 @@ import java.util.Map;
 import org.apache.ambari.server.controller.internal.Stack;
 import org.apache.ambari.server.state.PropertyInfo;
 import org.apache.ambari.server.topology.validators.RequiredPasswordValidator;
+import org.easymock.EasyMockRule;
+import org.easymock.EasyMockSupport;
+import org.easymock.Mock;
+import org.easymock.TestSubject;
 import org.junit.After;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
 
 /**
  * Unit tests for RequiredPasswordValidator.
  */
-public class RequiredPasswordValidatorTest {
+public class RequiredPasswordValidatorTest extends EasyMockSupport {
 
-  private static final ClusterTopology topology = createNiceMock(ClusterTopology.class);
-  private static final Blueprint blueprint = createNiceMock(Blueprint.class);
-  private static final Stack stack = createNiceMock(Stack.class);
-  private static final HostGroup group1 = createNiceMock(HostGroup.class);
-  private static final HostGroup group2 = createNiceMock(HostGroup.class);
+  @Rule
+  public EasyMockRule mocks = new EasyMockRule(this);
+
+  @Mock
+  private ClusterTopology topology;
+
+  @Mock
+  private Blueprint blueprint;
+
+  @Mock
+  private Stack stack;
+
+  @Mock
+  private HostGroup group1;
+
+  @Mock
+  private HostGroup group2;
 
   private static Configuration stackDefaults;
   private static Configuration bpClusterConfig;
@@ -71,30 +84,33 @@ public class RequiredPasswordValidatorTest {
   private static final Collection<Stack.ConfigProperty> service2RequiredPwdConfigs = new HashSet<>();
   private static final Collection<Stack.ConfigProperty> service3RequiredPwdConfigs = new HashSet<>();
 
+  @TestSubject
+  private RequiredPasswordValidator validator = new RequiredPasswordValidator();
+
 
   @Before
   public void setup() {
 
     stackDefaults = new Configuration(new HashMap<String, Map<String, String>>(),
-        new HashMap<String, Map<String, Map<String, String>>>());
+      new HashMap<String, Map<String, Map<String, String>>>());
 
     bpClusterConfig = new Configuration(new HashMap<String, Map<String, String>>(),
-        new HashMap<String, Map<String, Map<String, String>>>(), stackDefaults);
+      new HashMap<String, Map<String, Map<String, String>>>(), stackDefaults);
 
     topoClusterConfig = new Configuration(new HashMap<String, Map<String, String>>(),
-        new HashMap<String, Map<String, Map<String, String>>>(), bpClusterConfig);
+      new HashMap<String, Map<String, Map<String, String>>>(), bpClusterConfig);
 
     bpGroup1Config = new Configuration(new HashMap<String, Map<String, String>>(),
-        new HashMap<String, Map<String, Map<String, String>>>(), topoClusterConfig);
+      new HashMap<String, Map<String, Map<String, String>>>(), topoClusterConfig);
 
     bpGroup2Config = new Configuration(new HashMap<String, Map<String, String>>(),
-        new HashMap<String, Map<String, Map<String, String>>>(), topoClusterConfig);
+      new HashMap<String, Map<String, Map<String, String>>>(), topoClusterConfig);
 
     topoGroup1Config = new Configuration(new HashMap<String, Map<String, String>>(),
-        new HashMap<String, Map<String, Map<String, String>>>(), bpGroup1Config);
+      new HashMap<String, Map<String, Map<String, String>>>(), bpGroup1Config);
 
     topoGroup2Config = new Configuration(new HashMap<String, Map<String, String>>(),
-        new HashMap<String, Map<String, Map<String, String>>>(), bpGroup2Config);
+      new HashMap<String, Map<String, Map<String, String>>>(), bpGroup2Config);
 
     service1RequiredPwdConfigs.clear();
     service2RequiredPwdConfigs.clear();
@@ -149,45 +165,57 @@ public class RequiredPasswordValidatorTest {
     expect(stack.getRequiredConfigurationProperties("service2", PropertyInfo.PropertyType.PASSWORD)).andReturn(service2RequiredPwdConfigs).anyTimes();
     expect(stack.getRequiredConfigurationProperties("service3", PropertyInfo.PropertyType.PASSWORD)).andReturn(service3RequiredPwdConfigs).anyTimes();
 
-    replay(topology, blueprint, stack, group1, group2);
   }
 
   @After
   public void tearDown() {
-    verify(topology, blueprint, stack, group1, group2);
-    reset(topology, blueprint, stack, group1, group2);
+    verifyAll();
+    resetAll();
   }
 
 
   @Test
   public void testValidate_noRequiredProps__noDefaultPwd() throws Exception {
-    TopologyValidator validator = new RequiredPasswordValidator(null);
+    // GIVEN
     // no required pwd properties so shouldn't throw an exception
+    expect(topology.getDefaultPassword()).andReturn(null);
+    replayAll();
+
+    // WHEN
     validator.validate(topology);
   }
 
   @Test
   public void testValidate_noRequiredProps__defaultPwd() throws Exception {
-    TopologyValidator validator = new RequiredPasswordValidator("pwd");
-    // no required pwd properties so shouldn't throw an exception
+    // GIVEN
+    expect(topology.getDefaultPassword()).andReturn("pwd");
+    replayAll();
+
+    // WHEN
     validator.validate(topology);
+
   }
 
   @Test(expected = InvalidTopologyException.class)
   public void testValidate_missingPwd__NoDefaultPwd() throws Exception {
+    expect(topology.getDefaultPassword()).andReturn(null);
+    replayAll();
+
     Stack.ConfigProperty pwdProp = new Stack.ConfigProperty("test-type", "pwdProp", null);
     service1RequiredPwdConfigs.add(pwdProp);
 
-    TopologyValidator validator = new RequiredPasswordValidator(null);
+
     validator.validate(topology);
   }
 
   @Test
   public void testValidate_missingPwd__defaultPwd() throws Exception {
+    expect(topology.getDefaultPassword()).andReturn("default-pwd");
+    replayAll();
+
     Stack.ConfigProperty pwdProp = new Stack.ConfigProperty("test-type", "pwdProp", null);
     service1RequiredPwdConfigs.add(pwdProp);
 
-    TopologyValidator validator = new RequiredPasswordValidator("default-pwd");
     // default value should be set
     validator.validate(topology);
 
@@ -197,62 +225,78 @@ public class RequiredPasswordValidatorTest {
 
   @Test
   public void testValidate_pwdPropertyInTopoGroupConfig__NoDefaultPwd() throws Exception {
+    expect(topology.getDefaultPassword()).andReturn(null);
+    replayAll();
+
     Stack.ConfigProperty pwdProp = new Stack.ConfigProperty("test-type", "pwdProp", null);
     service3RequiredPwdConfigs.add(pwdProp);
     // group2 has a component from service 3
     topoGroup2Config.getProperties().put("test-type", Collections.singletonMap("pwdProp", "secret"));
 
-    TopologyValidator validator = new RequiredPasswordValidator(null);
     validator.validate(topology);
   }
 
   @Test
   public void testValidate_pwdPropertyInTopoClusterConfig__NoDefaultPwd() throws Exception {
+    expect(topology.getDefaultPassword()).andReturn(null);
+    replayAll();
+
     Stack.ConfigProperty pwdProp = new Stack.ConfigProperty("test-type", "pwdProp", null);
     service3RequiredPwdConfigs.add(pwdProp);
     // group2 has a component from service 3
     topoClusterConfig.getProperties().put("test-type", Collections.singletonMap("pwdProp", "secret"));
 
-    TopologyValidator validator = new RequiredPasswordValidator(null);
     validator.validate(topology);
   }
 
   @Test
   public void testValidate_pwdPropertyInBPGroupConfig__NoDefaultPwd() throws Exception {
+    expect(topology.getDefaultPassword()).andReturn(null);
+    replayAll();
+
     Stack.ConfigProperty pwdProp = new Stack.ConfigProperty("test-type", "pwdProp", null);
     service3RequiredPwdConfigs.add(pwdProp);
     // group2 has a component from service 3
     bpGroup2Config.getProperties().put("test-type", Collections.singletonMap("pwdProp", "secret"));
 
-    TopologyValidator validator = new RequiredPasswordValidator(null);
+
     validator.validate(topology);
   }
 
   @Test
   public void testValidate_pwdPropertyInBPClusterConfig__NoDefaultPwd() throws Exception {
+    expect(topology.getDefaultPassword()).andReturn(null);
+    replayAll();
+
     Stack.ConfigProperty pwdProp = new Stack.ConfigProperty("test-type", "pwdProp", null);
     service3RequiredPwdConfigs.add(pwdProp);
     // group2 has a component from service 3
     bpClusterConfig.getProperties().put("test-type", Collections.singletonMap("pwdProp", "secret"));
 
-    TopologyValidator validator = new RequiredPasswordValidator(null);
+
     validator.validate(topology);
   }
 
   @Test(expected = InvalidTopologyException.class)
   public void testValidate_pwdPropertyInStackConfig__NoDefaultPwd() throws Exception {
+    expect(topology.getDefaultPassword()).andReturn(null);
+    replayAll();
+
     Stack.ConfigProperty pwdProp = new Stack.ConfigProperty("test-type", "pwdProp", null);
     service3RequiredPwdConfigs.add(pwdProp);
     // group2 has a component from service 3
     stackDefaults.getProperties().put("test-type", Collections.singletonMap("pwdProp", "secret"));
 
-    TopologyValidator validator = new RequiredPasswordValidator(null);
+
     // because stack config is ignored for validation, an exception should be thrown
     validator.validate(topology);
   }
 
   @Test
   public void testValidate_twoRequiredPwdOneSpecified__defaultPwd() throws Exception {
+    expect(topology.getDefaultPassword()).andReturn("default-pwd");
+    replayAll();
+
     Stack.ConfigProperty pwdProp = new Stack.ConfigProperty("test-type", "pwdProp", null);
     Stack.ConfigProperty pwdProp2 = new Stack.ConfigProperty("test2-type", "pwdProp2", null);
     service1RequiredPwdConfigs.add(pwdProp);
@@ -260,7 +304,6 @@ public class RequiredPasswordValidatorTest {
 
     topoClusterConfig.getProperties().put("test2-type", Collections.singletonMap("pwdProp2", "secret"));
 
-    TopologyValidator validator = new RequiredPasswordValidator("default-pwd");
     // default value should be set
     validator.validate(topology);
 
@@ -271,6 +314,9 @@ public class RequiredPasswordValidatorTest {
 
   @Test
   public void testValidate_twoRequiredPwdTwoSpecified__noDefaultPwd() throws Exception {
+    expect(topology.getDefaultPassword()).andReturn(null);
+    replayAll();
+
     Stack.ConfigProperty pwdProp = new Stack.ConfigProperty("test-type", "pwdProp", null);
     Stack.ConfigProperty pwdProp2 = new Stack.ConfigProperty("test2-type", "pwdProp2", null);
     service1RequiredPwdConfigs.add(pwdProp);
@@ -279,7 +325,6 @@ public class RequiredPasswordValidatorTest {
     topoClusterConfig.getProperties().put("test2-type", Collections.singletonMap("pwdProp2", "secret2"));
     topoClusterConfig.getProperties().put("test-type", Collections.singletonMap("pwdProp", "secret1"));
 
-    TopologyValidator validator = new RequiredPasswordValidator(null);
     // default value should be set
     validator.validate(topology);
 
@@ -290,12 +335,14 @@ public class RequiredPasswordValidatorTest {
 
   @Test
   public void testValidate_multipleMissingPwd__defaultPwd() throws Exception {
+    expect(topology.getDefaultPassword()).andReturn("default-pwd");
+    replayAll();
+
     Stack.ConfigProperty pwdProp = new Stack.ConfigProperty("test-type", "pwdProp", null);
     Stack.ConfigProperty pwdProp2 = new Stack.ConfigProperty("test2-type", "pwdProp2", null);
     service1RequiredPwdConfigs.add(pwdProp);
     service3RequiredPwdConfigs.add(pwdProp2);
 
-    TopologyValidator validator = new RequiredPasswordValidator("default-pwd");
     // default value should be set
     validator.validate(topology);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/103e49a8/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyManagerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyManagerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyManagerTest.java
index 2d5978b..95db56f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyManagerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyManagerTest.java
@@ -66,6 +66,7 @@ import org.apache.ambari.server.stack.NoSuchStackException;
 import org.apache.ambari.server.state.SecurityType;
 import org.apache.ambari.server.state.quicklinksprofile.QuickLinksProfile;
 import org.apache.ambari.server.topology.tasks.ConfigureClusterTaskFactory;
+import org.apache.ambari.server.topology.validators.TopologyValidatorService;
 import org.easymock.Capture;
 import org.easymock.EasyMock;
 import org.easymock.EasyMockRule;
@@ -159,6 +160,9 @@ public class TopologyManagerTest {
   @Mock(type = MockType.STRICT)
   private Future mockFuture;
 
+  @Mock
+  private TopologyValidatorService topologyValidatorService;
+
   private final Configuration stackConfig = new Configuration(new HashMap<String, Map<String, String>>(),
       new HashMap<String, Map<String, Map<String, String>>>());
   private final Configuration bpConfiguration = new Configuration(new HashMap<String, Map<String, String>>(),
@@ -279,7 +283,6 @@ public class TopologyManagerTest {
     expect(request.getDescription()).andReturn("Provision Cluster Test").anyTimes();
     expect(request.getConfiguration()).andReturn(topoConfiguration).anyTimes();
     expect(request.getHostGroupInfo()).andReturn(groupInfoMap).anyTimes();
-    expect(request.getTopologyValidators()).andReturn(topologyValidators).anyTimes();
 
     expect(request.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.NEVER_APPLY).anyTimes();
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/103e49a8/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/HiveServiceValidatorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/HiveServiceValidatorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/HiveServiceValidatorTest.java
index 745b01b..3308333 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/HiveServiceValidatorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/HiveServiceValidatorTest.java
@@ -122,6 +122,7 @@ public class HiveServiceValidatorTest extends EasyMockSupport {
     Collection<String> configTypes = Arrays.asList("hive-env", "core-site", "hadoop-env");
     EasyMock.expect(clusterTopologyMock.getBlueprint()).andReturn(blueprintMock).anyTimes();
     EasyMock.expect(blueprintMock.getServices()).andReturn(blueprintServices).anyTimes();
+    EasyMock.expect(blueprintMock.getComponents("HIVE")).andReturn(Collections.<String>emptyList()).anyTimes();
     EasyMock.expect(clusterTopologyMock.getConfiguration()).andReturn(configurationMock);
     EasyMock.expect(configurationMock.getAllConfigTypes()).andReturn(configTypes);
 
@@ -140,9 +141,11 @@ public class HiveServiceValidatorTest extends EasyMockSupport {
   public void testShouldValidationPassWhenDefaultsAreUsedAndMsqlComponentIsListed() throws Exception {
     // GIVEN
     Collection<String> blueprintServices = Arrays.asList("HIVE", "HDFS", "MYSQL_SERVER");
+    Collection<String> hiveComponents = Arrays.asList("MYSQL_SERVER");
     Collection<String> configTypes = Arrays.asList("hive-env", "core-site", "hadoop-env");
     EasyMock.expect(clusterTopologyMock.getBlueprint()).andReturn(blueprintMock).anyTimes();
     EasyMock.expect(blueprintMock.getServices()).andReturn(blueprintServices).anyTimes();
+    EasyMock.expect(blueprintMock.getComponents("HIVE")).andReturn(hiveComponents).anyTimes();
     EasyMock.expect(clusterTopologyMock.getConfiguration()).andReturn(configurationMock);
     EasyMock.expect(configurationMock.getAllConfigTypes()).andReturn(configTypes);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/103e49a8/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/StackConfigTypeValidatorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/StackConfigTypeValidatorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/StackConfigTypeValidatorTest.java
new file mode 100644
index 0000000..4a70448
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/StackConfigTypeValidatorTest.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.topology.validators;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.ambari.server.controller.internal.Stack;
+import org.apache.ambari.server.topology.Blueprint;
+import org.apache.ambari.server.topology.ClusterTopology;
+import org.apache.ambari.server.topology.Configuration;
+import org.apache.ambari.server.topology.InvalidTopologyException;
+import org.easymock.EasyMock;
+import org.easymock.EasyMockRule;
+import org.easymock.EasyMockSupport;
+import org.easymock.Mock;
+import org.easymock.TestSubject;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+
+public class StackConfigTypeValidatorTest extends EasyMockSupport {
+
+  @Rule
+  public EasyMockRule mocks = new EasyMockRule(this);
+
+  @Mock
+  private Configuration clusterConfigurationMock;
+
+  @Mock
+  private Configuration stackConfigurationMock;
+
+  @Mock
+  private Blueprint blueprintMock;
+
+  @Mock
+  private Stack stackMock;
+
+  @Mock
+  private ClusterTopology clusterTopologyMock;
+
+  private Set<String> clusterRequestConfigTypes;
+
+  @TestSubject
+  private StackConfigTypeValidator stackConfigTypeValidator = new StackConfigTypeValidator();
+
+  @Before
+  public void before() {
+    EasyMock.expect(clusterTopologyMock.getConfiguration()).andReturn(clusterConfigurationMock).anyTimes();
+    EasyMock.expect(clusterTopologyMock.getBlueprint()).andReturn(blueprintMock).anyTimes();
+
+    EasyMock.expect(blueprintMock.getStack()).andReturn(stackMock).anyTimes();
+  }
+
+  @After
+  public void after() {
+    resetAll();
+  }
+
+
+  @Test(expected = InvalidTopologyException.class)
+  public void testShouldValidationFailWhenUnknownConfigTypeComesIn() throws Exception {
+    // GIVEN
+    EasyMock.expect(stackMock.getConfiguration()).andReturn(stackConfigurationMock);
+    EasyMock.expect(stackConfigurationMock.getAllConfigTypes()).andReturn(new HashSet<>(Arrays.asList("core-site", "yarn-site")));
+    EasyMock.expect(clusterConfigurationMock.getAllConfigTypes()).andReturn(new HashSet<>(Arrays.asList("invalid-site")));
+
+    replayAll();
+
+    // WHEN
+    stackConfigTypeValidator.validate(clusterTopologyMock);
+
+    // THEN
+    // exception is thrown
+
+  }
+
+  @Test
+  public void testShouldValidationPassIfNoConfigTypesComeIn() throws Exception {
+    // GIVEN
+    EasyMock.expect(stackMock.getConfiguration()).andReturn(stackConfigurationMock);
+    EasyMock.expect(stackConfigurationMock.getAllConfigTypes()).andReturn(new HashSet<>(Arrays.asList("core-site", "yarn-site")));
+    EasyMock.expect(clusterConfigurationMock.getAllConfigTypes()).andReturn(new HashSet<>(Collections.<String>emptyList()));
+
+    replayAll();
+
+    // WHEN
+    stackConfigTypeValidator.validate(clusterTopologyMock);
+
+    // THEN
+    // no exception is thrown
+
+  }
+
+  @Test(expected = InvalidTopologyException.class)
+  public void testShouldValidationFailIfMultipleInvalidConfigTypesComeIn() throws Exception {
+    // GIVEN
+    EasyMock.expect(stackMock.getConfiguration()).andReturn(stackConfigurationMock);
+    EasyMock.expect(stackConfigurationMock.getAllConfigTypes()).andReturn(new HashSet<>(Arrays.asList("core-site", "yarn-site")));
+    EasyMock.expect(clusterConfigurationMock.getAllConfigTypes()).andReturn(new HashSet<>(Arrays.asList("invalid-site-1", "invalid-default")));
+
+    replayAll();
+
+    // WHEN
+    stackConfigTypeValidator.validate(clusterTopologyMock);
+
+    // THEN
+    // exception is thrown
+
+  }
+}
\ No newline at end of file

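The StackConfigTypeValidator implementation itself does not appear in this excerpt, but the tests above fix its contract: gather the config types named in the cluster request, compare them with the types the stack defines, and reject the request on any unknown type. A minimal sketch consistent with those mock expectations (the getConfiguration/getAllConfigTypes call chain is taken from the test; the production class may differ in detail):

package org.apache.ambari.server.topology.validators;

import java.util.HashSet;
import java.util.Set;

import org.apache.ambari.server.topology.ClusterTopology;
import org.apache.ambari.server.topology.InvalidTopologyException;
import org.apache.ambari.server.topology.TopologyValidator;

/** Sketch only: fails the request when it references config types unknown to the stack. */
public class StackConfigTypeValidator implements TopologyValidator {

  @Override
  public void validate(ClusterTopology topology) throws InvalidTopologyException {
    // Config types named in the incoming cluster request, e.g. "core-site"
    Set<String> requestTypes = new HashSet<>(topology.getConfiguration().getAllConfigTypes());

    // Config types the selected stack actually defines
    Set<String> stackTypes =
        new HashSet<>(topology.getBlueprint().getStack().getConfiguration().getAllConfigTypes());

    // Anything left over is unknown to the stack and invalidates the topology
    requestTypes.removeAll(stackTypes);
    if (!requestTypes.isEmpty()) {
      throw new InvalidTopologyException(
          "The following config types are not defined in the stack: " + requestTypes);
    }
  }
}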

[28/34] ambari git commit: AMBARI-20755 topology configuration type validation on blueprint deployments

Posted by nc...@apache.org.
AMBARI-20755 topology configuration type validation on blueprint deployments


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/103e49a8
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/103e49a8
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/103e49a8

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 103e49a899b57ff286889b8840e758c53bdaf1e8
Parents: 86c3058
Author: lpuskas <lp...@apache.org>
Authored: Wed Apr 12 18:43:47 2017 +0200
Committer: lpuskas <lp...@apache.org>
Committed: Fri Apr 14 16:01:10 2017 +0200

----------------------------------------------------------------------
 .../internal/ExportBlueprintRequest.java        |   6 -
 .../internal/ProvisionClusterRequest.java       |  21 +-
 .../internal/ScaleClusterRequest.java           |   7 -
 .../ambari/server/topology/ClusterTopology.java |   2 +
 .../server/topology/ClusterTopologyImpl.java    |  37 +-
 .../server/topology/PersistedStateImpl.java     |   5 -
 .../ambari/server/topology/TopologyManager.java |  43 +-
 .../ambari/server/topology/TopologyRequest.java |   8 -
 .../validators/ChainedTopologyValidator.java    |  58 ++
 .../validators/HiveServiceValidator.java        |   2 +-
 .../validators/RequiredPasswordValidator.java   |   6 +-
 .../validators/StackConfigTypeValidator.java    |  64 ++
 .../validators/TopologyValidatorFactory.java    |  34 +
 .../validators/TopologyValidatorService.java    |  52 ++
 .../BlueprintConfigurationProcessorTest.java    | 660 ++++++++++---------
 .../internal/ProvisionClusterRequestTest.java   |  32 -
 .../internal/ScaleClusterRequestTest.java       |   6 -
 .../ClusterDeployWithStartOnlyTest.java         |  37 +-
 ...InstallWithoutStartOnComponentLevelTest.java |  33 +-
 .../ClusterInstallWithoutStartTest.java         |  37 +-
 .../topology/ClusterTopologyImplTest.java       |  57 +-
 .../topology/RequiredPasswordValidatorTest.java | 113 +++-
 .../server/topology/TopologyManagerTest.java    |   5 +-
 .../validators/HiveServiceValidatorTest.java    |   3 +
 .../StackConfigTypeValidatorTest.java           | 126 ++++
 25 files changed, 850 insertions(+), 604 deletions(-)
----------------------------------------------------------------------
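The bodies of the new ChainedTopologyValidator, TopologyValidatorFactory and TopologyValidatorService classes listed above are not included in this excerpt. Judging from the validation loop removed from ClusterTopologyImpl.validateTopology() below and the single topologyValidatorService.validateTopologyConfiguration(topology) call site added to TopologyManager, the chained validator is plausibly the old loop relocated behind an injectable service; a minimal sketch under that assumption (the constructor shape is a guess):

package org.apache.ambari.server.topology.validators;

import java.util.List;

import org.apache.ambari.server.topology.ClusterTopology;
import org.apache.ambari.server.topology.InvalidTopologyException;
import org.apache.ambari.server.topology.TopologyValidator;

/** Sketch only: runs each configured validator in order; the first failure aborts provisioning. */
public class ChainedTopologyValidator implements TopologyValidator {

  private final List<TopologyValidator> validators;

  public ChainedTopologyValidator(List<TopologyValidator> validators) {
    this.validators = validators;
  }

  @Override
  public void validate(ClusterTopology topology) throws InvalidTopologyException {
    // Same semantics as the loop formerly in ClusterTopologyImpl.validateTopology()
    for (TopologyValidator validator : validators) {
      validator.validate(topology);
    }
  }
}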


http://git-wip-us.apache.org/repos/asf/ambari/blob/103e49a8/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ExportBlueprintRequest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ExportBlueprintRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ExportBlueprintRequest.java
index f24c138..19d9141 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ExportBlueprintRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ExportBlueprintRequest.java
@@ -47,7 +47,6 @@ import org.apache.ambari.server.topology.HostGroupImpl;
 import org.apache.ambari.server.topology.HostGroupInfo;
 import org.apache.ambari.server.topology.InvalidTopologyTemplateException;
 import org.apache.ambari.server.topology.TopologyRequest;
-import org.apache.ambari.server.topology.TopologyValidator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -114,11 +113,6 @@ public class ExportBlueprintRequest implements TopologyRequest {
   }
 
   @Override
-  public List<TopologyValidator> getTopologyValidators() {
-    return Collections.emptyList();
-  }
-
-  @Override
   public String getDescription() {
     return String.format("Export Command For Cluster '%s'", clusterName);
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/103e49a8/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequest.java
index 1a14b01..de7883d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequest.java
@@ -19,7 +19,6 @@ package org.apache.ambari.server.controller.internal;
 
 import java.util.Collection;
 import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
@@ -36,17 +35,12 @@ import org.apache.ambari.server.topology.HostGroupInfo;
 import org.apache.ambari.server.topology.InvalidTopologyTemplateException;
 import org.apache.ambari.server.topology.NoSuchBlueprintException;
 import org.apache.ambari.server.topology.SecurityConfiguration;
-import org.apache.ambari.server.topology.TopologyValidator;
-import org.apache.ambari.server.topology.validators.ClusterConfigTypeValidator;
-import org.apache.ambari.server.topology.validators.HiveServiceValidator;
-import org.apache.ambari.server.topology.validators.RequiredPasswordValidator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Enums;
 import com.google.common.base.Optional;
 import com.google.common.base.Strings;
-import com.google.common.collect.ImmutableList;
 
 /**
  * Request for provisioning a cluster.
@@ -146,8 +140,6 @@ public class ProvisionClusterRequest extends BaseClusterRequest {
 
   private final String quickLinksProfileJson;
 
-  private final List<TopologyValidator> topologyValidators;
-
   private final static Logger LOG = LoggerFactory.getLogger(ProvisionClusterRequest.class);
 
   /**
@@ -197,9 +189,6 @@ public class ProvisionClusterRequest extends BaseClusterRequest {
     } catch (QuickLinksProfileEvaluationException ex) {
       throw new InvalidTopologyTemplateException("Invalid quick links profile", ex);
     }
-
-    topologyValidators = ImmutableList.of(new RequiredPasswordValidator(defaultPassword),
-      new ClusterConfigTypeValidator(), new HiveServiceValidator());
   }
 
   private String processQuickLinksProfile(Map<String, Object> properties) throws QuickLinksProfileEvaluationException {
@@ -273,11 +262,6 @@ public class ProvisionClusterRequest extends BaseClusterRequest {
   }
 
   @Override
-  public List<TopologyValidator> getTopologyValidators() {
-    return topologyValidators;
-  }
-
-  @Override
   public String getDescription() {
     return String.format("Provision Cluster '%s'", clusterName);
   }
@@ -480,4 +464,9 @@ public class ProvisionClusterRequest extends BaseClusterRequest {
   public String getQuickLinksProfileJson() {
     return quickLinksProfileJson;
   }
+
+  public String getDefaultPassword() {
+    return defaultPassword;
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/103e49a8/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ScaleClusterRequest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ScaleClusterRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ScaleClusterRequest.java
index b5d2f9d..2a91bfe 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ScaleClusterRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ScaleClusterRequest.java
@@ -20,7 +20,6 @@
 package org.apache.ambari.server.controller.internal;
 
 import java.util.Collections;
-import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
@@ -30,7 +29,6 @@ import org.apache.ambari.server.topology.Blueprint;
 import org.apache.ambari.server.topology.Configuration;
 import org.apache.ambari.server.topology.HostGroupInfo;
 import org.apache.ambari.server.topology.InvalidTopologyTemplateException;
-import org.apache.ambari.server.topology.TopologyValidator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -90,11 +88,6 @@ public class ScaleClusterRequest extends BaseClusterRequest {
   }
 
   @Override
-  public List<TopologyValidator> getTopologyValidators() {
-    return Collections.emptyList();
-  }
-
-  @Override
   public String getDescription() {
     return String.format("Scale Cluster '%s' (+%s hosts)", clusterName, getTotalRequestedHostCount());
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/103e49a8/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopology.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopology.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopology.java
index e37c68d..639c406 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopology.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopology.java
@@ -178,4 +178,6 @@ public interface ClusterTopology {
    */
   void removeHost(String hostname);
 
+  String getDefaultPassword();
+
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/103e49a8/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java
index 37fb7d4..2ea904e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java
@@ -26,13 +26,13 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.controller.RequestStatusResponse;
 import org.apache.ambari.server.controller.internal.ProvisionAction;
+import org.apache.ambari.server.controller.internal.ProvisionClusterRequest;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -54,6 +54,7 @@ public class ClusterTopologyImpl implements ClusterTopology {
   private Map<String, AdvisedConfiguration> advisedConfigurations = new HashMap<>();
   private final Map<String, HostGroupInfo> hostGroupInfoMap = new HashMap<>();
   private final AmbariContext ambariContext;
+  private final String defaultPassword;
 
   private final static Logger LOG = LoggerFactory.getLogger(ClusterTopologyImpl.class);
 
@@ -65,26 +66,16 @@ public class ClusterTopologyImpl implements ClusterTopology {
     // provision cluster currently requires that all hostgroups have same BP so it is ok to use root level BP here
     this.blueprint = topologyRequest.getBlueprint();
     this.configuration = topologyRequest.getConfiguration();
+    if (topologyRequest instanceof ProvisionClusterRequest) {
+      this.defaultPassword = ((ProvisionClusterRequest) topologyRequest).getDefaultPassword();
+    } else {
+      this.defaultPassword = null;
+    }
 
     registerHostGroupInfo(topologyRequest.getHostGroupInfo());
 
-    validateTopology(topologyRequest.getTopologyValidators());
-    this.ambariContext = ambariContext;
-  }
-
-  //todo: only used in tests, remove.  Validators not invoked when this constructor is used.
-  public ClusterTopologyImpl(AmbariContext ambariContext,
-                             Long clusterId,
-                             Blueprint blueprint,
-                             Configuration configuration,
-                             Map<String, HostGroupInfo> hostGroupInfo)
-                                throws InvalidTopologyException {
-
-    this.clusterId = clusterId;
-    this.blueprint = blueprint;
-    this.configuration = configuration;
-
-    registerHostGroupInfo(hostGroupInfo);
+    // todo extract validation to specialized service
+    validateTopology();
     this.ambariContext = ambariContext;
   }
 
@@ -213,12 +204,9 @@ public class ClusterTopologyImpl implements ClusterTopology {
       && configProperties.get("yarn-site").get("yarn.resourcemanager.ha.enabled").equals("true");
   }
 
-  private void validateTopology(List<TopologyValidator> validators)
+  private void validateTopology()
       throws InvalidTopologyException {
 
-    for (TopologyValidator validator : validators) {
-      validator.validate(this);
-    }
     if(isNameNodeHAEnabled()){
         Collection<String> nnHosts = getHostAssignmentsForComponent("NAMENODE");
         if (nnHosts.size() != 2) {
@@ -320,6 +308,11 @@ public class ClusterTopologyImpl implements ClusterTopology {
     }
   }
 
+  @Override
+  public String getDefaultPassword() {
+    return defaultPassword;
+  }
+
   private void registerHostGroupInfo(Map<String, HostGroupInfo> requestedHostGroupInfoMap) throws InvalidTopologyException {
     LOG.debug("Registering requested host group information for {} hostgroups", requestedHostGroupInfoMap.size());
     checkForDuplicateHosts(requestedHostGroupInfoMap);

http://git-wip-us.apache.org/repos/asf/ambari/blob/103e49a8/ambari-server/src/main/java/org/apache/ambari/server/topology/PersistedStateImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/PersistedStateImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/PersistedStateImpl.java
index 2ac9950..36eb1bc 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/PersistedStateImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/PersistedStateImpl.java
@@ -20,7 +20,6 @@ package org.apache.ambari.server.topology;
 
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -399,10 +398,6 @@ public class PersistedStateImpl implements PersistedState {
       return hostGroupInfoMap;
     }
 
-    @Override
-    public List<TopologyValidator> getTopologyValidators() {
-      return Collections.emptyList();
-    }
 
     @Override
     public String getDescription() {

http://git-wip-us.apache.org/repos/asf/ambari/blob/103e49a8/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
index 392a53e..643945c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
@@ -74,6 +74,7 @@ import org.apache.ambari.server.state.host.HostImpl;
 import org.apache.ambari.server.state.quicklinksprofile.QuickLinksProfile;
 import org.apache.ambari.server.topology.tasks.ConfigureClusterTask;
 import org.apache.ambari.server.topology.tasks.ConfigureClusterTaskFactory;
+import org.apache.ambari.server.topology.validators.TopologyValidatorService;
 import org.apache.ambari.server.utils.RetryHelper;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -89,7 +90,9 @@ import com.google.inject.persist.Transactional;
 @Singleton
 public class TopologyManager {
 
-  /** internal token for topology related async tasks */
+  /**
+   * internal token for topology related async tasks
+   */
   public static final String INTERNAL_AUTH_TOKEN = "internal_topology_token";
 
   public static final String INITIAL_CONFIG_TAG = "INITIAL";
@@ -135,6 +138,9 @@ public class TopologyManager {
   @Inject
   private SettingDAO settingDAO;
 
+  @Inject
+  private TopologyValidatorService topologyValidatorService;
+
   /**
    * A boolean not cached thread-local (volatile) to prevent double-checked
    * locking on the synchronized keyword.
@@ -264,32 +270,35 @@ public class TopologyManager {
     // get the id prior to creating ambari resources which increments the counter
     final Long provisionId = ambariContext.getNextRequestId();
 
-    boolean configureSecurity = false;
+    SecurityType securityType = null;
+    Credential credential = null;
 
     SecurityConfiguration securityConfiguration = processSecurityConfiguration(request);
 
     if (securityConfiguration != null && securityConfiguration.getType() == SecurityType.KERBEROS) {
-      configureSecurity = true;
+      securityType = SecurityType.KERBEROS;
       addKerberosClient(topology);
 
       // refresh default stack config after adding KERBEROS_CLIENT component to topology
-      topology.getBlueprint().getConfiguration().setParentConfiguration(stack.getConfiguration(topology.getBlueprint
-        ().getServices()));
-
-      // create Cluster resource with security_type = KERBEROS, this will trigger cluster Kerberization
-      // upon host install task execution
-      ambariContext.createAmbariResources(topology, clusterName, SecurityType.KERBEROS, repoVersion);
-      if (securityConfiguration.getDescriptor() != null) {
-        submitKerberosDescriptorAsArtifact(clusterName, securityConfiguration.getDescriptor());
-      }
+      topology.getBlueprint().getConfiguration().setParentConfiguration(stack.getConfiguration(topology.getBlueprint().getServices()));
 
-      Credential credential = request.getCredentialsMap().get(KDC_ADMIN_CREDENTIAL);
+      credential = request.getCredentialsMap().get(KDC_ADMIN_CREDENTIAL);
       if (credential == null) {
         throw new InvalidTopologyException(KDC_ADMIN_CREDENTIAL + " is missing from request.");
       }
+    }
+
+    topologyValidatorService.validateTopologyConfiguration(topology);
+
+    // create resources
+    ambariContext.createAmbariResources(topology, clusterName, securityType, repoVersion);
+
+    if (securityConfiguration != null && securityConfiguration.getDescriptor() != null) {
+      submitKerberosDescriptorAsArtifact(clusterName, securityConfiguration.getDescriptor());
+    }
+
+    if (credential != null) {
       submitCredential(clusterName, credential);
-    } else {
-      ambariContext.createAmbariResources(topology, clusterName, null, repoVersion);
     }
 
     long clusterId = ambariContext.getClusterId(clusterName);
@@ -312,8 +321,8 @@ public class TopologyManager {
 
     clusterTopologyMap.put(clusterId, topology);
 
-    addClusterConfigRequest(topology, new ClusterConfigurationRequest(
-      ambariContext, topology, true, stackAdvisorBlueprintProcessor, configureSecurity));
+    addClusterConfigRequest(topology, new ClusterConfigurationRequest(ambariContext, topology, true,
+      stackAdvisorBlueprintProcessor, securityType == SecurityType.KERBEROS));
 
 
     // Notify listeners that cluster configuration finished
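
The net effect of this hunk is one linear provisioning path: security inputs are resolved up front, validation runs for every request, and resource creation is no longer duplicated across the Kerberos and non-Kerberos branches. A rough sketch of the new ordering (names taken from the diff above; not the full method):

    // 1. Kerberos requests only set securityType/credential and fail fast on a missing KDC credential.
    // 2. Every request is validated, secure or not.
    topologyValidatorService.validateTopologyConfiguration(topology);
    // 3. Resources are created once; securityType stays null for non-Kerberos clusters.
    ambariContext.createAmbariResources(topology, clusterName, securityType, repoVersion);
    // 4. The Kerberos descriptor and KDC credential are submitted only when present.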

http://git-wip-us.apache.org/repos/asf/ambari/blob/103e49a8/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyRequest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyRequest.java
index cbc6642..4cadefa 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyRequest.java
@@ -18,7 +18,6 @@
 
 package org.apache.ambari.server.topology;
 
-import java.util.List;
 import java.util.Map;
 
 /**
@@ -70,13 +69,6 @@ public interface TopologyRequest {
   Map<String, HostGroupInfo> getHostGroupInfo();
 
   /**
-   * Get request topology validators.
-   *
-   * @return list of topology validators
-   */
-  List<TopologyValidator> getTopologyValidators();
-
-  /**
    * Get request description.
    *
    * @return string description of the request

http://git-wip-us.apache.org/repos/asf/ambari/blob/103e49a8/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/ChainedTopologyValidator.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/ChainedTopologyValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/ChainedTopologyValidator.java
new file mode 100644
index 0000000..8bcbcff
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/ChainedTopologyValidator.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.topology.validators;
+
+import java.util.List;
+
+import org.apache.ambari.server.topology.ClusterTopology;
+import org.apache.ambari.server.topology.InvalidTopologyException;
+import org.apache.ambari.server.topology.TopologyValidator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Topology validator wrapper implementation. Executes a set of validations by delegating to a preconfigured list of validator implementations.
+ */
+public class ChainedTopologyValidator implements TopologyValidator {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(ChainedTopologyValidator.class);
+  private List<TopologyValidator> validators;
+
+  public ChainedTopologyValidator(List<TopologyValidator> validators) {
+    this.validators = validators;
+  }
+
+  @Override
+  public void validate(ClusterTopology topology) throws InvalidTopologyException {
+    for (TopologyValidator validator : validators) {
+      LOGGER.info("Performing topology validation: {}", validator.getClass());
+      validator.validate(topology);
+    }
+  }
+}
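
Since ChainedTopologyValidator is itself a TopologyValidator, chains compose like any single validator, and the first failing delegate aborts the run by throwing InvalidTopologyException. A minimal usage sketch, assuming a ClusterTopology named topology is in scope (the validator list is illustrative):

    import java.util.Arrays;

    TopologyValidator chain = new ChainedTopologyValidator(Arrays.asList(
        new RequiredPasswordValidator(),
        new StackConfigTypeValidator()));
    chain.validate(topology);  // throws InvalidTopologyException at the first failed validation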

http://git-wip-us.apache.org/repos/asf/ambari/blob/103e49a8/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/HiveServiceValidator.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/HiveServiceValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/HiveServiceValidator.java
index 1351739..80b2593 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/HiveServiceValidator.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/HiveServiceValidator.java
@@ -59,7 +59,7 @@ public class HiveServiceValidator implements TopologyValidator {
     }
 
     // hive database settings need the mysql-server component in the blueprint
-    if (!topology.getBlueprint().getServices().contains(MYSQL_SERVER_COMPONENT)) {
+    if (!topology.getBlueprint().getComponents(HIVE_SERVICE).contains(MYSQL_SERVER_COMPONENT)) {
       String errorMessage = String.format("Component [%s] must explicitly be set in the blueprint when hive database " +
         "is configured with the current settings. HIVE service validation failed.", MYSQL_SERVER_COMPONENT);
       LOGGER.error(errorMessage);
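
The one-line change above fixes the membership test: MYSQL_SERVER_COMPONENT names a component, while getServices() returns service names, so the old check could never match and the validator rejected blueprints even when they declared the component. Side by side (a sketch, constants as in the class above):

    // before: a component name tested against service names -- never matches
    topology.getBlueprint().getServices().contains(MYSQL_SERVER_COMPONENT)
    // after: tested against the HIVE service's component list, where it can match
    topology.getBlueprint().getComponents(HIVE_SERVICE).contains(MYSQL_SERVER_COMPONENT)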

http://git-wip-us.apache.org/repos/asf/ambari/blob/103e49a8/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredPasswordValidator.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredPasswordValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredPasswordValidator.java
index 591a124..5b4ecc1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredPasswordValidator.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredPasswordValidator.java
@@ -33,10 +33,10 @@ import org.apache.ambari.server.topology.TopologyValidator;
  */
 public class RequiredPasswordValidator implements TopologyValidator {
 
+  // todo remove the field as all the information is available in the topology being validated
   private String defaultPassword;
 
-  public RequiredPasswordValidator(String defaultPassword) {
-    this.defaultPassword = defaultPassword;
+  public RequiredPasswordValidator() {
   }
 
   /**
@@ -46,6 +46,8 @@ public class RequiredPasswordValidator implements TopologyValidator {
    *                                  default is specified via 'default_password'
    */
   public void validate(ClusterTopology topology) throws InvalidTopologyException {
+
+    defaultPassword = topology.getDefaultPassword();
     Map<String, Map<String, Collection<String>>> missingPasswords = validateRequiredPasswords(topology);
 
     if (! missingPasswords.isEmpty()) {
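
With the constructor argument gone, the default password now travels with the topology itself (see the new ClusterTopology.getDefaultPassword() above), so the validator needs no per-request state at construction time and can sit in a shared validator list. A minimal sketch:

    RequiredPasswordValidator validator = new RequiredPasswordValidator();
    validator.validate(topology);  // reads topology.getDefaultPassword() internally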

http://git-wip-us.apache.org/repos/asf/ambari/blob/103e49a8/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/StackConfigTypeValidator.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/StackConfigTypeValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/StackConfigTypeValidator.java
new file mode 100644
index 0000000..f028a31
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/StackConfigTypeValidator.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.topology.validators;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.ambari.server.topology.ClusterTopology;
+import org.apache.ambari.server.topology.InvalidTopologyException;
+import org.apache.ambari.server.topology.TopologyValidator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Validates the incoming config types (from the blueprint or the cluster creation template).
+ * A configuration type is considered valid if the stack the cluster is being created from defines such a
+ * config type.
+ */
+public class StackConfigTypeValidator implements TopologyValidator {
+  private static final Logger LOGGER = LoggerFactory.getLogger(StackConfigTypeValidator.class);
+
+  public StackConfigTypeValidator() {
+  }
+
+  @Override
+  public void validate(ClusterTopology topology) throws InvalidTopologyException {
+
+    // get the config types from the request
+    Set<String> incomingConfigTypes = new HashSet<>(topology.getConfiguration().getAllConfigTypes());
+
+    if (incomingConfigTypes.isEmpty()) {
+      LOGGER.debug("No config types to be checked.");
+      return;
+    }
+
+    Set<String> stackConfigTypes = new HashSet<>(topology.getBlueprint().getStack().getConfiguration().getAllConfigTypes());
+
+    // remove all "valid" config types from the incoming set
+    incomingConfigTypes.removeAll(stackConfigTypes);
+
+    if (!incomingConfigTypes.isEmpty()) {
+      // there are config types in the request that are not in the stack
+      String message = String.format("The following config types are not defined in the stack: %s ", incomingConfigTypes);
+      LOGGER.error(message);
+      throw new InvalidTopologyException(message);
+    }
+  }
+}
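
The validation is a plain set difference: whatever survives in the incoming set after removing the stack's config types is unknown to the stack. A self-contained sketch of the same idea (the config type names are made up):

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    Set<String> incoming = new HashSet<>(Arrays.asList("core-site", "hdfs-site", "tyop-site"));
    Set<String> stackTypes = new HashSet<>(Arrays.asList("core-site", "hdfs-site", "yarn-site"));
    incoming.removeAll(stackTypes);
    // incoming is now ["tyop-site"], so the validator would throw InvalidTopologyException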

http://git-wip-us.apache.org/repos/asf/ambari/blob/103e49a8/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/TopologyValidatorFactory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/TopologyValidatorFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/TopologyValidatorFactory.java
new file mode 100644
index 0000000..0e77301
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/TopologyValidatorFactory.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.topology.validators;
+
+import java.util.List;
+
+import org.apache.ambari.server.topology.TopologyValidator;
+
+import com.google.common.collect.ImmutableList;
+
+public class TopologyValidatorFactory {
+  List<TopologyValidator> validators;
+
+  public TopologyValidatorFactory() {
+    validators = ImmutableList.of(new RequiredPasswordValidator(), new HiveServiceValidator(), new StackConfigTypeValidator());
+  }
+
+  public TopologyValidator createConfigurationValidatorChain() {
+    return new ChainedTopologyValidator(validators);
+  }
+
+}
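
The ImmutableList fixes the validator order, so the password check always runs before the Hive and stack config-type checks, and registering a new validator is a one-line change confined to this factory. For illustration (MyNewValidator is hypothetical, not part of this commit):

    validators = ImmutableList.of(new RequiredPasswordValidator(), new HiveServiceValidator(),
        new StackConfigTypeValidator(), new MyNewValidator());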

http://git-wip-us.apache.org/repos/asf/ambari/blob/103e49a8/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/TopologyValidatorService.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/TopologyValidatorService.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/TopologyValidatorService.java
new file mode 100644
index 0000000..425cf1e
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/TopologyValidatorService.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.topology.validators;
+
+import javax.inject.Inject;
+
+import org.apache.ambari.server.topology.ClusterTopology;
+import org.apache.ambari.server.topology.InvalidTopologyException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Service implementation dealing with topology validation.
+ * It manages cluster topology validation by grouping validators into different sets, as required by the
+ * calling logic.
+ *
+ * Ideally this service should be used instead of invoking validator implementations directly.
+ */
+public class TopologyValidatorService {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(TopologyValidatorService.class);
+
+  @Inject
+  private TopologyValidatorFactory topologyValidatorFactory;
+
+  public TopologyValidatorService() {
+  }
+
+  public void validateTopologyConfiguration(ClusterTopology clusterTopology) throws InvalidTopologyException {
+    LOGGER.info("Validating cluster topology: {}", clusterTopology);
+    topologyValidatorFactory.createConfigurationValidatorChain().validate(clusterTopology);
+  }
+
+}


[22/34] ambari git commit: AMBARI-20762. Add more database options for BEACON in stack advisor.(xiwang)

Posted by nc...@apache.org.
AMBARI-20762. Add more database options for BEACON in stack advisor.(xiwang)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c57300a3
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c57300a3
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c57300a3

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: c57300a359bfdb3225e2660a661733fc58daac2c
Parents: 38f84bf
Author: Xi Wang <xi...@apache.org>
Authored: Thu Apr 13 14:10:16 2017 -0700
Committer: Xi Wang <xi...@apache.org>
Committed: Thu Apr 13 16:11:44 2017 -0700

----------------------------------------------------------------------
 .../stacks/HDP/2.6/services/stack_advisor.py    | 33 +++++++++++++++++++-
 1 file changed, 32 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c57300a3/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
index 38f46d7..4e1b4b6 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
@@ -40,11 +40,42 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
         "HIVE": self.recommendHIVEConfigurations,
         "HBASE": self.recommendHBASEConfigurations,
         "YARN": self.recommendYARNConfigurations,
-        "KAFKA": self.recommendKAFKAConfigurations
+        "KAFKA": self.recommendKAFKAConfigurations,
+        "BEACON": self.recommendBEACONConfigurations
       }
       parentRecommendConfDict.update(childRecommendConfDict)
       return parentRecommendConfDict
 
+  def recommendBEACONConfigurations(self, configurations, clusterData, services, hosts):
+    beaconEnvProperties = self.getSiteProperties(services['configurations'], 'beacon-env')
+    putbeaconEnvProperty = self.putProperty(configurations, "beacon-env", services)
+
+    # database URL and driver class recommendations
+    if beaconEnvProperties and self.checkSiteProperties(beaconEnvProperties, 'beacon_store_driver') and self.checkSiteProperties(beaconEnvProperties, 'beacon_database'):
+      putbeaconEnvProperty('beacon_store_driver', self.getDBDriver(beaconEnvProperties['beacon_database']))
+    if beaconEnvProperties and self.checkSiteProperties(beaconEnvProperties, 'beacon_store_db_name', 'beacon_store_url') and self.checkSiteProperties(beaconEnvProperties, 'beacon_database'):
+      beaconServerHost = self.getHostWithComponent('BEACON', 'BEACON_SERVER', services, hosts)
+      beaconDBConnectionURL = beaconEnvProperties['beacon_store_url']
+      protocol = self.getProtocol(beaconEnvProperties['beacon_database'])
+      oldSchemaName = self.getOldValue(services, "beacon-env", "beacon_store_db_name")
+      oldDBType = self.getOldValue(services, "beacon-env", "beacon_database")
+      # These checks recommend a new connection URL when the Beacon server host is known and any of the
+      # following holds: the current URL is the default one pointing at "localhost", the schema name changed,
+      # the db type changed (relevant for the change from default mysql to existing mysql), or the protocol
+      # implied by the current db type no longer matches the protocol in the connection URL (other db type changes)
+      if beaconServerHost is not None:
+        if (beaconDBConnectionURL and "//localhost" in beaconDBConnectionURL) or oldSchemaName or oldDBType or (protocol and beaconDBConnectionURL and not beaconDBConnectionURL.startswith(protocol)):
+          dbConnection = self.getDBConnectionStringBeacon(beaconEnvProperties['beacon_database']).format(beaconServerHost['Hosts']['host_name'], beaconEnvProperties['beacon_store_db_name'])
+          putbeaconEnvProperty('beacon_store_url', dbConnection)
+
+  def getDBConnectionStringBeacon(self, databaseType):
+    driverDict = {
+      'NEW DERBY DATABASE': 'jdbc:derby:${{beacon.data.dir}}/${{beacon.store.db.name}}-db;create=true',
+      'EXISTING MYSQL DATABASE': 'jdbc:mysql://{0}/{1}',
+      'EXISTING MYSQL / MARIADB DATABASE': 'jdbc:mysql://{0}/{1}',
+      'EXISTING ORACLE DATABASE': 'jdbc:oracle:thin:@//{0}:1521/{1}'
+    }
+    return driverDict.get(databaseType.upper())
+
   def recommendAtlasConfigurations(self, configurations, clusterData, services, hosts):
     super(HDP26StackAdvisor, self).recommendAtlasConfigurations(configurations, clusterData, services, hosts)
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
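
The recommendation logic above is a dictionary dispatch from database display name to a JDBC URL template with positional placeholders ({0} = host, {1} = schema); the NEW DERBY template doubles its braces so Python's str.format leaves the literal ${beacon.data.dir} and ${beacon.store.db.name} placeholders intact. The same technique sketched in Java for illustration only (Ambari stack advisors are Python; Map.of assumes Java 9+):

    import java.text.MessageFormat;
    import java.util.Map;

    Map<String, String> templates = Map.of(
        "EXISTING MYSQL DATABASE", "jdbc:mysql://{0}/{1}",
        "EXISTING ORACLE DATABASE", "jdbc:oracle:thin:@//{0}:1521/{1}");
    String url = MessageFormat.format(templates.get("EXISTING MYSQL DATABASE"), "db-host", "beacon");
    // url -> "jdbc:mysql://db-host/beacon"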


[33/34] ambari git commit: AMBARI-20753. HDP 3.0 TP - temporarily install RPMs hive2 and remove Pig dependency from Hive (alejandro)

Posted by nc...@apache.org.
AMBARI-20753. HDP 3.0 TP - temporarily install RPMs hive2 and remove Pig dependency from Hive (alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/358a5887
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/358a5887
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/358a5887

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 358a58871745fb65e93f1e968c03141ee9c6368a
Parents: 6806d38
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Wed Apr 12 17:35:29 2017 -0700
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Mon Apr 17 11:47:26 2017 -0700

----------------------------------------------------------------------
 .../common-services/HIVE/2.1.0.3.0/metainfo.xml          | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/358a5887/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/metainfo.xml b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/metainfo.xml
index 6d9098f..4225f19 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/metainfo.xml
@@ -424,6 +424,9 @@
         <osSpecific>
           <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
           <packages>
+            <!--
+            TODO AMBARI-20753
+            Re-add after Hive has all of its packages using the correct RPM name.
             <package>
               <name>hive-${stack_version}</name>
             </package>
@@ -438,10 +441,11 @@
               <condition>should_install_hive_atlas</condition>
             </package>
             <package>
-              <name>hive2-${stack_version}</name>
+              <name>tez-hive2-${stack_version}</name>
             </package>
+            -->
             <package>
-              <name>tez-hive2-${stack_version}</name>
+              <name>hive2-${stack_version}</name>
             </package>
           </packages>
         </osSpecific>
@@ -491,7 +495,10 @@
         <service>HDFS</service>
         <service>YARN</service>
         <service>TEZ</service>
+        <!-- TODO AMBARI-20753
+        Re-add after Pig service is being packaged.
         <service>PIG</service>
+        -->
         <service>SLIDER</service>
       </requiredServices>
 


[03/34] ambari git commit: Revert "AMBARI-20628. Ambari doesn't set properties correctly (magyari_sandor)"

Posted by nc...@apache.org.
Revert "AMBARI-20628. Ambari doesn't set properties correctly (magyari_sandor)"

This reverts commit 9741236f3c8b4a8a293fa0b7db5c64c0e7aa704d.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ef34cb4e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ef34cb4e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ef34cb4e

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: ef34cb4ee57c58687ba7db7adfbb5960c0b267da
Parents: 64e88e0
Author: Sandor Magyari <sm...@hortonworks.com>
Authored: Wed Apr 12 20:24:53 2017 +0200
Committer: Sandor Magyari <sm...@hortonworks.com>
Committed: Wed Apr 12 20:24:53 2017 +0200

----------------------------------------------------------------------
 .../stackadvisor/StackAdvisorBlueprintProcessor.java         | 8 ++------
 .../stackadvisor/StackAdvisorBlueprintProcessorTest.java     | 4 ++--
 2 files changed, 4 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/ef34cb4e/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessor.java
index b7eca71..0abcc14 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessor.java
@@ -175,11 +175,7 @@ public class StackAdvisorBlueprintProcessor {
     Preconditions.checkArgument(response.getRecommendations().getBlueprint().getConfigurations() != null,
       "Configurations are missing from the recommendation blueprint response.");
 
-    Map<String, Map<String, String>> userProvidedProperties = existingConfigurations;
-    if (topology.getConfigRecommendationStrategy() == ConfigRecommendationStrategy.ONLY_STACK_DEFAULTS_APPLY) {
-      userProvidedProperties = getUserProvidedProperties(topology, existingConfigurations);
-    }
-
+    Map<String, Map<String, String>> userProvidedProperties = getUserProvidedProperties(topology, existingConfigurations);
     Map<String, BlueprintConfigurations> recommendedConfigurations =
       response.getRecommendations().getBlueprint().getConfigurations();
     for (Map.Entry<String, BlueprintConfigurations> configEntry : recommendedConfigurations.entrySet()) {
@@ -187,7 +183,7 @@ public class StackAdvisorBlueprintProcessor {
       BlueprintConfigurations blueprintConfig = filterBlueprintConfig(configType, configEntry.getValue(),
         userProvidedProperties, topology);
       topology.getAdvisedConfigurations().put(configType, new AdvisedConfiguration(
-              blueprintConfig.getProperties(), blueprintConfig.getPropertyAttributes()));
+        blueprintConfig.getProperties(), blueprintConfig.getPropertyAttributes()));
     }
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/ef34cb4e/ambari-server/src/test/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessorTest.java
index 3febd48..49f070a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessorTest.java
@@ -114,7 +114,7 @@ public class StackAdvisorBlueprintProcessorTest {
     expect(clusterTopology.getAdvisedConfigurations()).andReturn(advisedConfigurations).anyTimes();
     expect(clusterTopology.getConfiguration()).andReturn(configuration).anyTimes();
     expect(clusterTopology.isClusterKerberosEnabled()).andReturn(false).anyTimes();
-    expect(clusterTopology.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.ONLY_STACK_DEFAULTS_APPLY).anyTimes();
+    expect(clusterTopology.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.ONLY_STACK_DEFAULTS_APPLY);
     expect(blueprint.getStack()).andReturn(stack).anyTimes();
     expect(stack.getVersion()).andReturn("2.3").anyTimes();
     expect(stack.getName()).andReturn("HDP").anyTimes();
@@ -149,7 +149,7 @@ public class StackAdvisorBlueprintProcessorTest {
     expect(clusterTopology.getAdvisedConfigurations()).andReturn(advisedConfigurations).anyTimes();
     expect(clusterTopology.getConfiguration()).andReturn(configuration).anyTimes();
     expect(clusterTopology.isClusterKerberosEnabled()).andReturn(false).anyTimes();
-    expect(clusterTopology.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.ONLY_STACK_DEFAULTS_APPLY).anyTimes();
+    expect(clusterTopology.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.ONLY_STACK_DEFAULTS_APPLY);
     expect(blueprint.getStack()).andReturn(stack).anyTimes();
     expect(stack.getVersion()).andReturn("2.3").anyTimes();
     expect(stack.getName()).andReturn("HDP").anyTimes();


[20/34] ambari git commit: AMBARI-20650: Remove Server attribute from the HTTP response headers (sangeetar)

Posted by nc...@apache.org.
AMBARI-20650: Remove Server attribute from the HTTP response headers (sangeetar)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f894e486
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f894e486
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f894e486

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: f894e486a6f8eab31aec5add517a35ec9a11b655
Parents: cfde36c
Author: Sangeeta Ravindran <sa...@apache.org>
Authored: Thu Apr 13 15:08:47 2017 -0700
Committer: Sangeeta Ravindran <sa...@apache.org>
Committed: Thu Apr 13 15:08:47 2017 -0700

----------------------------------------------------------------------
 .../main/java/org/apache/ambari/server/controller/AmbariServer.java | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f894e486/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
index 4e7af0c..f80d2dc 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
@@ -310,6 +310,7 @@ public class AmbariServer {
     initDB();
     server = new Server();
     server.setSessionIdManager(sessionIdManager);
+    server.setSendServerVersion(false);
     Server serverForAgent = new Server();
 
     setSystemProperties(configs);


[34/34] ambari git commit: Merge branch 'trunk' into branch-feature-AMBARI-12556

Posted by nc...@apache.org.
Merge branch 'trunk' into branch-feature-AMBARI-12556


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b013be0b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b013be0b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b013be0b

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: b013be0b97c140b47d77149dcce93ee4f4d90b74
Parents: 9f63871 358a588
Author: Nate Cole <nc...@hortonworks.com>
Authored: Mon Apr 17 16:17:45 2017 -0400
Committer: Nate Cole <nc...@hortonworks.com>
Committed: Mon Apr 17 16:17:45 2017 -0400

----------------------------------------------------------------------
 .../stackVersions/StackVersionsCreateCtrl.js    |  19 +
 .../ui/admin-web/app/scripts/services/Stack.js  |   3 +-
 .../views/stackVersions/stackVersionPage.html   |   3 +-
 .../ambari_agent/StatusCommandsExecutor.py      | 279 +-------
 .../libraries/functions/decorator.py            |  23 +-
 .../libraries/functions/jmx.py                  |   7 +-
 .../libraries/functions/namenode_ha_utils.py    |   6 +-
 .../libraries/functions/stack_select.py         |   5 +
 .../libraries/functions/stack_tools.py          |  10 +
 .../libraries/functions/version_select_util.py  |  42 ++
 .../libraries/script/script.py                  |  63 +-
 .../HIVE/package/scripts/mysql_service.py       |   5 +
 .../infra/solr/AmbariSolrCloudClient.java       |   4 +-
 .../logsearch/steps/LogSearchDockerSteps.java   |  14 +-
 .../ambari/logsearch/doc/DocConstants.java      |   2 +
 .../logsearch/manager/AuditLogsManager.java     |   5 +
 .../ambari/logsearch/manager/ManagerBase.java   |  30 +
 .../logsearch/manager/ServiceLogsManager.java   |   5 +
 .../logsearch/rest/AuditLogsResource.java       |  10 +
 .../logsearch/rest/ServiceLogsResource.java     |  11 +
 .../ambari/server/agent/ExecutionCommand.java   |  11 +
 .../StackAdvisorBlueprintProcessor.java         |  61 +-
 .../AmbariCustomCommandExecutionHelper.java     |  20 +-
 .../AmbariManagementControllerImpl.java         |   1 +
 .../ambari/server/controller/AmbariServer.java  |   1 +
 .../internal/ClientConfigResourceProvider.java  |   1 +
 .../ClusterStackVersionResourceProvider.java    |   3 +
 .../internal/ExportBlueprintRequest.java        |   6 -
 .../HostStackVersionResourceProvider.java       |   5 +
 .../internal/ProvisionClusterRequest.java       |  21 +-
 .../internal/ScaleClusterRequest.java           |   7 -
 .../internal/UpgradeResourceProvider.java       |   8 +-
 .../VersionDefinitionResourceProvider.java      |   4 +
 .../orm/entities/HostRoleCommandEntity.java     |   2 +-
 .../server/state/stack/upgrade/Grouping.java    |   2 +-
 .../state/stack/upgrade/StageWrapper.java       |  65 ++
 .../ambari/server/state/stack/upgrade/Task.java |   6 +
 .../server/state/stack/upgrade/TaskWrapper.java |  25 +-
 .../state/stack/upgrade/TaskWrapperBuilder.java |   5 +-
 .../topology/ClusterConfigurationRequest.java   |   8 +-
 .../ambari/server/topology/ClusterTopology.java |   2 +
 .../server/topology/ClusterTopologyImpl.java    |  37 +-
 .../server/topology/PersistedStateImpl.java     |   5 -
 .../ambari/server/topology/TopologyManager.java |  43 +-
 .../ambari/server/topology/TopologyRequest.java |   8 -
 .../validators/ChainedTopologyValidator.java    |  58 ++
 .../validators/HiveServiceValidator.java        |   2 +-
 .../validators/RequiredPasswordValidator.java   |   6 +-
 .../validators/StackConfigTypeValidator.java    |  64 ++
 .../validators/TopologyValidatorFactory.java    |  34 +
 .../validators/TopologyValidatorService.java    |  52 ++
 .../server/upgrade/SchemaUpgradeHelper.java     |   1 +
 .../server/upgrade/UpgradeCatalog251.java       |  81 +++
 .../main/resources/Ambari-DDL-Derby-CREATE.sql  |   2 +-
 .../main/resources/Ambari-DDL-MySQL-CREATE.sql  |   2 +-
 .../main/resources/Ambari-DDL-Oracle-CREATE.sql |   2 +-
 .../resources/Ambari-DDL-Postgres-CREATE.sql    |   2 +-
 .../resources/Ambari-DDL-SQLAnywhere-CREATE.sql |   2 +-
 .../resources/Ambari-DDL-SQLServer-CREATE.sql   |   2 +-
 .../HDFS/2.1.0.2.0/package/scripts/datanode.py  |  45 +-
 .../package/scripts/datanode_upgrade.py         |  38 +-
 .../2.1.0.2.0/package/scripts/hdfs_namenode.py  |  11 +-
 .../2.1.0.2.0/package/scripts/params_linux.py   |   2 +
 .../common-services/HIVE/2.1.0.3.0/metainfo.xml |  11 +-
 .../KAFKA/0.8.1/configuration/kafka-broker.xml  |  12 +-
 .../KAFKA/0.8.1/package/scripts/kafka_broker.py |   6 +
 .../YARN/3.0.0.3.0/service_advisor.py           |  76 +++
 .../custom_actions/scripts/install_packages.py  |   2 +-
 .../HIVE/package/scripts/mysql_service.py       |   5 +-
 .../HIVE/package/scripts/postgresql_service.py  |   5 +-
 .../2.0.6/hooks/after-INSTALL/scripts/params.py |   2 +-
 .../before-ANY/scripts/shared_initialization.py |   4 +-
 .../scripts/shared_initialization.py            |   5 +-
 .../stacks/HDP/2.3/services/stack_advisor.py    |   6 +-
 .../stacks/HDP/2.3/upgrades/upgrade-2.3.xml     |   2 +-
 .../stacks/HDP/2.3/upgrades/upgrade-2.4.xml     |   2 +-
 .../stacks/HDP/2.3/upgrades/upgrade-2.5.xml     |   2 +-
 .../stacks/HDP/2.3/upgrades/upgrade-2.6.xml     |   2 +-
 .../stacks/HDP/2.4/upgrades/upgrade-2.4.xml     |   2 +-
 .../stacks/HDP/2.4/upgrades/upgrade-2.5.xml     |   2 +-
 .../stacks/HDP/2.4/upgrades/upgrade-2.6.xml     |   2 +-
 .../stacks/HDP/2.5/upgrades/upgrade-2.5.xml     |   2 +-
 .../stacks/HDP/2.5/upgrades/upgrade-2.6.xml     |   2 +-
 .../services/YARN/configuration/yarn-env.xml    |  18 +
 .../stacks/HDP/2.6/services/stack_advisor.py    | 113 +++-
 .../stacks/HDP/2.6/upgrades/upgrade-2.6.xml     |   2 +-
 .../before-ANY/scripts/shared_initialization.py |   4 +-
 .../src/main/resources/upgrade-pack.xsd         |   1 +
 .../StackAdvisorBlueprintProcessorTest.java     |   6 +-
 .../ServicesNamenodeTruncateCheckTest.java      |   1 -
 .../AmbariCustomCommandExecutionHelperTest.java |   1 +
 .../AmbariManagementControllerTest.java         |   4 +-
 .../BlueprintConfigurationProcessorTest.java    | 660 ++++++++++---------
 .../internal/ProvisionClusterRequestTest.java   |  32 -
 .../internal/ScaleClusterRequestTest.java       |   6 -
 .../internal/UpgradeResourceProviderTest.java   |  66 +-
 .../ambari/server/stack/StackManagerTest.java   |  12 +
 .../ClusterDeployWithStartOnlyTest.java         |  37 +-
 ...InstallWithoutStartOnComponentLevelTest.java |  33 +-
 .../ClusterInstallWithoutStartTest.java         |  37 +-
 .../topology/ClusterTopologyImplTest.java       |  57 +-
 .../topology/RequiredPasswordValidatorTest.java | 113 +++-
 .../server/topology/TopologyManagerTest.java    |   5 +-
 .../validators/HiveServiceValidatorTest.java    |   3 +
 .../StackConfigTypeValidatorTest.java           | 126 ++++
 .../server/upgrade/UpgradeCatalog251Test.java   | 166 +++++
 .../server/upgrade/UpgradeCatalog300Test.java   |   6 +
 .../python/stacks/2.0.6/HDFS/test_datanode.py   |  66 +-
 .../hooks/after-INSTALL/test_after_install.py   |  25 +-
 .../stacks/2.5/common/test_stack_advisor.py     |   7 +
 .../stacks/2.6/common/test_stack_advisor.py     | 452 ++++++++++++-
 .../stacks/HDP/2.1.1/upgrades/upgrade_test.xml  |   2 +-
 .../manage_alert_notifications_controller.js    |  28 +-
 ambari-web/app/messages.js                      |   2 +
 .../app/styles/theme/bootstrap-ambari.css       |  20 +-
 .../stack_upgrade/stack_upgrade_wizard.hbs      |  10 +-
 .../main/alerts/create_alert_notification.hbs   |  12 +
 ...anage_alert_notifications_controller_test.js | 165 ++++-
 .../resources/ui/app/components/job-details.js  |  30 +-
 .../app/templates/components/bundle-config.hbs  |   2 +-
 .../app/templates/components/coord-config.hbs   |   2 +-
 .../app/templates/components/flow-designer.hbs  |   2 +-
 .../app/templates/components/hdfs-browser.hbs   |  44 +-
 .../components/workflow-job-details.hbs         |   6 +
 124 files changed, 2717 insertions(+), 1128 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b013be0b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/b013be0b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/b013be0b/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/b013be0b/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/b013be0b/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/b013be0b/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/b013be0b/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/b013be0b/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/b013be0b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesNamenodeTruncateCheckTest.java
----------------------------------------------------------------------
diff --cc ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesNamenodeTruncateCheckTest.java
index df1aa37,7d1f907..2954f0d
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesNamenodeTruncateCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesNamenodeTruncateCheckTest.java
@@@ -53,8 -50,6 +53,7 @@@ public class ServicesNamenodeTruncateCh
  
    private Clusters m_clusters = EasyMock.createMock(Clusters.class);
    private ServicesNamenodeTruncateCheck m_check = new ServicesNamenodeTruncateCheck();
-   private final Map<String, String> m_configMap = new HashMap<String, String>();
 +  private RepositoryVersionDAO m_repositoryVersionDAO = EasyMock.createMock(RepositoryVersionDAO.class);
    private final Map<String, String> m_configMap = new HashMap<>();
  
    @Before

http://git-wip-us.apache.org/repos/asf/ambari/blob/b013be0b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java
----------------------------------------------------------------------


[24/34] ambari git commit: AMBARI-20682. Wait For DataNodes To Shutdown During a Rolling Upgrade. Fix (dlysnichenko)

Posted by nc...@apache.org.
AMBARI-20682. Wait For DataNodes To Shutdown During a Rolling Upgrade. Fix (dlysnichenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e9cf9dd1
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e9cf9dd1
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e9cf9dd1

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: e9cf9dd17a7fcf170bb253a8d0b037910299b38c
Parents: bf63795
Author: Lisnichenko Dmitro <dl...@hortonworks.com>
Authored: Fri Apr 14 11:35:20 2017 +0300
Committer: Lisnichenko Dmitro <dl...@hortonworks.com>
Committed: Fri Apr 14 11:35:20 2017 +0300

----------------------------------------------------------------------
 .../src/main/python/resource_management/libraries/script/script.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e9cf9dd1/ambari-common/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index bad09d2..2c441ad 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -370,7 +370,7 @@ class Script(object):
       
       show_logs(log_folder, user, lines_count=COUNT_OF_LAST_LINES_OF_OUT_FILES_LOGGED, mask=OUT_FILES_MASK)
 
-  def post_start(self):
+  def post_start(self, env):
     pid_files = self.get_pid_files()
     if pid_files == []:
       Logger.logger.warning("Pid files for current script are not defined")


[31/34] ambari git commit: AMBARI-20737 Able to hide the Use Public Repository option on Register Version page on Ambari Admin View (dili)

Posted by nc...@apache.org.
AMBARI-20737 Able to hide the Use Public Repository option on Register Version page on Ambari Admin View (dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1941eedb
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1941eedb
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1941eedb

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 1941eedb5236013c7ce5477466bb42b837b7be3b
Parents: 22b114d
Author: Di Li <di...@apache.org>
Authored: Mon Apr 17 12:17:27 2017 -0400
Committer: Di Li <di...@apache.org>
Committed: Mon Apr 17 12:17:27 2017 -0400

----------------------------------------------------------------------
 .../stackVersions/StackVersionsCreateCtrl.js     | 19 +++++++++++++++++++
 .../ui/admin-web/app/scripts/services/Stack.js   |  3 ++-
 .../views/stackVersions/stackVersionPage.html    |  3 +--
 .../VersionDefinitionResourceProvider.java       |  4 ++++
 .../ambari/server/stack/StackManagerTest.java    | 12 ++++++++++++
 5 files changed, 38 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/1941eedb/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
index b3c27dc..69c35c0 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
@@ -29,6 +29,7 @@ angular.module('ambariAdminConsole')
   $scope.stackIds = [];
   $scope.allVersions = [];
   $scope.networkLost = false;
+  $scope.stackRepoUpdateLinkExists = true;
   $scope.skipValidation = false;
   $scope.useRedhatSatellite = false;
 
@@ -543,6 +544,23 @@ angular.module('ambariAdminConsole')
     })[0];
   };
 
+  /**
+   * Set stackRepoUpdateLinkExists to true if at least one stack has the repo URL link in its repoinfo.xml
+   * */
+  $scope.setStackRepoUpdateLinkExists = function (versions) {
+    var stackRepoUpdateLinkExists = versions.find(function(_version){
+      return _version.stackRepoUpdateLinkExists;
+    });
+
+    // Found at least one version with the stack repo update link
+    $scope.stackRepoUpdateLinkExists = !!stackRepoUpdateLinkExists;
+  };
+
   $scope.setNetworkIssues = function (versions) {
    $scope.networkLost = !versions.find(function(_version){
      return !_version.stackDefault;
@@ -576,6 +594,7 @@ angular.module('ambariAdminConsole')
         $scope.selectedPublicRepoVersion = $scope.activeStackVersion;
         $scope.setVersionSelected($scope.activeStackVersion);
         $scope.setNetworkIssues(versions);
+        $scope.setStackRepoUpdateLinkExists(versions);
         $scope.validateRepoUrl();
         $scope.availableStackRepoList = versions.length == 1 ? [] : versions;
       }

http://git-wip-us.apache.org/repos/asf/ambari/blob/1941eedb/ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/Stack.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/Stack.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/Stack.js
index e028906..b496987 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/Stack.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/Stack.js
@@ -88,7 +88,7 @@ angular.module('ambariAdminConsole')
     },
 
     allPublicStackVersions: function() {
-      var url = '/version_definitions?fields=VersionDefinition/stack_default,operating_systems/repositories/Repositories/*,VersionDefinition/stack_services,VersionDefinition/repository_version' +
+      var url = '/version_definitions?fields=VersionDefinition/stack_default,VersionDefinition/stack_repo_update_link_exists,operating_systems/repositories/Repositories/*,VersionDefinition/stack_services,VersionDefinition/repository_version' +
         '&VersionDefinition/show_available=true';
       var deferred = $q.defer();
       $http.get(Settings.baseUrl + url, {mock: 'version/versions.json'})
@@ -100,6 +100,7 @@ angular.module('ambariAdminConsole')
               stackName: version.VersionDefinition.stack_name,
               stackVersion: version.VersionDefinition.stack_version,
               stackDefault: version.VersionDefinition.stack_default,
+              stackRepoUpdateLinkExists: version.VersionDefinition.stack_repo_update_link_exists,
               stackNameVersion:  version.VersionDefinition.stack_name + '-' + version.VersionDefinition.stack_version,
               displayName: version.VersionDefinition.stack_name + '-' + version.VersionDefinition.repository_version.split('-')[0], //HDP-2.3.4.0
               displayNameFull: version.VersionDefinition.stack_name + '-' + version.VersionDefinition.repository_version, //HDP-2.3.4.0-23

http://git-wip-us.apache.org/repos/asf/ambari/blob/1941eedb/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html b/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html
index 3bee2a1..fe08802 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html
@@ -115,8 +115,7 @@
   </div>
 
   <div id="upload-definition-file-panel" ng-if="createController">
-
-    <div class="col-sm-12 big-radio clearfix" ng-class="{'disabled' : networkLost || useRedhatSatellite}">
+    <div class="col-sm-12 big-radio clearfix hide-soft" ng-class="{'disabled' : networkLost || useRedhatSatellite,'visible':stackRepoUpdateLinkExists}">
       <input type="radio" ng-model="selectedOption.index" value="1" ng-change="togglePublicLocalOptionSelect()" ng-disabled="networkLost || useRedhatSatellite">
       <span>{{'versions.usePublic' | translate}}</span>
       <a id="public-disabled-link" href="javascript:void(0);" ng-if="networkLost" ng-click="showPublicRepoDisabledDialog()">{{'versions.networkIssues.networkLost'| translate}}</a>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1941eedb/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/VersionDefinitionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/VersionDefinitionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/VersionDefinitionResourceProvider.java
index 352aa2d..7914fd9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/VersionDefinitionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/VersionDefinitionResourceProvider.java
@@ -102,6 +102,7 @@ public class VersionDefinitionResourceProvider extends AbstractAuthorizedResourc
   protected static final String VERSION_DEF_AVAILABLE_SERVICES       = "VersionDefinition/services";
   protected static final String VERSION_DEF_STACK_SERVICES           = "VersionDefinition/stack_services";
   protected static final String VERSION_DEF_STACK_DEFAULT            = "VersionDefinition/stack_default";
+  protected static final String VERSION_DEF_STACK_REPO_UPDATE_LINK_EXISTS = "VersionDefinition/stack_repo_update_link_exists";
   protected static final String VERSION_DEF_DISPLAY_NAME             = "VersionDefinition/display_name";
   protected static final String VERSION_DEF_VALIDATION               = "VersionDefinition/validation";
   protected static final String SHOW_AVAILABLE                       = "VersionDefinition/show_available";
@@ -157,6 +158,7 @@ public class VersionDefinitionResourceProvider extends AbstractAuthorizedResourc
       VERSION_DEF_AVAILABLE_SERVICES,
       VERSION_DEF_STACK_SERVICES,
       VERSION_DEF_STACK_DEFAULT,
+      VERSION_DEF_STACK_REPO_UPDATE_LINK_EXISTS,
       VERSION_DEF_DISPLAY_NAME,
       VERSION_DEF_VALIDATION,
       VERSION_DEF_MIN_JDK,
@@ -617,6 +619,7 @@ public class VersionDefinitionResourceProvider extends AbstractAuthorizedResourc
     setResourceProperty(resource, VERSION_DEF_RELEASE_NOTES, xml.release.releaseNotes, requestedIds);
     setResourceProperty(resource, VERSION_DEF_RELEASE_VERSION, xml.release.version, requestedIds);
     setResourceProperty(resource, VERSION_DEF_STACK_DEFAULT, xml.isStackDefault(), requestedIds);
+    setResourceProperty(resource, VERSION_DEF_STACK_REPO_UPDATE_LINK_EXISTS, (stack.getRepositoryXml().getLatestURI() != null), requestedIds);
     setResourceProperty(resource, VERSION_DEF_DISPLAY_NAME, xml.release.display, requestedIds);
 
     if (null != validations) {
@@ -686,6 +689,7 @@ public class VersionDefinitionResourceProvider extends AbstractAuthorizedResourc
       setResourceProperty(resource, VERSION_DEF_STACK_SERVICES, xml.getStackServices(stack), requestedIds);
       setResourceProperty(resource, VERSION_DEF_MIN_JDK, stack.getMinJdk(), requestedIds);
       setResourceProperty(resource, VERSION_DEF_MAX_JDK, stack.getMaxJdk(), requestedIds);
+      setResourceProperty(resource, VERSION_DEF_STACK_REPO_UPDATE_LINK_EXISTS, (stack.getRepositoryXml().getLatestURI() != null), requestedIds);
     }
 
     return resource;

http://git-wip-us.apache.org/repos/asf/ambari/blob/1941eedb/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
index 507c560..8c7ab9d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
@@ -1025,4 +1025,16 @@ public class StackManagerTest {
     assertTrue(logsearchLogfeederRoleCommand + " should be dependent of " + infraSolrRoleCommand, logsearchLogfeederBlockers.contains(infraSolrRoleCommand));
     assertTrue(logsearchLogfeederRoleCommand + " should be dependent of " + logsearchServerRoleCommand, logsearchLogfeederBlockers.contains(logsearchServerRoleCommand));
   }
+
+  @Test
+  public void testVersionDefinitionStackRepoUpdateLinkExists(){
+    // HDP 2.1.1 declares a latest URI in its repoinfo.xml; HDP 2.0.8 does not
+    StackInfo stack = stackManager.getStack("HDP", "2.1.1");
+    String latestUri = stack.getRepositoryXml().getLatestURI();
+    assertTrue(latestUri != null);
+
+    stack = stackManager.getStack("HDP", "2.0.8");
+    latestUri = stack.getRepositoryXml().getLatestURI();
+    assertTrue(latestUri == null);
+  }
 }


[08/34] ambari git commit: AMBARI-20744. Log Search: Add get clusters endpoint for service and audit logs (oleewere)

Posted by nc...@apache.org.
AMBARI-20744. Log Search: Add get clusters endpoint for service and audit logs (oleewere)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/52203c3e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/52203c3e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/52203c3e

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 52203c3ea2d5f04d3803b11e61aaa1eafd72b64a
Parents: 4f2523e
Author: oleewere <ol...@gmail.com>
Authored: Wed Apr 12 17:56:40 2017 +0200
Committer: oleewere <ol...@gmail.com>
Committed: Thu Apr 13 12:31:23 2017 +0200

----------------------------------------------------------------------
 .../ambari/logsearch/doc/DocConstants.java      |  2 ++
 .../logsearch/manager/AuditLogsManager.java     |  5 ++++
 .../ambari/logsearch/manager/ManagerBase.java   | 30 ++++++++++++++++++++
 .../logsearch/manager/ServiceLogsManager.java   |  5 ++++
 .../logsearch/rest/AuditLogsResource.java       | 10 +++++++
 .../logsearch/rest/ServiceLogsResource.java     | 11 +++++++
 6 files changed, 63 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/52203c3e/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/doc/DocConstants.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/doc/DocConstants.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/doc/DocConstants.java
index 2afa3dd..00adb67 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/doc/DocConstants.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/doc/DocConstants.java
@@ -48,6 +48,7 @@ public class DocConstants {
   }
 
   public class AuditOperationDescriptions {
+    public static final String GET_AUDIT_CLUSTERS_OD = "Get all of the clusters for audit logs";
     public static final String GET_AUDIT_SCHEMA_FIELD_LIST_OD = "Get list of schema fields in audit collection";
     public static final String GET_AUDIT_LOGS_OD = "Get the list of audit log details";
     public static final String PURGE_AUDIT_LOGS_OD = "Purge audit logs by criteria";
@@ -77,6 +78,7 @@ public class DocConstants {
   }
 
   public class ServiceOperationDescriptions {
+    public static final String GET_SERVICE_CLUSTERS_OD = "Get all of the clusters for service logs";
     public static final String SEARCH_LOGS_OD = "Search log entries";
     public static final String PURGE_LOGS_OD = "Purge service logs by criteria";
     public static final String GET_HOSTS_OD = "Get the list of service hosts currently active or having data in Solr";

http://git-wip-us.apache.org/repos/asf/ambari/blob/52203c3e/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/AuditLogsManager.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/AuditLogsManager.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/AuditLogsManager.java
index 25e3271..99d2675 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/AuditLogsManager.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/AuditLogsManager.java
@@ -71,6 +71,7 @@ import org.springframework.data.solr.core.query.SimpleFacetQuery;
 import org.springframework.data.solr.core.query.SimpleQuery;
 
 import static org.apache.ambari.logsearch.solr.SolrConstants.AuditLogConstants.AUDIT_COMPONENT;
+import static org.apache.ambari.logsearch.solr.SolrConstants.CommonLogConstants.CLUSTER;
 
 @Named
 public class AuditLogsManager extends ManagerBase<SolrAuditLogData, AuditLogResponse> {
@@ -221,4 +222,8 @@ public class AuditLogsManager extends ManagerBase<SolrAuditLogData, AuditLogResp
     UpdateResponse updateResponse = auditSolrDao.deleteByQuery(solrQuery, "/audit/logs");
     return new StatusMessage(updateResponse.getStatus());
   }
+
+  public List<String> getClusters() {
+    return getClusters(auditSolrDao, CLUSTER, "/audit/logs/clusters");
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/52203c3e/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/ManagerBase.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/ManagerBase.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/ManagerBase.java
index 6b40cb5..cddfc85 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/ManagerBase.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/ManagerBase.java
@@ -23,17 +23,25 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 
+import com.google.common.collect.Lists;
+import org.apache.ambari.logsearch.common.LogSearchConstants;
+import org.apache.ambari.logsearch.model.response.GroupListResponse;
 import org.apache.ambari.logsearch.model.response.LogData;
 import org.apache.ambari.logsearch.model.response.LogSearchResponse;
 import org.apache.ambari.logsearch.dao.SolrDaoBase;
+import org.apache.ambari.logsearch.util.SolrUtil;
+import org.apache.commons.collections.CollectionUtils;
 import org.apache.log4j.Logger;
 import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.response.FacetField;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.common.SolrDocumentList;
 import org.springframework.data.solr.core.DefaultQueryParser;
 import org.springframework.data.solr.core.query.SimpleQuery;
 import org.springframework.data.solr.core.query.SolrDataQuery;
 
+import static org.apache.ambari.logsearch.solr.SolrConstants.CommonLogConstants.CLUSTER;
+
 public abstract class ManagerBase<LOG_DATA_TYPE extends LogData, SEARCH_RESPONSE extends LogSearchResponse> extends JsonManagerBase {
   private static final Logger logger = Logger.getLogger(ManagerBase.class);
 
@@ -92,4 +100,26 @@ public abstract class ManagerBase<LOG_DATA_TYPE extends LogData, SEARCH_RESPONSE
   protected abstract List<LOG_DATA_TYPE> convertToSolrBeans(QueryResponse response);
 
   protected abstract SEARCH_RESPONSE createLogSearchResponse();
+
+  protected List<String> getClusters(SolrDaoBase solrDaoBase, String clusterField, String event) {
+    List<String> clusterResponse = Lists.newArrayList();
+    SolrQuery solrQuery = new SolrQuery();
+    solrQuery.setQuery("*:*");
+    SolrUtil.setFacetField(solrQuery, clusterField);
+    SolrUtil.setFacetSort(solrQuery, LogSearchConstants.FACET_INDEX);
+
+    QueryResponse response = solrDaoBase.process(solrQuery, event);
+    if (response == null) {
+      return clusterResponse;
+    }
+    List<FacetField> clusterFields = response.getFacetFields();
+    if (CollectionUtils.isNotEmpty(clusterFields)) {
+      FacetField clusterFacets = clusterFields.get(0);
+      for (FacetField.Count clusterCount : clusterFacets.getValues()) {
+        clusterResponse.add(clusterCount.getName());
+      }
+    }
+    return clusterResponse;
+  }
+
 }

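The getClusters helper above relies on plain Solr field faceting: faceting on the cluster field returns each distinct cluster name exactly once, and FACET_INDEX asks Solr to sort the buckets lexicographically. A standalone sketch of the same idea in raw SolrJ, assuming an Infra Solr instance at localhost:8886 and a hadoop_logs collection (both assumptions for illustration; the patch itself routes the query through SolrDaoBase.process):

import java.util.ArrayList;
import java.util.List;

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.FacetField;
import org.apache.solr.client.solrj.response.QueryResponse;

public class ClusterFacetSketch {
  public static void main(String[] args) throws Exception {
    try (HttpSolrClient solr = new HttpSolrClient.Builder(
        "http://localhost:8886/solr/hadoop_logs").build()) {
      SolrQuery query = new SolrQuery("*:*");
      query.setRows(0);               // no documents needed, only facet buckets
      query.addFacetField("cluster"); // one bucket per distinct cluster value
      query.setFacetSort("index");    // lexicographic order, like FACET_INDEX
      QueryResponse response = solr.query(query);
      List<String> clusters = new ArrayList<>();
      for (FacetField.Count bucket : response.getFacetField("cluster").getValues()) {
        clusters.add(bucket.getName()); // mirrors the loop in ManagerBase.getClusters
      }
      System.out.println(clusters);
    }
  }
}
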
http://git-wip-us.apache.org/repos/asf/ambari/blob/52203c3e/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/ServiceLogsManager.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/ServiceLogsManager.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/ServiceLogsManager.java
index 9ce209b..cb9e806 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/ServiceLogsManager.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/ServiceLogsManager.java
@@ -72,6 +72,7 @@ import org.apache.ambari.logsearch.model.response.ServiceLogResponse;
 import org.apache.ambari.logsearch.converter.BaseServiceLogRequestQueryConverter;
 import org.apache.ambari.logsearch.converter.ServiceLogTruncatedRequestQueryConverter;
 import org.apache.ambari.logsearch.solr.ResponseDataGenerator;
+import org.apache.ambari.logsearch.solr.SolrConstants;
 import org.apache.ambari.logsearch.solr.model.SolrComponentTypeLogData;
 import org.apache.ambari.logsearch.solr.model.SolrHostLogData;
 import org.apache.ambari.logsearch.solr.model.SolrServiceLogData;
@@ -614,4 +615,8 @@ public class ServiceLogsManager extends ManagerBase<SolrServiceLogData, ServiceL
     UpdateResponse updateResponse = serviceLogsSolrDao.deleteByQuery(solrQuery, "/service/logs");
     return new StatusMessage(updateResponse.getStatus());
   }
+
+  public List<String> getClusters() {
+    return getClusters(serviceLogsSolrDao, CLUSTER, "/service/logs/clusters");
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/52203c3e/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/AuditLogsResource.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/AuditLogsResource.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/AuditLogsResource.java
index 00bf07c..5312da8 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/AuditLogsResource.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/AuditLogsResource.java
@@ -44,6 +44,8 @@ import org.apache.ambari.logsearch.model.request.impl.AuditLogRequest;
 import org.apache.ambari.logsearch.manager.AuditLogsManager;
 import org.springframework.context.annotation.Scope;
 
+import java.util.List;
+
 import static org.apache.ambari.logsearch.doc.DocConstants.AuditOperationDescriptions.*;
 
 @Api(value = "audit/logs", description = "Audit log operations")
@@ -117,4 +119,12 @@ public class AuditLogsResource {
     return auditLogsManager.getServiceLoad(request);
   }
 
+  @GET
+  @Path("/clusters")
+  @Produces({"application/json"})
+  @ApiOperation(GET_AUDIT_CLUSTERS_OD)
+  public List<String> getClustersForAuditLog() {
+    return auditLogsManager.getClusters();
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/52203c3e/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/ServiceLogsResource.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/ServiceLogsResource.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/ServiceLogsResource.java
index 498da69..e02acb8 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/ServiceLogsResource.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/ServiceLogsResource.java
@@ -58,6 +58,8 @@ import org.apache.ambari.logsearch.model.response.ServiceLogResponse;
 import org.apache.ambari.logsearch.manager.ServiceLogsManager;
 import org.springframework.context.annotation.Scope;
 
+import java.util.List;
+
 import static org.apache.ambari.logsearch.doc.DocConstants.ServiceOperationDescriptions.*;
 
 @Api(value = "service/logs", description = "Service log operations")
@@ -219,4 +221,13 @@ public class ServiceLogsResource {
   public HostLogFilesResponse getHostLogFiles(@Valid @BeanParam HostLogFilesRequest request) {
     return serviceLogsManager.getHostLogFileData(request);
   }
+
+  @GET
+  @Path("/clusters")
+  @Produces({"application/json"})
+  @ApiOperation(GET_SERVICE_CLUSTERS_OD)
+  public List<String> getClustersForServiceLog() {
+    return serviceLogsManager.getClusters();
+  }
+
 }

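With both resources wired up, the new endpoints answer a plain HTTP GET with a JSON array of cluster names. A minimal client sketch, assuming a Log Search server on localhost:61888 with its REST API rooted at /api/v1 (host, port, and root path are deployment assumptions):

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class ClustersEndpointSketch {
  static String get(String path) throws Exception {
    URL url = new URL("http://localhost:61888" + path);
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    StringBuilder body = new StringBuilder();
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      String line;
      while ((line = in.readLine()) != null) {
        body.append(line);
      }
    }
    return body.toString();
  }

  public static void main(String[] args) throws Exception {
    // Each call should print something like ["cl1","cl2"].
    System.out.println(get("/api/v1/service/logs/clusters"));
    System.out.println(get("/api/v1/audit/logs/clusters"));
  }
}
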

[27/34] ambari git commit: AMBARI-20755 topology configuration type validation on blueprint deployments

Posted by nc...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/103e49a8/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
index dba4043..5c1836a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
@@ -58,8 +58,10 @@ import org.apache.ambari.server.topology.HostGroup;
 import org.apache.ambari.server.topology.HostGroupImpl;
 import org.apache.ambari.server.topology.HostGroupInfo;
 import org.apache.ambari.server.topology.InvalidTopologyException;
+import org.apache.ambari.server.topology.TopologyRequest;
 import org.apache.commons.lang.StringUtils;
 import org.easymock.EasyMockRule;
+import org.easymock.EasyMockSupport;
 import org.easymock.Mock;
 import org.easymock.MockType;
 import org.junit.After;
@@ -77,7 +79,7 @@ import com.google.common.collect.Maps;
 /**
  * BlueprintConfigurationProcessor unit tests.
  */
-public class BlueprintConfigurationProcessorTest {
+public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
 
   private static final Configuration EMPTY_CONFIG = new Configuration(Collections.<String, Map<String, String>>emptyMap(), Collections.<String, Map<String, Map<String, String>>>emptyMap());
   private final Map<String, Collection<String>> serviceComponents = new HashMap<>();
@@ -97,6 +99,9 @@ public class BlueprintConfigurationProcessorTest {
   @Mock(type = MockType.NICE)
   private Stack stack;
 
+  @Mock
+  private TopologyRequest topologyRequestMock;
+
   @Before
   public void init() throws Exception {
     expect(bp.getStack()).andReturn(stack).anyTimes();
@@ -109,7 +114,7 @@ public class BlueprintConfigurationProcessorTest {
     expect(stack.getConfigurationPropertiesWithMetadata(anyObject(String.class), anyObject(String.class))).andReturn(Collections.<String, Stack.ConfigProperty>emptyMap()).anyTimes();
 
     expect(serviceInfo.getRequiredProperties()).andReturn(
-        Collections.<String, org.apache.ambari.server.state.PropertyInfo>emptyMap()).anyTimes();
+      Collections.<String, org.apache.ambari.server.state.PropertyInfo>emptyMap()).anyTimes();
     expect(serviceInfo.getRequiredServices()).andReturn(Collections.<String>emptyList()).anyTimes();
 
     Collection<String> hdfsComponents = new HashSet<>();
@@ -208,7 +213,7 @@ public class BlueprintConfigurationProcessorTest {
     properties.put("yarn-site", typeProps);
 
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> hgComponents = new HashSet<>();
     hgComponents.add("NAMENODE");
@@ -295,7 +300,7 @@ public class BlueprintConfigurationProcessorTest {
       ImmutableMap.of("admin-properties", rangerAdminProperties);
 
 
-    Configuration clusterConfig = new Configuration(properties, ImmutableMap.<String, Map<String,Map<String,String>>>of());
+    Configuration clusterConfig = new Configuration(properties, ImmutableMap.<String, Map<String, Map<String, String>>>of());
 
     Collection<String> hostGroup1Components = ImmutableSet.of("RANGER_ADMIN");
     TestHostGroup group1 = new TestHostGroup("group1", hostGroup1Components, Collections.singleton("testhost1"));
@@ -331,10 +336,10 @@ public class BlueprintConfigurationProcessorTest {
     parentProperties.put("yarn-site", parentYarnSiteProps);
 
     Configuration parentClusterConfig = new Configuration(parentProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
+      Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
 
     Collection<String> hgComponents = new HashSet<>();
     hgComponents.add("NAMENODE");
@@ -367,7 +372,7 @@ public class BlueprintConfigurationProcessorTest {
     properties.put("yarn-site", typeProps);
 
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> hgComponents = new HashSet<>();
     hgComponents.add("NAMENODE");
@@ -385,14 +390,14 @@ public class BlueprintConfigurationProcessorTest {
     group2Properties.put("yarn-site", group2YarnSiteProps);
     // host group config -> BP config -> cluster scoped config
     Configuration group2BPConfiguration = new Configuration(Collections.<String, Map<String, String>>emptyMap(),
-        Collections.<String, Map<String, Map<String, String>>>emptyMap(), clusterConfig);
+      Collections.<String, Map<String, Map<String, String>>>emptyMap(), clusterConfig);
 
     Configuration group2Configuration = new Configuration(group2Properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap(), group2BPConfiguration);
+      Collections.<String, Map<String, Map<String, String>>>emptyMap(), group2BPConfiguration);
 
     // set config on hostgroup
     TestHostGroup group2 = new TestHostGroup("group2", hgComponents2,
-        Collections.singleton("testhost2"), group2Configuration);
+      Collections.singleton("testhost2"), group2Configuration);
 
     Collection<TestHostGroup> hostGroups = new HashSet<>();
     hostGroups.add(group1);
@@ -404,7 +409,7 @@ public class BlueprintConfigurationProcessorTest {
 
     assertEquals("%HOSTGROUP::group1%", properties.get("yarn-site").get("yarn.resourcemanager.hostname"));
     assertEquals("%HOSTGROUP::group1%",
-        group2Configuration.getPropertyValue("yarn-site", "yarn.resourcemanager.resource-tracker.address"));
+      group2Configuration.getPropertyValue("yarn-site", "yarn.resourcemanager.resource-tracker.address"));
   }
 
   @Test
@@ -415,7 +420,7 @@ public class BlueprintConfigurationProcessorTest {
     properties.put("core-site", typeProps);
 
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> hgComponents = new HashSet<>();
     hgComponents.add("NAMENODE");
@@ -447,7 +452,7 @@ public class BlueprintConfigurationProcessorTest {
     properties.put("yarn-site", typeProps);
 
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> hgComponents = new HashSet<>();
     hgComponents.add("NAMENODE");
@@ -479,7 +484,7 @@ public class BlueprintConfigurationProcessorTest {
     properties.put("hbase-site", typeProps);
 
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> hgComponents = new HashSet<>();
     hgComponents.add("NAMENODE");
@@ -526,7 +531,7 @@ public class BlueprintConfigurationProcessorTest {
     properties.put("webhcat-site", typeProps);
 
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> hgComponents = new HashSet<>();
     hgComponents.add("NAMENODE");
@@ -577,7 +582,7 @@ public class BlueprintConfigurationProcessorTest {
     properties.put("storm-site", typeProps);
 
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> hgComponents = new HashSet<>();
     hgComponents.add("NAMENODE");
@@ -637,7 +642,7 @@ public class BlueprintConfigurationProcessorTest {
     properties.put("hive-site", hiveSiteProps);
 
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> hgComponents = new HashSet<>();
     hgComponents.add("NAMENODE");
@@ -671,7 +676,7 @@ public class BlueprintConfigurationProcessorTest {
     properties.put("hive-site", typeProps);
 
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> hgComponents = new HashSet<>();
     hgComponents.add("NAMENODE");
@@ -756,7 +761,7 @@ public class BlueprintConfigurationProcessorTest {
     assertFalse("Password property should have been excluded",
       properties.get("ranger-yarn-plugin-properties").containsKey("REPOSITORY_CONFIG_PASSWORD"));
     assertFalse("Password property should have been excluded",
-                properties.get("ranger-yarn-plugin-properties").containsKey("SSL_KEYSTORE_PASSWORD"));
+      properties.get("ranger-yarn-plugin-properties").containsKey("SSL_KEYSTORE_PASSWORD"));
     assertFalse("Password property should have been excluded",
       properties.get("ranger-yarn-plugin-properties").containsKey("SSL_TRUSTSTORE_PASSWORD"));
     assertFalse("Password property should have been excluded",
@@ -771,10 +776,10 @@ public class BlueprintConfigurationProcessorTest {
 
     // verify that the following password properties matching the "*_SECRET" rule have been excluded
     assertFalse("Secret property should have been excluded",
-	      properties.get("secret-test-properties").containsKey("knox_master_secret"));
+      properties.get("secret-test-properties").containsKey("knox_master_secret"));
     // verify that the property that does not match the "*_SECRET" rule is still included
     assertTrue("Expected secret property not found",
-	      properties.get("secret-test-properties").containsKey("test.secret.should.be.included"));
+      properties.get("secret-test-properties").containsKey("test.secret.should.be.included"));
     // verify the custom properties map has been modified by the filters
     assertEquals("custom-test-properties type was not properly exported",
       2, properties.get("custom-test-properties").size());
@@ -808,7 +813,7 @@ public class BlueprintConfigurationProcessorTest {
     falconStartupProperties.put("*.falcon.http.authentication.kerberos.principal", "HTTP/" + expectedHostName + "@EXAMPLE.COM");
 
     Configuration clusterConfig = new Configuration(configProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     // note: test hostgroups may not accurately reflect the required components for the config properties
     // which are mapped to them.  Only the hostgroup name is used for hostgroup resolution and the components
@@ -828,13 +833,13 @@ public class BlueprintConfigurationProcessorTest {
     configProcessor.doUpdateForBlueprintExport();
 
     assertEquals("Falcon Broker URL property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), falconStartupProperties.get("*.broker.url"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), falconStartupProperties.get("*.broker.url"));
 
     assertEquals("Falcon Kerberos Principal property not properly exported",
       "falcon/" + "%HOSTGROUP::" + expectedHostGroupName + "%" + "@EXAMPLE.COM", falconStartupProperties.get("*.falcon.service.authentication.kerberos.principal"));
 
     assertEquals("Falcon Kerberos HTTP Principal property not properly exported",
-        "HTTP/" + "%HOSTGROUP::" + expectedHostGroupName + "%" + "@EXAMPLE.COM", falconStartupProperties.get("*.falcon.http.authentication.kerberos.principal"));
+      "HTTP/" + "%HOSTGROUP::" + expectedHostGroupName + "%" + "@EXAMPLE.COM", falconStartupProperties.get("*.falcon.http.authentication.kerberos.principal"));
   }
 
   @Test
@@ -925,7 +930,7 @@ public class BlueprintConfigurationProcessorTest {
     assertFalse("kdc_hosts should not be present in exported blueprint in kerberos-env",
       kerberosEnvProperties.containsKey("kdc_hosts"));
     assertFalse("master_kdc should not be present in exported blueprint in kerberos-env",
-        kerberosEnvProperties.containsKey("master_kdc"));
+      kerberosEnvProperties.containsKey("master_kdc"));
     assertEquals("hadoop.proxyuser.yarn.hosts was not exported correctly",
       createExportedHostName("host_group_1"), coreSiteProperties.get("hadoop.proxyuser.yarn.hosts"));
   }
@@ -962,7 +967,7 @@ public class BlueprintConfigurationProcessorTest {
     hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeTwo, expectedHostName + ":" + expectedPortNum);
 
     Configuration clusterConfig = new Configuration(configProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     // note: test hostgroups may not accurately reflect the required components for the config properties
     // which are mapped to them.  Only the hostgroup name is used for hostgroup resolution and the components
@@ -982,19 +987,19 @@ public class BlueprintConfigurationProcessorTest {
     configProcessor.doUpdateForBlueprintExport();
 
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeOne));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeOne));
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeTwo));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeTwo));
 
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeOne));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeOne));
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeTwo));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeTwo));
 
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeOne));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeOne));
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeTwo));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeTwo));
   }
 
   @Test
@@ -1035,7 +1040,7 @@ public class BlueprintConfigurationProcessorTest {
     hadoopEnvProperties.put("dfs_ha_initial_namenode_standby", expectedHostName);
 
     Configuration clusterConfig = new Configuration(configProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     // note: test hostgroups may not accurately reflect the required components for the config properties
     // which are mapped to them.  Only the hostgroup name is used for hostgroup resolution and the components
@@ -1055,19 +1060,19 @@ public class BlueprintConfigurationProcessorTest {
     configProcessor.doUpdateForBlueprintExport();
 
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeOne));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeOne));
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeTwo));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeTwo));
 
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeOne));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeOne));
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeTwo));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeTwo));
 
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeOne));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeOne));
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeTwo));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeTwo));
 
     assertNull("Initial NameNode HA property exported although should not have", hadoopEnvProperties.get("dfs_ha_initial_namenode_active"));
     assertNull("Initial NameNode HA property exported although should not have", hadoopEnvProperties.get("dfs_ha_initial_namenode_standby"));
@@ -1095,7 +1100,7 @@ public class BlueprintConfigurationProcessorTest {
     accumuloSiteProperties.put("instance.volumes", "hdfs://" + expectedNameService + "/apps/accumulo/data");
 
     Configuration clusterConfig = new Configuration(configProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     // note: test hostgroups may not accurately reflect the required components for the config properties
     // which are mapped to them.  Only the hostgroup name is used for hostgroup resolution and the components
@@ -1116,11 +1121,11 @@ public class BlueprintConfigurationProcessorTest {
 
     // verify that any properties that include nameservices are not removed from the exported blueprint's configuration
     assertEquals("Property containing an HA nameservice (fs.defaultFS), was not correctly exported by the processor",
-        "hdfs://" + expectedNameService, coreSiteProperties.get("fs.defaultFS"));
+      "hdfs://" + expectedNameService, coreSiteProperties.get("fs.defaultFS"));
     assertEquals("Property containing an HA nameservice (hbase.rootdir), was not correctly exported by the processor",
-        "hdfs://" + expectedNameService + "/apps/hbase/data", hbaseSiteProperties.get("hbase.rootdir"));
+      "hdfs://" + expectedNameService + "/apps/hbase/data", hbaseSiteProperties.get("hbase.rootdir"));
     assertEquals("Property containing an HA nameservice (instance.volumes), was not correctly exported by the processor",
-        "hdfs://" + expectedNameService + "/apps/accumulo/data", accumuloSiteProperties.get("instance.volumes"));
+      "hdfs://" + expectedNameService + "/apps/accumulo/data", accumuloSiteProperties.get("instance.volumes"));
   }
 
   @Test
@@ -1131,10 +1136,10 @@ public class BlueprintConfigurationProcessorTest {
     configProperties.put("hdfs-site", hdfsSiteProperties);
 
     assertEquals("Incorrect initial state for hdfs-site config",
-        0, hdfsSiteProperties.size());
+      0, hdfsSiteProperties.size());
 
     Configuration clusterConfig = new Configuration(configProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     // note: test hostgroups may not accurately reflect the required components for the config properties
     // which are mapped to them.  Only the hostgroup name is used for hostgroup resolution and the components
@@ -1151,7 +1156,7 @@ public class BlueprintConfigurationProcessorTest {
     configProcessor.doUpdateForBlueprintExport();
 
     assertEquals("Incorrect state for hdfs-site config after HA call in non-HA environment, should be zero",
-        0, hdfsSiteProperties.size());
+      0, hdfsSiteProperties.size());
   }
 
   @Test
@@ -1193,17 +1198,17 @@ public class BlueprintConfigurationProcessorTest {
     hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameServiceTwo + "." + expectedNodeTwo, expectedHostNameTwo + ":" + expectedPortNum);
 
     Configuration clusterConfig = new Configuration(configProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     // note: test hostgroups may not accurately reflect the required components for the config properties
     // which are mapped to them.  Only the hostgroup name is used for hostgroup resolution and the components
     // are not validated
     Collection<String> groupComponents = new HashSet<>();
-    groupComponents.add("RESOURCEMANAGER");
+    groupComponents.add("NAMENODE");
     Collection<String> hosts = new ArrayList<>();
     hosts.add(expectedHostNameOne);
     hosts.add(expectedHostNameTwo);
-    hosts.add("serverTwo");
     TestHostGroup group = new TestHostGroup(expectedHostGroupName, groupComponents, hosts);
 
     Collection<TestHostGroup> hostGroups = new HashSet<>();
@@ -1215,36 +1220,36 @@ public class BlueprintConfigurationProcessorTest {
 
     // verify results for name service one
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameServiceOne + "." + expectedNodeOne));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameServiceOne + "." + expectedNodeOne));
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameServiceOne + "." + expectedNodeTwo));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameServiceOne + "." + expectedNodeTwo));
 
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameServiceOne + "." + expectedNodeOne));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameServiceOne + "." + expectedNodeOne));
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameServiceOne + "." + expectedNodeTwo));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameServiceOne + "." + expectedNodeTwo));
 
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameServiceOne + "." + expectedNodeOne));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameServiceOne + "." + expectedNodeOne));
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameServiceOne + "." + expectedNodeTwo));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameServiceOne + "." + expectedNodeTwo));
 
 
     // verify results for name service two
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameServiceTwo + "." + expectedNodeOne));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameServiceTwo + "." + expectedNodeOne));
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameServiceTwo + "." + expectedNodeTwo));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameServiceTwo + "." + expectedNodeTwo));
 
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameServiceTwo + "." + expectedNodeOne));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameServiceTwo + "." + expectedNodeOne));
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameServiceTwo + "." + expectedNodeTwo));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameServiceTwo + "." + expectedNodeTwo));
 
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameServiceTwo + "." + expectedNodeOne));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameServiceTwo + "." + expectedNodeOne));
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameServiceTwo + "." + expectedNodeTwo));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameServiceTwo + "." + expectedNodeTwo));
   }
 
   @Test
@@ -1258,7 +1263,7 @@ public class BlueprintConfigurationProcessorTest {
     configProperties.put("yarn-site", yarnSiteProperties);
 
     // setup properties that include host information
-    yarnSiteProperties.put("yarn.log.server.url", "http://" + expectedHostName +":19888/jobhistory/logs");
+    yarnSiteProperties.put("yarn.log.server.url", "http://" + expectedHostName + ":19888/jobhistory/logs");
     yarnSiteProperties.put("yarn.resourcemanager.hostname", expectedHostName);
     yarnSiteProperties.put("yarn.resourcemanager.resource-tracker.address", expectedHostName + ":" + expectedPortNum);
     yarnSiteProperties.put("yarn.resourcemanager.webapp.address", expectedHostName + ":" + expectedPortNum);
@@ -1271,7 +1276,7 @@ public class BlueprintConfigurationProcessorTest {
     yarnSiteProperties.put("yarn.log.server.web-service.url", expectedHostName + ":" + expectedPortNum);
 
     Configuration clusterConfig = new Configuration(configProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     // note: test hostgroups may not accurately reflect the required components for the config properties
     // which are mapped to them.  Only the hostgroup name is used for hostgroup resolution and the components
@@ -1297,21 +1302,21 @@ public class BlueprintConfigurationProcessorTest {
     assertEquals("Yarn ResourceManager tracker address was incorrectly exported",
       createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.resource-tracker.address"));
     assertEquals("Yarn ResourceManager webapp address was incorrectly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.webapp.address"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.webapp.address"));
     assertEquals("Yarn ResourceManager scheduler address was incorrectly exported",
       createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.scheduler.address"));
     assertEquals("Yarn ResourceManager address was incorrectly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.address"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.address"));
     assertEquals("Yarn ResourceManager admin address was incorrectly exported",
       createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.admin.address"));
     assertEquals("Yarn ResourceManager timeline-service address was incorrectly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.timeline-service.address"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.timeline-service.address"));
     assertEquals("Yarn ResourceManager timeline webapp address was incorrectly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.timeline-service.webapp.address"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.timeline-service.webapp.address"));
     assertEquals("Yarn ResourceManager timeline webapp HTTPS address was incorrectly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.timeline-service.webapp.https.address"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.timeline-service.webapp.https.address"));
     assertEquals("Yarn ResourceManager timeline web service url was incorrectly exported",
-            createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.log.server.web-service.url"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.log.server.web-service.url"));
   }
 
   @Test
@@ -1325,7 +1330,7 @@ public class BlueprintConfigurationProcessorTest {
     configProperties.put("yarn-site", yarnSiteProperties);
 
     // setup properties that include host information
-    yarnSiteProperties.put("yarn.log.server.url", "http://" + expectedHostName +":19888/jobhistory/logs");
+    yarnSiteProperties.put("yarn.log.server.url", "http://" + expectedHostName + ":19888/jobhistory/logs");
     yarnSiteProperties.put("yarn.resourcemanager.hostname", expectedHostName);
     yarnSiteProperties.put("yarn.resourcemanager.resource-tracker.address", expectedHostName + ":" + expectedPortNum);
     yarnSiteProperties.put("yarn.resourcemanager.webapp.address", expectedHostName + ":" + expectedPortNum);
@@ -1337,7 +1342,7 @@ public class BlueprintConfigurationProcessorTest {
     yarnSiteProperties.put("yarn.timeline-service.webapp.https.address", "0.0.0.0" + ":" + expectedPortNum);
 
     Configuration clusterConfig = new Configuration(configProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     // note: test hostgroups may not accurately reflect the required components for the config properties
     // which are mapped to them.  Only the hostgroup name is used for hostgroup resolution and the components
@@ -1357,25 +1362,25 @@ public class BlueprintConfigurationProcessorTest {
     configProcessor.doUpdateForBlueprintExport();
 
     assertEquals("Yarn Log Server URL was incorrectly exported",
-        "http://" + "%HOSTGROUP::" + expectedHostGroupName + "%" +":19888/jobhistory/logs", yarnSiteProperties.get("yarn.log.server.url"));
+      "http://" + "%HOSTGROUP::" + expectedHostGroupName + "%" + ":19888/jobhistory/logs", yarnSiteProperties.get("yarn.log.server.url"));
     assertEquals("Yarn ResourceManager hostname was incorrectly exported",
-        createExportedHostName(expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.hostname"));
+      createExportedHostName(expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.hostname"));
     assertEquals("Yarn ResourceManager tracker address was incorrectly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.resource-tracker.address"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.resource-tracker.address"));
     assertEquals("Yarn ResourceManager webapp address was incorrectly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.webapp.address"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.webapp.address"));
     assertEquals("Yarn ResourceManager scheduler address was incorrectly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.scheduler.address"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.scheduler.address"));
     assertEquals("Yarn ResourceManager address was incorrectly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.address"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.address"));
     assertEquals("Yarn ResourceManager admin address was incorrectly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.admin.address"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.admin.address"));
     assertEquals("Yarn ResourceManager timeline-service address was incorrectly exported",
-        "0.0.0.0" + ":" + expectedPortNum, yarnSiteProperties.get("yarn.timeline-service.address"));
+      "0.0.0.0" + ":" + expectedPortNum, yarnSiteProperties.get("yarn.timeline-service.address"));
     assertEquals("Yarn ResourceManager timeline webapp address was incorrectly exported",
-        "0.0.0.0" + ":" + expectedPortNum, yarnSiteProperties.get("yarn.timeline-service.webapp.address"));
+      "0.0.0.0" + ":" + expectedPortNum, yarnSiteProperties.get("yarn.timeline-service.webapp.address"));
     assertEquals("Yarn ResourceManager timeline webapp HTTPS address was incorrectly exported",
-        "0.0.0.0" + ":" + expectedPortNum, yarnSiteProperties.get("yarn.timeline-service.webapp.https.address"));
+      "0.0.0.0" + ":" + expectedPortNum, yarnSiteProperties.get("yarn.timeline-service.webapp.https.address"));
   }
 
   @Test
@@ -1412,7 +1417,7 @@ public class BlueprintConfigurationProcessorTest {
     accumuloSiteProperties.put("instance.volumes", "hdfs://" + expectedHostName + ":" + expectedPortNum + "/apps/accumulo/data");
 
     Configuration clusterConfig = new Configuration(configProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     // note: test hostgroups may not accurately reflect the required components for the config properties
     // which are mapped to them.  Only the hostgroup name is used for hostgroup resolution and the components
@@ -1435,28 +1440,28 @@ public class BlueprintConfigurationProcessorTest {
     assertEquals("hdfs config property not exported properly",
       createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.http.address"));
     assertEquals("hdfs config property not exported properly",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.https.address"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.https.address"));
     assertEquals("hdfs config property not exported properly",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address"));
     assertEquals("hdfs config property not exported properly",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address"));
     assertEquals("hdfs config property not exported properly",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.secondary.http.address"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.secondary.http.address"));
     assertEquals("hdfs config property not exported properly",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.secondary.http-address"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.secondary.http-address"));
     assertEquals("hdfs config property not exported properly",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.shared.edits.dir"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.shared.edits.dir"));
 
     assertEquals("hdfs config in core-site not exported properly",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), coreSiteProperties.get("fs.default.name"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), coreSiteProperties.get("fs.default.name"));
     assertEquals("hdfs config in core-site not exported properly",
-        "hdfs://" + createExportedAddress(expectedPortNum, expectedHostGroupName), coreSiteProperties.get("fs.defaultFS"));
+      "hdfs://" + createExportedAddress(expectedPortNum, expectedHostGroupName), coreSiteProperties.get("fs.defaultFS"));
 
     assertEquals("hdfs config in hbase-site not exported properly",
-        "hdfs://" + createExportedAddress(expectedPortNum, expectedHostGroupName) + "/apps/hbase/data", hbaseSiteProperties.get("hbase.rootdir"));
+      "hdfs://" + createExportedAddress(expectedPortNum, expectedHostGroupName) + "/apps/hbase/data", hbaseSiteProperties.get("hbase.rootdir"));
 
     assertEquals("hdfs config in accumulo-site not exported properly",
-        "hdfs://" + createExportedAddress(expectedPortNum, expectedHostGroupName) + "/apps/accumulo/data", accumuloSiteProperties.get("instance.volumes"));
+      "hdfs://" + createExportedAddress(expectedPortNum, expectedHostGroupName) + "/apps/accumulo/data", accumuloSiteProperties.get("instance.volumes"));
   }
 
   @Test
@@ -1492,7 +1497,7 @@ public class BlueprintConfigurationProcessorTest {
     coreSiteProperties.put("hadoop.proxyuser.hcat.hosts", expectedHostName + "," + expectedHostNameTwo);
 
     Configuration clusterConfig = new Configuration(configProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     // note: test hostgroups may not accurately reflect the required components for the config properties
     // which are mapped to them.  Only the hostgroup name is used for hostgroup resolution and the components
@@ -1522,31 +1527,31 @@ public class BlueprintConfigurationProcessorTest {
     configProcessor.doUpdateForBlueprintExport();
 
     assertEquals("hive property not properly exported",
-        "thrift://" + createExportedAddress(expectedPortNum, expectedHostGroupName), hiveSiteProperties.get("hive.metastore.uris"));
+      "thrift://" + createExportedAddress(expectedPortNum, expectedHostGroupName), hiveSiteProperties.get("hive.metastore.uris"));
     assertEquals("hive property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hiveSiteProperties.get("javax.jdo.option.ConnectionURL"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hiveSiteProperties.get("javax.jdo.option.ConnectionURL"));
     assertEquals("hive property not properly exported",
-        createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
-        webHCatSiteProperties.get("templeton.hive.properties"));
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
+      webHCatSiteProperties.get("templeton.hive.properties"));
     assertEquals("hive property not properly exported",
-        createExportedHostName(expectedHostGroupName), webHCatSiteProperties.get("templeton.kerberos.principal"));
+      createExportedHostName(expectedHostGroupName), webHCatSiteProperties.get("templeton.kerberos.principal"));
 
     assertEquals("hive property not properly exported",
-        createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo), coreSiteProperties.get("hadoop.proxyuser.hive.hosts"));
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo), coreSiteProperties.get("hadoop.proxyuser.hive.hosts"));
 
     assertEquals("hive property not properly exported",
-        createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo), coreSiteProperties.get("hadoop.proxyuser.HTTP.hosts"));
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo), coreSiteProperties.get("hadoop.proxyuser.HTTP.hosts"));
 
     assertEquals("hive property not properly exported",
-        createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo), coreSiteProperties.get("hadoop.proxyuser.hcat.hosts"));
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo), coreSiteProperties.get("hadoop.proxyuser.hcat.hosts"));
 
     assertEquals("hive zookeeper quorum property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName) + "," + createExportedAddress(expectedPortNum, expectedHostGroupNameTwo),
-        hiveSiteProperties.get("hive.zookeeper.quorum"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName) + "," + createExportedAddress(expectedPortNum, expectedHostGroupNameTwo),
+      hiveSiteProperties.get("hive.zookeeper.quorum"));
 
     assertEquals("hive zookeeper connectString property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName) + "," + createExportedAddress(expectedPortNum, expectedHostGroupNameTwo),
-        hiveSiteProperties.get("hive.cluster.delegation.token.store.zookeeper.connectString"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName) + "," + createExportedAddress(expectedPortNum, expectedHostGroupNameTwo),
+      hiveSiteProperties.get("hive.cluster.delegation.token.store.zookeeper.connectString"));
 
   }
 
@@ -1590,7 +1595,7 @@ public class BlueprintConfigurationProcessorTest {
     coreSiteProperties.put("hadoop.proxyuser.hcat.hosts", expectedHostName + "," + expectedHostNameTwo);
 
     Configuration clusterConfig = new Configuration(configProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> groupComponents = new HashSet<>();
     groupComponents.add("NAMENODE");
@@ -1621,18 +1626,18 @@ public class BlueprintConfigurationProcessorTest {
     assertEquals("hive property not properly exported",
       "thrift://" + createExportedAddress(expectedPortNum, expectedHostGroupName) + "," + "thrift://" + createExportedAddress(expectedPortNum, expectedHostGroupNameTwo), hiveSiteProperties.get("hive.metastore.uris"));
     assertEquals("hive property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hiveSiteProperties.get("javax.jdo.option.ConnectionURL"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hiveSiteProperties.get("javax.jdo.option.ConnectionURL"));
     assertEquals("hive property not properly exported",
-        createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
-        webHCatSiteProperties.get("templeton.hive.properties"));
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
+      webHCatSiteProperties.get("templeton.hive.properties"));
     assertEquals("hive property not properly exported",
-        createExportedHostName(expectedHostGroupName), webHCatSiteProperties.get("templeton.kerberos.principal"));
+      createExportedHostName(expectedHostGroupName), webHCatSiteProperties.get("templeton.kerberos.principal"));
 
     assertEquals("hive property not properly exported",
-        createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo), coreSiteProperties.get("hadoop.proxyuser.hive.hosts"));
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo), coreSiteProperties.get("hadoop.proxyuser.hive.hosts"));
 
     assertFalse("hive.server2.authentication.ldap.url should not have been present in the exported configuration",
-        hiveSiteProperties.containsKey("hive.server2.authentication.ldap.url"));
+      hiveSiteProperties.containsKey("hive.server2.authentication.ldap.url"));
     assertEquals("hive property not properly exported",
       createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo), coreSiteProperties.get("hadoop.proxyuser.HTTP.hosts"));
 
@@ -1640,12 +1645,12 @@ public class BlueprintConfigurationProcessorTest {
       createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo), coreSiteProperties.get("hadoop.proxyuser.hcat.hosts"));
 
     assertEquals("hive zookeeper quorum property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName) + "," + createExportedAddress(expectedPortNum, expectedHostGroupNameTwo),
-        hiveSiteProperties.get("hive.zookeeper.quorum"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName) + "," + createExportedAddress(expectedPortNum, expectedHostGroupNameTwo),
+      hiveSiteProperties.get("hive.zookeeper.quorum"));
 
     assertEquals("hive zookeeper connectString property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName) + "," + createExportedAddress(expectedPortNum, expectedHostGroupNameTwo),
-        hiveSiteProperties.get("hive.cluster.delegation.token.store.zookeeper.connectString"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName) + "," + createExportedAddress(expectedPortNum, expectedHostGroupNameTwo),
+      hiveSiteProperties.get("hive.cluster.delegation.token.store.zookeeper.connectString"));
   }
 
   @Test
@@ -1680,7 +1685,7 @@ public class BlueprintConfigurationProcessorTest {
     coreSiteProperties.put("hadoop.proxyuser.oozie.hosts", expectedHostName + "," + expectedHostNameTwo);
 
     Configuration clusterConfig = new Configuration(configProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     // note: test hostgroups may not accurately reflect the required components for the config properties
     // which are mapped to them.  Only the hostgroup name is used for hostgroup resolution and the components
@@ -1704,7 +1709,7 @@ public class BlueprintConfigurationProcessorTest {
     hostGroups.add(group2);
 
     if (BlueprintConfigurationProcessor.singleHostTopologyUpdaters != null &&
-            BlueprintConfigurationProcessor.singleHostTopologyUpdaters.containsKey("oozie-site")) {
+      BlueprintConfigurationProcessor.singleHostTopologyUpdaters.containsKey("oozie-site")) {
       BlueprintConfigurationProcessor.singleHostTopologyUpdaters.get("oozie-site").remove("oozie.service.JPAService.jdbc.url");
     }
 
@@ -1719,13 +1724,13 @@ public class BlueprintConfigurationProcessorTest {
     assertTrue(configProcessor.getRemovePropertyUpdaters().get("oozie-site").containsKey("oozie.service.JPAService.jdbc.url"));
 
     assertEquals("oozie property not exported correctly",
-        createExportedHostName(expectedHostGroupName), oozieSiteProperties.get("oozie.base.url"));
+      createExportedHostName(expectedHostGroupName), oozieSiteProperties.get("oozie.base.url"));
     assertEquals("oozie property not exported correctly",
       createExportedHostName(expectedHostGroupName), oozieSiteProperties.get("oozie.authentication.kerberos.principal"));
     assertEquals("oozie property not exported correctly",
-        createExportedHostName(expectedHostGroupName), oozieSiteProperties.get("oozie.service.HadoopAccessorService.kerberos.principal"));
+      createExportedHostName(expectedHostGroupName), oozieSiteProperties.get("oozie.service.HadoopAccessorService.kerberos.principal"));
     assertEquals("oozie property not exported correctly",
-        createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo), coreSiteProperties.get("hadoop.proxyuser.oozie.hosts"));
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo), coreSiteProperties.get("hadoop.proxyuser.oozie.hosts"));
 
     // verify that the oozie properties that can refer to an external DB are not included in the export
     assertFalse("oozie_existing_mysql_host should not have been present in the exported configuration",
@@ -1856,7 +1861,7 @@ public class BlueprintConfigurationProcessorTest {
     accumuloSiteProperties.put("instance.zookeeper.host", createHostAddress(expectedHostName, expectedPortNumberOne) + "," + createHostAddress(expectedHostNameTwo, expectedPortNumberTwo));
 
     Configuration clusterConfig = new Configuration(configProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     // test hostgroups may not accurately reflect the required components for the config properties which are mapped to them
     Collection<String> groupComponents = new HashSet<>();
@@ -1884,14 +1889,14 @@ public class BlueprintConfigurationProcessorTest {
     configProcessor.doUpdateForBlueprintExport();
 
     assertEquals("zookeeper config not properly exported",
-        createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
-        coreSiteProperties.get("ha.zookeeper.quorum"));
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
+      coreSiteProperties.get("ha.zookeeper.quorum"));
     assertEquals("zookeeper config not properly exported",
-        createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
-        hbaseSiteProperties.get("hbase.zookeeper.quorum"));
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
+      hbaseSiteProperties.get("hbase.zookeeper.quorum"));
     assertEquals("zookeeper config not properly exported",
-        createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
-        webHCatSiteProperties.get("templeton.zookeeper.hosts"));
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
+      webHCatSiteProperties.get("templeton.zookeeper.hosts"));
     assertEquals("yarn-site zookeeper config not properly exported",
       createExportedHostName(expectedHostGroupName, expectedPortNumberOne) + "," + createExportedHostName(expectedHostGroupNameTwo, expectedPortNumberTwo),
       yarnSiteProperties.get("hadoop.registry.zk.quorum"));
@@ -1899,11 +1904,11 @@ public class BlueprintConfigurationProcessorTest {
       createExportedHostName(expectedHostGroupName, expectedPortNumberOne) + "," + createExportedHostName(expectedHostGroupNameTwo, expectedPortNumberTwo),
       sliderClientProperties.get("slider.zookeeper.quorum"));
     assertEquals("kafka zookeeper config not properly exported",
-        createExportedHostName(expectedHostGroupName, expectedPortNumberOne) + "," + createExportedHostName(expectedHostGroupNameTwo, expectedPortNumberTwo),
-        kafkaBrokerProperties.get("zookeeper.connect"));
+      createExportedHostName(expectedHostGroupName, expectedPortNumberOne) + "," + createExportedHostName(expectedHostGroupNameTwo, expectedPortNumberTwo),
+      kafkaBrokerProperties.get("zookeeper.connect"));
     assertEquals("accumulo-site zookeeper config not properly exported",
-        createExportedHostName(expectedHostGroupName, expectedPortNumberOne) + "," + createExportedHostName(expectedHostGroupNameTwo, expectedPortNumberTwo),
-        accumuloSiteProperties.get("instance.zookeeper.host"));
+      createExportedHostName(expectedHostGroupName, expectedPortNumberOne) + "," + createExportedHostName(expectedHostGroupNameTwo, expectedPortNumberTwo),
+      accumuloSiteProperties.get("instance.zookeeper.host"));
   }
 
   @Test
@@ -1937,7 +1942,7 @@ public class BlueprintConfigurationProcessorTest {
 //    multiOozieSiteMap.put("hadoop.proxyuser.knox.hosts", new MultipleHostTopologyUpdater("KNOX_GATEWAY"));
 
     Configuration clusterConfig = new Configuration(configProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> groupComponents = new HashSet<>();
     groupComponents.add("KNOX_GATEWAY");
@@ -1964,17 +1969,17 @@ public class BlueprintConfigurationProcessorTest {
     configProcessor.doUpdateForBlueprintExport();
 
     assertEquals("Knox for core-site config not properly exported",
-        createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
-        coreSiteProperties.get("hadoop.proxyuser.knox.hosts"));
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
+      coreSiteProperties.get("hadoop.proxyuser.knox.hosts"));
     assertEquals("Knox config for WebHCat not properly exported",
-        createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
-        webHCatSiteProperties.get("webhcat.proxyuser.knox.hosts"));
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
+      webHCatSiteProperties.get("webhcat.proxyuser.knox.hosts"));
     assertEquals("Knox config for Oozie not properly exported",
-        createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
-        oozieSiteProperties.get("hadoop.proxyuser.knox.hosts"));
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
+      oozieSiteProperties.get("hadoop.proxyuser.knox.hosts"));
     assertEquals("Knox config for Oozie not properly exported",
-        createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
-        oozieSiteProperties.get("oozie.service.ProxyUserService.proxyuser.knox.hosts"));
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
+      oozieSiteProperties.get("oozie.service.ProxyUserService.proxyuser.knox.hosts"));
   }
 
   @Test
@@ -1989,7 +1994,7 @@ public class BlueprintConfigurationProcessorTest {
     kafkaBrokerProperties.put("kafka.ganglia.metrics.host", createHostAddress(expectedHostName, expectedPortNumberOne));
 
     Configuration clusterConfig = new Configuration(configProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> groupComponents = new HashSet<>();
     groupComponents.add("KAFKA_BROKER");
@@ -2013,8 +2018,8 @@ public class BlueprintConfigurationProcessorTest {
     configProcessor.doUpdateForBlueprintExport();
 
     assertEquals("kafka Ganglia config not properly exported",
-        createExportedHostName(expectedHostGroupName, expectedPortNumberOne),
-        kafkaBrokerProperties.get("kafka.ganglia.metrics.host"));
+      createExportedHostName(expectedHostGroupName, expectedPortNumberOne),
+      kafkaBrokerProperties.get("kafka.ganglia.metrics.host"));
   }
 
   @Test
@@ -2033,7 +2038,7 @@ public class BlueprintConfigurationProcessorTest {
     properties.put("worker.childopts", "some other info, undefined, more info");
 
     Configuration clusterConfig = new Configuration(configProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> groupComponents = new HashSet<>();
     groupComponents.add("ZOOKEEPER_SERVER");
@@ -2052,7 +2057,7 @@ public class BlueprintConfigurationProcessorTest {
     configProcessor.doUpdateForBlueprintExport();
 
     assertEquals("Property was incorrectly exported",
-        "%HOSTGROUP::" + expectedHostGroupName + "%", properties.get("storm.zookeeper.servers"));
+      "%HOSTGROUP::" + expectedHostGroupName + "%", properties.get("storm.zookeeper.servers"));
     assertEquals("Property with undefined host was incorrectly exported",
       "undefined", properties.get("nimbus.childopts"));
     assertEquals("Property with undefined host was incorrectly exported",
@@ -2123,10 +2128,10 @@ public class BlueprintConfigurationProcessorTest {
     parentProperties.put("yarn-site", parentYarnSiteProps);
 
     Configuration parentClusterConfig = new Configuration(parentProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
+      Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
 
     Collection<String> group1Components = new HashSet<>();
     group1Components.add("NAMENODE");
@@ -2160,7 +2165,7 @@ public class BlueprintConfigurationProcessorTest {
     properties.put("yarn-site", yarnSiteProps);
 
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> group1Components = new HashSet<>();
     group1Components.add("NAMENODE");
@@ -2179,10 +2184,10 @@ public class BlueprintConfigurationProcessorTest {
     // group 2 host group configuration
     // HG config -> BP HG config -> cluster scoped config
     Configuration group2BPConfig = new Configuration(Collections.<String, Map<String, String>>emptyMap(),
-        Collections.<String, Map<String, Map<String, String>>>emptyMap(), clusterConfig);
+      Collections.<String, Map<String, Map<String, String>>>emptyMap(), clusterConfig);
 
     Configuration group2Config = new Configuration(group2Properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap(), group2BPConfig);
+      Collections.<String, Map<String, Map<String, String>>>emptyMap(), group2BPConfig);
     // set config on HG
     TestHostGroup group2 = new TestHostGroup("group2", group2Components, Collections.singleton("testhost2"), group2Config);
 
@@ -2207,7 +2212,7 @@ public class BlueprintConfigurationProcessorTest {
     properties.put("yarn-site", yarnSiteProps);
 
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> group1Components = new HashSet<>();
     group1Components.add("NAMENODE");
@@ -2226,11 +2231,11 @@ public class BlueprintConfigurationProcessorTest {
     // group 2 host group configuration
     // HG config -> BP HG config -> cluster scoped config
     Configuration group2BPConfig = new Configuration(group2BPProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap(), clusterConfig);
+      Collections.<String, Map<String, Map<String, String>>>emptyMap(), clusterConfig);
 
     // can't set parent here because it is reset in cluster topology
     Configuration group2Config = new Configuration(new HashMap<String, Map<String, String>>(),
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
     // set config on HG
     TestHostGroup group2 = new TestHostGroup("group2", group2Components, Collections.singleton("testhost2"), group2Config);
 
@@ -2811,19 +2816,19 @@ public class BlueprintConfigurationProcessorTest {
 
     // verify that the expected hostname was substituted for the host group name in the config
     assertEquals("HTTPS address HA property not properly exported",
-        expectedHostName + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeOne));
+      expectedHostName + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeOne));
     assertEquals("HTTPS address HA property not properly exported",
-        expectedHostNameTwo + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeTwo));
+      expectedHostNameTwo + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeTwo));
 
     assertEquals("HTTPS address HA property not properly exported",
-        expectedHostName + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeOne));
+      expectedHostName + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeOne));
     assertEquals("HTTPS address HA property not properly exported",
-        expectedHostNameTwo + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeTwo));
+      expectedHostNameTwo + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeTwo));
 
     assertEquals("HTTPS address HA property not properly exported",
-        expectedHostName + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeOne));
+      expectedHostName + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeOne));
     assertEquals("HTTPS address HA property not properly exported",
-        expectedHostNameTwo + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeTwo));
+      expectedHostNameTwo + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeTwo));
 
     // verify that the Blueprint config processor has set the internal required properties
     // that determine the active and standby node hostnames for this HA setup
@@ -2831,22 +2836,22 @@ public class BlueprintConfigurationProcessorTest {
     String activeHost = hadoopEnvProperties.get("dfs_ha_initial_namenode_active");
     if (activeHost.equals(expectedHostName)) {
       assertEquals("Standby Namenode hostname was not set correctly",
-          expectedHostNameTwo, hadoopEnvProperties.get("dfs_ha_initial_namenode_standby"));
+        expectedHostNameTwo, hadoopEnvProperties.get("dfs_ha_initial_namenode_standby"));
     } else if (activeHost.equals(expectedHostNameTwo)) {
       assertEquals("Standby Namenode hostname was not set correctly",
-          expectedHostName, hadoopEnvProperties.get("dfs_ha_initial_namenode_standby"));
+        expectedHostName, hadoopEnvProperties.get("dfs_ha_initial_namenode_standby"));
     } else {
       fail("Active Namenode hostname was not set correctly: " + activeHost);
     }
 
     assertEquals("fs.defaultFS should not be modified by cluster update when NameNode HA is enabled.",
-        "hdfs://" + expectedNameService, coreSiteProperties.get("fs.defaultFS"));
+      "hdfs://" + expectedNameService, coreSiteProperties.get("fs.defaultFS"));
 
     assertEquals("hbase.rootdir should not be modified by cluster update when NameNode HA is enabled.",
-        "hdfs://" + expectedNameService + "/hbase/test/root/dir", hbaseSiteProperties.get("hbase.rootdir"));
+      "hdfs://" + expectedNameService + "/hbase/test/root/dir", hbaseSiteProperties.get("hbase.rootdir"));
 
     assertEquals("instance.volumes should not be modified by cluster update when NameNode HA is enabled.",
-        "hdfs://" + expectedNameService + "/accumulo/test/instance/volumes", accumuloSiteProperties.get("instance.volumes"));
+      "hdfs://" + expectedNameService + "/accumulo/test/instance/volumes", accumuloSiteProperties.get("instance.volumes"));
   }
 
   @Test
@@ -2854,7 +2859,7 @@ public class BlueprintConfigurationProcessorTest {
     final String expectedHostGroupName = "host_group_1";
 
     final String expectedPropertyValue =
-        "hive.metastore.local=false,hive.metastore.uris=thrift://headnode0.ivantestcluster2-ssh.d1.internal.cloudapp.net:9083,hive.user.install.directory=/user";
+      "hive.metastore.local=false,hive.metastore.uris=thrift://headnode0.ivantestcluster2-ssh.d1.internal.cloudapp.net:9083,hive.user.install.directory=/user";
 
     Map<String, Map<String, String>> configProperties = new HashMap<>();
     Map<String, String> webHCatSiteProperties = new HashMap<>();
@@ -2931,8 +2936,8 @@ public class BlueprintConfigurationProcessorTest {
     updater.doUpdateForClusterCreate();
 
     assertEquals("Unexpected config update for hive.metastore.uris",
-        expectedMetaStoreURIs,
-        hiveSiteProperties.get("hive.metastore.uris"));
+      expectedMetaStoreURIs,
+      hiveSiteProperties.get("hive.metastore.uris"));
   }
 
   @Test
@@ -2941,10 +2946,10 @@ public class BlueprintConfigurationProcessorTest {
     final String expectedHostGroupNameTwo = "host_group_2";
 
     final String expectedHostNameOne =
-        "c6401.ambari.apache.org";
+      "c6401.ambari.apache.org";
 
     final String expectedHostNameTwo =
-        "c6402.ambari.apache.org";
+      "c6402.ambari.apache.org";
 
 
     // use exported HOSTGROUP syntax for this property, to make sure the
@@ -2995,8 +3000,8 @@ public class BlueprintConfigurationProcessorTest {
     updater.doUpdateForClusterCreate();
 
     assertEquals("Unexpected config update for hive.metastore.uris",
-        expectedMetaStoreURIs,
-        hiveSiteProperties.get("hive.metastore.uris"));
+      expectedMetaStoreURIs,
+      hiveSiteProperties.get("hive.metastore.uris"));
   }
 
   @Test
@@ -3020,12 +3025,12 @@ public class BlueprintConfigurationProcessorTest {
   }
 
   private void testHiveMetastoreHA(String separator) throws InvalidTopologyException, ConfigurationTopologyException {
-    final String[] parts = new String[] {
+    final String[] parts = new String[]{
       "hive.metastore.local=false",
       "hive.metastore.uris=" + getThriftURI("localhost"),
       "hive.metastore.sasl.enabled=false"
     };
-    final String[] hostNames = new String[] { "c6401.ambari.apache.org", "example.com", "c6402.ambari.apache.org" };
+    final String[] hostNames = new String[]{"c6401.ambari.apache.org", "example.com", "c6402.ambari.apache.org"};
     final Set<String> expectedUris = new HashSet<>();
     for (String hostName : hostNames) {
       expectedUris.add(getThriftURI(hostName));
@@ -3138,7 +3143,7 @@ public class BlueprintConfigurationProcessorTest {
 
     // simulate the Oozie HA configuration
     oozieSiteProperties.put("oozie.services.ext",
-        "org.apache.oozie.service.ZKLocksService,org.apache.oozie.service.ZKXLogStreamingService,org.apache.oozie.service.ZKJobsConcurrencyService,org.apache.oozie.service.ZKUUIDService");
+      "org.apache.oozie.service.ZKLocksService,org.apache.oozie.service.ZKXLogStreamingService,org.apache.oozie.service.ZKJobsConcurrencyService,org.apache.oozie.service.ZKUUIDService");
 
     oozieEnvProperties.put("oozie_existing_mysql_host", expectedExternalHost);
 
@@ -3164,13 +3169,13 @@ public class BlueprintConfigurationProcessorTest {
     updater.doUpdateForClusterCreate();
 
     assertEquals("oozie property not updated correctly",
-        expectedHostName, oozieSiteProperties.get("oozie.base.url"));
+      expectedHostName, oozieSiteProperties.get("oozie.base.url"));
     assertEquals("oozie property not updated correctly",
-        expectedHostName, oozieSiteProperties.get("oozie.authentication.kerberos.principal"));
+      expectedHostName, oozieSiteProperties.get("oozie.authentication.kerberos.principal"));
     assertEquals("oozie property not updated correctly",
-        expectedHostName, oozieSiteProperties.get("oozie.service.HadoopAccessorService.kerberos.principal"));
+      expectedHostName, oozieSiteProperties.get("oozie.service.HadoopAccessorService.kerberos.principal"));
     assertEquals("oozie property not updated correctly",
-        expectedHostName + "," + expectedHostNameTwo, coreSiteProperties.get("hadoop.proxyuser.oozie.hosts"));
+      expectedHostName + "," + expectedHostNameTwo, coreSiteProperties.get("hadoop.proxyuser.oozie.hosts"));
   }
 
   @Test
@@ -3253,7 +3258,7 @@ public class BlueprintConfigurationProcessorTest {
     configProperties.put("yarn-site", yarnSiteProperties);
 
     // setup properties that include host information
-    yarnSiteProperties.put("yarn.log.server.url", "http://" + expectedHostName +":19888/jobhistory/logs");
+    yarnSiteProperties.put("yarn.log.server.url", "http://" + expectedHostName + ":19888/jobhistory/logs");
     yarnSiteProperties.put("yarn.resourcemanager.hostname", expectedHostName);
     yarnSiteProperties.put("yarn.resourcemanager.resource-tracker.address", expectedHostName + ":" + expectedPortNum);
     yarnSiteProperties.put("yarn.resourcemanager.webapp.address", expectedHostName + ":" + expectedPortNum);
@@ -3293,21 +3298,21 @@ public class BlueprintConfigurationProcessorTest {
     assertEquals("Yarn ResourceManager hostname was incorrectly exported",
       expectedHostName, yarnSiteProperties.get("yarn.resourcemanager.hostname"));
     assertEquals("Yarn ResourceManager tracker address was incorrectly updated",
-        createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.resourcemanager.resource-tracker.address"));
+      createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.resourcemanager.resource-tracker.address"));
     assertEquals("Yarn ResourceManager webapp address was incorrectly updated",
       createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.resourcemanager.webapp.address"));
     assertEquals("Yarn ResourceManager scheduler address was incorrectly updated",
-        createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.resourcemanager.scheduler.address"));
+      createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.resourcemanager.scheduler.address"));
     assertEquals("Yarn ResourceManager address was incorrectly updated",
-        createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.resourcemanager.address"));
+      createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.resourcemanager.address"));
     assertEquals("Yarn ResourceManager admin address was incorrectly updated",
-        createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.resourcemanager.admin.address"));
+      createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.resourcemanager.admin.address"));
     assertEquals("Yarn ResourceManager timeline-service address was incorrectly updated",
-        createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.timeline-service.address"));
+      createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.timeline-service.address"));
     assertEquals("Yarn ResourceManager timeline webapp address was incorrectly updated",
-        createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.timeline-service.webapp.address"));
+      createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.timeline-service.webapp.address"));
     assertEquals("Yarn ResourceManager timeline webapp HTTPS address was incorrectly updated",
-        createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.timeline-service.webapp.https.address"));
+      createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.timeline-service.webapp.https.address"));
   }
 
   @Test
@@ -3323,7 +3328,7 @@ public class BlueprintConfigurationProcessorTest {
     configProperties.put("yarn-site", yarnSiteProperties);
 
     // setup properties that include host information
-    yarnSiteProperties.put("yarn.log.server.url", "http://" + expectedHostName +":19888/jobhistory/logs");
+    yarnSiteProperties.put("yarn.log.server.url", "http://" + expectedHostName + ":19888/jobhistory/logs");
     yarnSiteProperties.put("yarn.resourcemanager.hostname", expectedHostName);
     yarnSiteProperties.put("yarn.resourcemanager.resource-tracker.address", expectedHostName + ":" + expectedPortNum);
     yarnSiteProperties.put("yarn.resourcemanager.webapp.address", expectedHostName + ":" + expectedPortNum);
@@ -3419,7 +3424,7 @@ public class BlueprintConfigurationProcessorTest {
     final String expectedHostGroupName = "host_group_1";
     final String expectedHostGroupNameTwo = "host_group_2";
     final String expectedQuorumJournalURL = "qjournal://" + createHostAddress(expectedHostNameOne, expectedPortNum) + ";" +
-        createHostAddress(expectedHostNameTwo, expectedPortNum) + "/mycluster";
+      createHostAddress(expectedHostNameTwo, expectedPortNum) + "/mycluster";
 
     Map<String, Map<String, String>> configProperties = new HashMap<>();
     Map<String, String> hdfsSiteProperties = new HashMap<>();
@@ -3449,8 +3454,8 @@ public class BlueprintConfigurationProcessorTest {
 
     // expect that all servers are included in configuration property without changes, and that the qjournal URL format is preserved
     assertEquals("HDFS HA shared edits directory property should not have been modified, since FQDNs were specified.",
-        expectedQuorumJournalURL,
-        hdfsSiteProperties.get("dfs.namenode.shared.edits.dir"));
+      expectedQuorumJournalURL,
+      hdfsSiteProperties.get("dfs.namenode.shared.edits.dir"));
   }
 
   @Test
@@ -3600,7 +3605,7 @@ public class BlueprintConfigurationProcessorTest {
     String updatedVal = topology.getConfiguration().getFullProperties().get("storm-site").get("nimbus.seeds");
 
     assertEquals("nimbus.seeds property should not be updated when FQDNs are specified in configuration",
-                 expectedValue, updatedVal);
+      expectedValue, updatedVal);
   }
 
 
@@ -4091,8 +4096,6 @@ public class BlueprintConfigurationProcessorTest {
     }
 
 
-
-
   }
 
   @Test
@@ -4718,9 +4721,9 @@ public class BlueprintConfigurationProcessorTest {
     updater.doUpdateForClusterCreate();
 
     assertTrue("hive.server2.authentication.kerberos.keytab should have been included in configuration",
-        hiveSiteProperties.containsKey("hive.server2.authentication.kerberos.keytab"));
+      hiveSiteProperties.containsKey("hive.server2.authentication.kerberos.keytab"));
     assertTrue("hive.server2.authentication.kerberos.principal should have been included in configuration",
-        hiveSiteProperties.containsKey("hive.server2.authentication.kerberos.principal"));
+      hiveSiteProperties.containsKey("hive.server2.authentication.kerberos.principal"));
   }
 
   @Test
@@ -4754,22 +4757,22 @@ public class BlueprintConfigurationProcessorTest {
       };
 
     Stack.ConfigProperty configProperty2 =
-        new Stack.ConfigProperty("hbase-site", "hbase.coprocessor.master.classes", "") {
-          @Override
-          Set<PropertyDependencyInfo> getDependsOnProperties() {
-            PropertyDependencyInfo dependencyInfo = new PropertyDependencyInfo("hbase-site", "hbase.security.authorization");
-            return Collections.singleton(dependencyInfo);
-          }
-        };
+      new Stack.ConfigProperty("hbase-site", "hbase.coprocessor.master.classes", "") {
+        @Override
+        Set<PropertyDependencyInfo> getDependsOnProperties() {
+          PropertyDependencyInfo dependencyInfo = new PropertyDependencyInfo("hbase-site", "hbase.security.authorization");
+          return Collections.singleton(dependencyInfo);
+        }
+      };
 
     Stack.ConfigProperty configProperty3 =
-        new Stack.ConfigProperty("hbase-site", "hbase.coprocessor.region.classes", "") {
-          @Override
-          Set<PropertyDependencyInfo> getDependsOnProperties() {
-            PropertyDependencyInfo dependencyInfo = new PropertyDependencyInfo("hbase-site", "hbase.security.authorization");
-            return Collections.singleton(dependencyInfo);
-          }
-        };
+      new Stack.ConfigProperty("hbase-site", "hbase.coprocessor.region.classes", "") {
+        @Override
+        Set<PropertyDependencyInfo> getDependsOnProperties() {
+          PropertyDependencyInfo dependencyInfo = new PropertyDependencyInfo("hbase-site", "hbase.security.authorization");
+          return Collections.singleton(dependencyInfo);
+        }
+      };
 
     mapOfMetadata.put("hbase.coprocessor.regionserver.classes", configProperty1);
     mapOfMetadata.put("hbase.coprocessor.master.classes", configProperty2);
@@ -4783,7 +4786,7 @@ public class BlueprintConfigurationProcessorTest {
     Set<String> emptySet = Collections.emptySet();
     expect(stack.getExcludedConfigurationTypes(anyObject(String.class))).andReturn(emptySet).anyTimes();
 
-      // customized stack calls for this test only
+    // customized stack calls for this test only
     expect(stack.getServiceForConfigType("hbase-site")).andReturn("HBASE").atLeastOnce();
     expect(stack.getConfigurationPropertiesWithMetadata("HBASE", "hbase-site")).andReturn(mapOfMetadata).atLeastOnce();
 
@@ -4874,7 +4877,7 @@ public class BlueprintConfigurationProcessorTest {
     updater.doUpdateForClusterCreate();
 
     assertTrue("hbase.coprocessor.regionserver.classes should have been included in configuration",
-        hbaseSiteProperties.containsKey("hbase.coprocessor.regionserver.classes"));
+      hbaseSiteProperties.containsKey("hbase.coprocessor.regionserver.classes"));
 
   }
 
@@ -4958,31 +4961,31 @@ public class BlueprintConfigurationProcessorTest {
     updater.doUpdateForClusterCreate();
 
     List<String> hostArray =
-        Arrays.asList(atlasProperties.get("atlas.kafka.bootstrap.servers").split(","));
+      Arrays.asList(atlasProperties.get("atlas.kafka.bootstrap.servers").split(","));
     List<String> expected =
-        Arrays.asList("c6401.ambari.apache.org:6667","c6402.ambari.apache.org:6667", "c6403.ambari.apache.org:6667");
+      Arrays.asList("c6401.ambari.apache.org:6667", "c6402.ambari.apache.org:6667", "c6403.ambari.apache.org:6667");
 
     Assert.assertTrue(hostArray.containsAll(expected) && expected.containsAll(hostArray));
 
     hostArray = Arrays.asList(atlasProperties.get("atlas.kafka.zookeeper.connect").split(","));
     expected =
-        Arrays.asList("c6401.ambari.apache.org:2181","c6402.ambari.apache.org:2181", "c6403.ambari.apache.org:2181");
+      Arrays.asList("c6401.ambari.apache.org:2181", "c6402.ambari.apache.org:2181", "c6403.ambari.apache.org:2181");
     Assert.assertTrue(hostArray.containsAll(expected) && expected.containsAll(hostArray));
 
 
     hostArray = Arrays.asList(atlasProperties.get("atlas.graph.index.search.solr.zookeeper-url").split(","));
     expected =
-        Arrays.asList("c6401.ambari.apache.org:2181/ambari-solr","c6402.ambari.apache.org:2181/ambari-solr", "c6403.ambari.apache.org:2181/ambari-solr");
+      Arrays.asList("c6401.ambari.apache.org:2181/ambari-solr", "c6402.ambari.apache.org:2181/ambari-solr", "c6403.ambari.apache.org:2181/ambari-solr");
     Assert.assertTrue(hostArray.containsAll(expected) && expected.containsAll(hostArray));
 
     hostArray = Arrays.asList(atlasProperties.get("atlas.graph.storage.hostname").split(","));
     expected =
-        Arrays.asList("c6401.ambari.apache.org","c6402.ambari.apache.org", "c6403.ambari.apache.org");
+      Arrays.asList("c6401.ambari.apache.org", "c6402.ambari.apache.org", "c6403.ambari.apache.org");
     Assert.assertTrue(hostArray.containsAll(expected) && expected.containsAll(hostArray));
 
     hostArray = Arrays.asList(atlasProperties.get("atlas.audit.hbase.zookeeper.quorum").split(","));
     expected =
-        Arrays.asList("c6401.ambari.apache.org","c6402.ambari.apache.org", "c6403.ambari.apache.org");
+      Arrays.asList("c6401.ambari.apache.org", "c6402.ambari.apache.org", "c6403.ambari.apache.org");
     Assert.assertTrue(hostArray.containsAll(expected) && expected.containsAll(hostArray));
   }
 
@@ -5240,13 +5243,13 @@ public class BlueprintConfigurationProcessorTest {
 
 
     assertEquals("fs.defaultFS should not be modified by cluster update when NameNode HA is enabled.",
-                 "hdfs://" + expectedNameService, coreSiteProperties.get("fs.defaultFS"));
+      "hdfs://" + expectedNameService, coreSiteProperties.get("fs.defaultFS"));
 
     assertEquals("hbase.rootdir should not be modified by cluster update when NameNode HA is enabled.",
       "hdfs://" + expectedNameService + "/hbase/test/root/dir", hbaseSiteProperties.get("hbase.rootdir"));
 
     assertEquals("instance.volumes should not be modified by cluster update when NameNode HA is enabled.",
-        "hdfs://" + expectedNameService + "/accumulo/test/instance/volumes", accumuloSiteProperties.get("instance.volumes"));
+      "hdfs://" + expectedNameService + "/accumulo/test/instance/volumes", accumuloSiteProperties.get("instance.volumes"));
 
     // verify that the non-HA properties are filtered out in HA mode
     assertFalse("dfs.namenode.http-address should have been filtered out of this HA configuration",
@@ -5395,17 +5398,17 @@ public class BlueprintConfigurationProcessorTest {
     // all of these dynamic props will be set to the same host in this case where there is a single host group
     // with multiple hosts.  This may not be correct and a Jira is being filed to track this issue.
     String expectedPropertyValue = hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeOne);
-    if (! expectedPropertyValue.equals(expectedHostName + ":" + expectedPortNum) &&
-        ! expectedPropertyValue.equals(expectedHostNameTwo + ":" + expectedPortNum)) {
+    if (!expectedPropertyValue.equals(expectedHostName + ":" + expectedPortNum) &&
+      !expectedPropertyValue.equals(expectedHostNameTwo + ":" + expectedPortNum)) {
       fail("HTTPS address HA property not properly exported");
     }
     assertEquals("HTTPS address HA property not properly exported", expectedPropertyValue,
-        hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeTwo));
+      hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeTwo));
 
     assertEquals("HTTPS address HA property not properly exported", expectedPropertyValue,
-        hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeOne));
+      hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeOne));
     assertEquals("HTTPS address HA property not properly exported", expectedPropertyValue,
-        hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeTwo));
+      hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeTwo));
 
     assertEquals("HTTPS address HA property not properly exported", expectedPropertyValue,
       hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeOne));
@@ -5617,9 +5620,9 @@ public class BlueprintConfigurationProcessorTest {
 
     Map<String, Map<String, String>> parentProperties = new HashMap<>();
     Configuration parentClusterConfig = new Configuration(parentProperties,
-        Collections.<String, Map<String, Map<String, String

<TRUNCATED>

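For readers tracing the assertions in the (truncated) BlueprintConfigurationProcessorTest diff above: the helpers createHostAddress, createExportedHostName and createExportedAddress are defined elsewhere in the test class and do not appear in this excerpt. A minimal sketch of what they are assumed to return, inferred from asserted values such as "%HOSTGROUP::" + expectedHostGroupName + "%" in the storm.zookeeper.servers check:

// Sketch only: inferred helper shapes, not the committed test code.
// The test passes port numbers around as strings (e.g. expectedPortNum).
private static String createHostAddress(String hostName, String port) {
  return hostName + ":" + port;                 // e.g. "c6401.ambari.apache.org:2181"
}

private static String createExportedHostName(String hostGroupName) {
  return "%HOSTGROUP::" + hostGroupName + "%";  // token form used in exported blueprints
}

private static String createExportedHostName(String hostGroupName, String port) {
  return createExportedHostName(hostGroupName) + ":" + port;
}

private static String createExportedAddress(String port, String hostGroupName) {
  return createExportedHostName(hostGroupName) + ":" + port;
}

Under that assumption, every export assertion above reduces to checking that a concrete hostname was rewritten into a %HOSTGROUP::...% token, with the original port preserved where the property carried one.
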
[11/34] ambari git commit: AMBARI-20714 - Upgrade Catalog 2.5.1 Changes For HRC Background Task (jonathanhurley)

Posted by nc...@apache.org.
AMBARI-20714 - Upgrade Catalog 2.5.1 Changes For HRC Background Task (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4f419689
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4f419689
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4f419689

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 4f419689503fb634f402e0b4c5d697b8d382006b
Parents: a5dc75e
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Tue Apr 11 12:50:55 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Thu Apr 13 08:45:14 2017 -0400

----------------------------------------------------------------------
 .../orm/entities/HostRoleCommandEntity.java     |   2 +-
 .../server/upgrade/SchemaUpgradeHelper.java     |   1 +
 .../server/upgrade/UpgradeCatalog251.java       |  81 +++++++++
 .../main/resources/Ambari-DDL-Derby-CREATE.sql  |   2 +-
 .../main/resources/Ambari-DDL-MySQL-CREATE.sql  |   2 +-
 .../main/resources/Ambari-DDL-Oracle-CREATE.sql |   2 +-
 .../resources/Ambari-DDL-Postgres-CREATE.sql    |   2 +-
 .../resources/Ambari-DDL-SQLAnywhere-CREATE.sql |   2 +-
 .../resources/Ambari-DDL-SQLServer-CREATE.sql   |   2 +-
 .../server/upgrade/UpgradeCatalog251Test.java   | 166 +++++++++++++++++++
 10 files changed, 255 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/4f419689/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostRoleCommandEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostRoleCommandEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostRoleCommandEntity.java
index 6197940..86feceb 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostRoleCommandEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostRoleCommandEntity.java
@@ -201,7 +201,7 @@ public class HostRoleCommandEntity {
   private TopologyLogicalTaskEntity topologyLogicalTaskEntity;
 
   @Basic
-  @Column(name = "is_background_command", nullable = false)
+  @Column(name = "is_background", nullable = false)
   private short isBackgroundCommand = 0;
 
   public Long getTaskId() {

http://git-wip-us.apache.org/repos/asf/ambari/blob/4f419689/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
index 590a3e8..d022f1f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
@@ -189,6 +189,7 @@ public class SchemaUpgradeHelper {
       catalogBinder.addBinding().to(UpgradeCatalog2402.class);
       catalogBinder.addBinding().to(UpgradeCatalog242.class);
       catalogBinder.addBinding().to(UpgradeCatalog250.class);
+      catalogBinder.addBinding().to(UpgradeCatalog251.class);
       catalogBinder.addBinding().to(UpgradeCatalog300.class);
       catalogBinder.addBinding().to(FinalUpgradeCatalog.class);
 

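The one-line registration above is all the wiring a new catalog needs: SchemaUpgradeHelper collects every bound UpgradeCatalog through a Guice Multibinder and applies the chain in version order. A schematic sketch of that idea, with invented names and a naive version comparison (the real helper also drives the pre-DML and DML phases):

import java.util.Comparator;
import java.util.List;

// Schematic only: invented types, not Ambari's actual SchemaUpgradeHelper logic.
interface CatalogSketch {
  String getSourceVersion();   // e.g. "2.5.0" for UpgradeCatalog251
  String getTargetVersion();   // e.g. "2.5.1"
  void upgradeSchema();        // would run executeDDLUpdates() and friends
}

final class ChainSketch {
  static void run(List<CatalogSketch> bound, String currentSchemaVersion) {
    bound.sort(Comparator.comparing(CatalogSketch::getTargetVersion));
    String v = currentSchemaVersion;
    for (CatalogSketch c : bound) {
      // apply only catalogs whose source version is at or past the current schema
      if (c.getSourceVersion().compareTo(v) >= 0) {  // naive lexical compare
        c.upgradeSchema();
        v = c.getTargetVersion();
      }
    }
  }
}
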
http://git-wip-us.apache.org/repos/asf/ambari/blob/4f419689/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
new file mode 100644
index 0000000..a5f38a9
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.upgrade;
+
+import java.sql.SQLException;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
+
+/**
+ * The {@link UpgradeCatalog251} upgrades Ambari from 2.5.0 to 2.5.1.
+ */
+public class UpgradeCatalog251 extends AbstractUpgradeCatalog {
+
+  static final String HOST_ROLE_COMMAND_TABLE = "host_role_command";
+  static final String HRC_IS_BACKGROUND_COLUMN = "is_background";
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public String getSourceVersion() {
+    return "2.5.0";
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public String getTargetVersion() {
+    return "2.5.1";
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  protected void executeDDLUpdates() throws AmbariException, SQLException {
+    addBackgroundColumnToHostRoleCommand();
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  protected void executePreDMLUpdates() throws AmbariException, SQLException {
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  protected void executeDMLUpdates() throws AmbariException, SQLException {
+  }
+
+  /**
+   * Adds the {@value #HRC_IS_BACKGROUND_COLUMN} column to the
+   * {@value #HOST_ROLE_COMMAND_TABLE} table.
+   *
+   * @throws SQLException
+   */
+  private void addBackgroundColumnToHostRoleCommand() throws SQLException {
+    dbAccessor.addColumn(HOST_ROLE_COMMAND_TABLE,
+        new DBColumnInfo(HRC_IS_BACKGROUND_COLUMN, Short.class, null, 0, false));
+  }
+}

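In effect, the addColumn() call in executeDDLUpdates() boils down to a single ALTER TABLE per database flavor, matching the DEFAULT 0 NOT NULL column definition added to the CREATE scripts below. A hand-written JDBC equivalent for Postgres, as a sketch only (the connection URL and credentials are placeholders; DBAccessor generates the real statement):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

// Sketch of the effective DDL for
// new DBColumnInfo("is_background", Short.class, null, 0, false).
public class AddIsBackgroundColumnSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder connection details, not Ambari's configuration.
    try (Connection conn = DriverManager.getConnection(
             "jdbc:postgresql://localhost:5432/ambari", "user", "pass");
         Statement stmt = conn.createStatement()) {
      stmt.executeUpdate(
          "ALTER TABLE host_role_command " +
          "ADD COLUMN is_background SMALLINT DEFAULT 0 NOT NULL");
    }
  }
}
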
http://git-wip-us.apache.org/repos/asf/ambari/blob/4f419689/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
index 5785a9d..fd49b94 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
@@ -383,7 +383,7 @@ CREATE TABLE host_role_command (
   role_command VARCHAR(255),
   command_detail VARCHAR(255),
   custom_command_name VARCHAR(255),
-  is_background_command SMALLINT DEFAULT 0 NOT NULL,
+  is_background SMALLINT DEFAULT 0 NOT NULL,
   CONSTRAINT PK_host_role_command PRIMARY KEY (task_id),
   CONSTRAINT FK_host_role_command_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
   CONSTRAINT FK_host_role_command_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES stage (stage_id, request_id));

http://git-wip-us.apache.org/repos/asf/ambari/blob/4f419689/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
index 96ef0ac..9fc3209 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
@@ -404,7 +404,7 @@ CREATE TABLE host_role_command (
   structured_out LONGBLOB,
   command_detail VARCHAR(255),
   custom_command_name VARCHAR(255),
-  is_background_command SMALLINT DEFAULT 0 NOT NULL,
+  is_background SMALLINT DEFAULT 0 NOT NULL,
   CONSTRAINT PK_host_role_command PRIMARY KEY (task_id),
   CONSTRAINT FK_host_role_command_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
   CONSTRAINT FK_host_role_command_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES stage (stage_id, request_id));

http://git-wip-us.apache.org/repos/asf/ambari/blob/4f419689/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
index 3396ce9..310208d 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
@@ -384,7 +384,7 @@ CREATE TABLE host_role_command (
   structured_out BLOB NULL,
   command_detail VARCHAR2(255) NULL,
   custom_command_name VARCHAR2(255) NULL,
-  is_background_command SMALLINT DEFAULT 0 NOT NULL,
+  is_background SMALLINT DEFAULT 0 NOT NULL,
   CONSTRAINT PK_host_role_command PRIMARY KEY (task_id),
   CONSTRAINT FK_host_role_command_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
   CONSTRAINT FK_host_role_command_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES stage (stage_id, request_id));

http://git-wip-us.apache.org/repos/asf/ambari/blob/4f419689/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
index c6bfa94..c052104 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
@@ -383,7 +383,7 @@ CREATE TABLE host_role_command (
   role_command VARCHAR(255),
   command_detail VARCHAR(255),
   custom_command_name VARCHAR(255),
-  is_background_command SMALLINT DEFAULT 0 NOT NULL,
+  is_background SMALLINT DEFAULT 0 NOT NULL,
   CONSTRAINT PK_host_role_command PRIMARY KEY (task_id),
   CONSTRAINT FK_host_role_command_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
   CONSTRAINT FK_host_role_command_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES stage (stage_id, request_id));

http://git-wip-us.apache.org/repos/asf/ambari/blob/4f419689/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
index bbf5d3c..5a58ef8 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
@@ -382,7 +382,7 @@ CREATE TABLE host_role_command (
   structured_out IMAGE,
   command_detail VARCHAR(255),
   custom_command_name VARCHAR(255),
-  is_background_command SMALLINT DEFAULT 0 NOT NULL,
+  is_background SMALLINT DEFAULT 0 NOT NULL,
   CONSTRAINT PK_host_role_command PRIMARY KEY (task_id),
   CONSTRAINT FK_host_role_command_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
   CONSTRAINT FK_host_role_command_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES stage (stage_id, request_id));

http://git-wip-us.apache.org/repos/asf/ambari/blob/4f419689/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
index 13ab01d..8c7a31d 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
@@ -388,7 +388,7 @@ CREATE TABLE host_role_command (
   role_command VARCHAR(255),
   command_detail VARCHAR(255),
   custom_command_name VARCHAR(255),
-  is_background_command SMALLINT DEFAULT 0 NOT NULL,
+  is_background SMALLINT DEFAULT 0 NOT NULL,
   CONSTRAINT PK_host_role_command PRIMARY KEY CLUSTERED (task_id),
   CONSTRAINT FK_host_role_command_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
   CONSTRAINT FK_host_role_command_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES stage (stage_id, request_id));
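
The rename above touches five dialect-specific CREATE scripts, so the main risk is one dialect drifting out of sync. A small, hypothetical sanity-check script like the following could confirm that every DDL file uses the new column name and none still references the old one; the glob path is an assumption based on the file paths in this diff:

    import glob
    import io
    import sys

    # Assumed location of the dialect CREATE scripts in an ambari-server checkout.
    DDL_GLOB = "ambari-server/src/main/resources/Ambari-DDL-*-CREATE.sql"

    OLD_NAME = "is_background_command"
    NEW_NAME = "is_background"

    failures = []
    for path in glob.glob(DDL_GLOB):
        with io.open(path, encoding="utf-8") as handle:
            text = handle.read()
        if OLD_NAME in text:
            failures.append("%s still references %s" % (path, OLD_NAME))
        elif NEW_NAME not in text:
            failures.append("%s is missing %s" % (path, NEW_NAME))

    if failures:
        sys.exit("\n".join(failures))
    print("all DDL scripts agree on the column name %s" % NEW_NAME)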

http://git-wip-us.apache.org/repos/asf/ambari/blob/4f419689/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog251Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog251Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog251Test.java
new file mode 100644
index 0000000..4575998
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog251Test.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.upgrade;
+
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.capture;
+import static org.easymock.EasyMock.eq;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.newCapture;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.reset;
+import static org.easymock.EasyMock.verify;
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.Statement;
+
+import javax.persistence.EntityManager;
+
+import org.apache.ambari.server.actionmanager.ActionManager;
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.KerberosHelper;
+import org.apache.ambari.server.controller.MaintenanceStateHelper;
+import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.stack.OsFamily;
+import org.easymock.Capture;
+import org.easymock.EasyMockRunner;
+import org.easymock.Mock;
+import org.easymock.MockType;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import com.google.gson.Gson;
+import com.google.inject.Binder;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.Module;
+import com.google.inject.Provider;
+
+/**
+ * {@link UpgradeCatalog251} unit tests.
+ */
+@RunWith(EasyMockRunner.class)
+public class UpgradeCatalog251Test {
+
+  @Mock(type = MockType.STRICT)
+  private Provider<EntityManager> entityManagerProvider;
+
+  @Mock(type = MockType.NICE)
+  private EntityManager entityManager;
+
+  @Mock(type = MockType.NICE)
+  private DBAccessor dbAccessor;
+
+  @Mock(type = MockType.NICE)
+  private Configuration configuration;
+
+  @Mock(type = MockType.NICE)
+  private Connection connection;
+
+  @Mock(type = MockType.NICE)
+  private Statement statement;
+
+  @Mock(type = MockType.NICE)
+  private ResultSet resultSet;
+
+  @Mock(type = MockType.NICE)
+  private OsFamily osFamily;
+
+  @Mock(type = MockType.NICE)
+  private KerberosHelper kerberosHelper;
+
+  @Mock(type = MockType.NICE)
+  private ActionManager actionManager;
+
+  @Mock(type = MockType.NICE)
+  private Config config;
+
+  @Mock(type = MockType.STRICT)
+  private Service service;
+
+  @Mock(type = MockType.NICE)
+  private Clusters clusters;
+
+  @Mock(type = MockType.NICE)
+  private Cluster cluster;
+
+  @Mock(type = MockType.NICE)
+  private Injector injector;
+
+  @Before
+  public void init() {
+    reset(entityManagerProvider, injector);
+
+    expect(entityManagerProvider.get()).andReturn(entityManager).anyTimes();
+
+    expect(injector.getInstance(Gson.class)).andReturn(null).anyTimes();
+    expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null).anyTimes();
+    expect(injector.getInstance(KerberosHelper.class)).andReturn(kerberosHelper).anyTimes();
+
+    replay(entityManagerProvider, injector);
+  }
+
+  @Test
+  public void testExecuteDDLUpdates() throws Exception {
+    Capture<DBColumnInfo> hrcBackgroundColumnCapture = newCapture();
+    dbAccessor.addColumn(eq(UpgradeCatalog251.HOST_ROLE_COMMAND_TABLE), capture(hrcBackgroundColumnCapture));
+
+    expect(dbAccessor.getConnection()).andReturn(connection).anyTimes();
+    expect(connection.createStatement()).andReturn(statement).anyTimes();
+    expect(statement.executeQuery(anyObject(String.class))).andReturn(resultSet).anyTimes();
+    expect(configuration.getDatabaseType()).andReturn(Configuration.DatabaseType.POSTGRES).anyTimes();
+
+    replay(dbAccessor, configuration, connection, statement, resultSet);
+
+    Module module = new Module() {
+      @Override
+      public void configure(Binder binder) {
+        binder.bind(DBAccessor.class).toInstance(dbAccessor);
+        binder.bind(OsFamily.class).toInstance(osFamily);
+        binder.bind(EntityManager.class).toInstance(entityManager);
+        binder.bind(Configuration.class).toInstance(configuration);
+      }
+    };
+
+    Injector injector = Guice.createInjector(module);
+    UpgradeCatalog251 upgradeCatalog251 = injector.getInstance(UpgradeCatalog251.class);
+    upgradeCatalog251.executeDDLUpdates();
+
+    verify(dbAccessor);
+
+    DBColumnInfo captured = hrcBackgroundColumnCapture.getValue();
+    Assert.assertEquals(UpgradeCatalog251.HRC_IS_BACKGROUND_COLUMN, captured.getName());
+    Assert.assertEquals(Integer.valueOf(0), captured.getDefaultValue());
+    Assert.assertEquals(Short.class, captured.getType());
+  }
+}
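
As a usage note: this new test should be runnable on its own with Maven Surefire's single-test filter, e.g. mvn test -Dtest=UpgradeCatalog251Test from the ambari-server module (assuming the module's standard Surefire configuration).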


[15/34] ambari git commit: AMBARI-20666 - leading or trailing commas in dfs.cluster.administrators prevent services from starting

Posted by nc...@apache.org.
AMBARI-20666 - leading or trailing commas in dfs.cluster.administrators prevent services from starting


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/269ac0a1
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/269ac0a1
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/269ac0a1

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 269ac0a1f28d26180a1f684869a884d18f96d086
Parents: b9c82ad
Author: Tim Thorpe <tt...@apache.org>
Authored: Thu Apr 13 10:51:36 2017 -0700
Committer: Tim Thorpe <tt...@apache.org>
Committed: Thu Apr 13 10:51:36 2017 -0700

----------------------------------------------------------------------
 .../HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py  | 4 ++--
 .../HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py    | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/269ac0a1/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
index 4d0de7f..1f17cd1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
@@ -109,8 +109,8 @@ def create_users_and_groups(user_and_groups):
   if len(parts) == 1:
     parts.append("")
 
-  users_list = parts[0].split(",") if parts[0] else []
-  groups_list = parts[1].split(",") if parts[1] else []
+  users_list = parts[0].strip(",").split(",") if parts[0] else []
+  groups_list = parts[1].strip(",").split(",") if parts[1] else []
 
   # skip creating groups and users if * is provided as value.
   users_list = filter(lambda x: x != '*' , users_list)

http://git-wip-us.apache.org/repos/asf/ambari/blob/269ac0a1/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py
index 320872e..5d79084 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py
@@ -109,8 +109,8 @@ def create_users_and_groups(user_and_groups):
   if len(parts) == 1:
     parts.append("")
 
-  users_list = parts[0].split(",") if parts[0] else []
-  groups_list = parts[1].split(",") if parts[1] else []
+  users_list = parts[0].strip(",").split(",") if parts[0] else []
+  groups_list = parts[1].strip(",").split(",") if parts[1] else []
 
   if users_list:
     User(users_list,
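
The whole fix is the strip(",") inserted before each split: without it, a leading or trailing comma in dfs.cluster.administrators produces empty strings in the users or groups list, and the later user/group creation then fails on the empty name. A minimal standalone illustration of the difference (the sample value here is made up):

    # A dfs.cluster.administrators-style value with stray commas at both ends.
    value = ",hdfs,yarn,"

    # Plain split keeps the empty fields produced by the stray commas.
    print(value.split(","))             # ['', 'hdfs', 'yarn', '']

    # Stripping commas first yields only real names, matching the patch above.
    print(value.strip(",").split(","))  # ['hdfs', 'yarn']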


[18/34] ambari git commit: AMBARI-19996 Perform Kinit on Kafka Start (Bharat Viswanadham via dili)

Posted by nc...@apache.org.
AMBARI-19996 Perform Kinit on Kafka Start (Bharat Viswanadham via dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/14c1ffd3
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/14c1ffd3
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/14c1ffd3

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 14c1ffd3288742354f17cbf49b6cebfee913a16f
Parents: 0c778e7
Author: Di Li <di...@apache.org>
Authored: Thu Apr 13 16:54:34 2017 -0400
Committer: Di Li <di...@apache.org>
Committed: Thu Apr 13 16:54:34 2017 -0400

----------------------------------------------------------------------
 .../KAFKA/0.8.1/package/scripts/kafka_broker.py                | 6 ++++++
 1 file changed, 6 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/14c1ffd3/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/kafka_broker.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/kafka_broker.py b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/kafka_broker.py
index 96a8293..81715f9 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/kafka_broker.py
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/kafka_broker.py
@@ -77,6 +77,12 @@ class KafkaBroker(Script):
     import params
     env.set_params(params)
     self.configure(env, upgrade_type=upgrade_type)
+
+    if params.security_enabled:
+      if params.version and check_stack_feature(StackFeature.KAFKA_KERBEROS, params.version):
+        kafka_kinit_cmd = format("{kinit_path_local} -kt {kafka_keytab_path} {kafka_jaas_principal};")
+        Execute(kafka_kinit_cmd, user=params.kafka_user)
+
     if params.is_supported_kafka_ranger:
      setup_ranger_kafka()  # Ranger Kafka Plugin related call
     daemon_cmd = format('source {params.conf_dir}/kafka-env.sh ; {params.kafka_bin} start')
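
For context on the added block: on a Kerberized cluster the broker needs a ticket for the Kafka service user before start-up can authenticate to ZooKeeper, hence the kinit guarded by security_enabled and the KAFKA_KERBEROS stack feature. A simplified sketch of the same pattern in plain Python, with subprocess standing in for Ambari's Execute resource and all paths and the principal being placeholder values:

    import subprocess

    # Placeholder values; in Ambari these come from the resolved params module.
    KINIT_PATH = "/usr/bin/kinit"
    KEYTAB = "/etc/security/keytabs/kafka.service.keytab"
    PRINCIPAL = "kafka/broker.example.com@EXAMPLE.COM"
    SECURITY_ENABLED = True

    def kinit_before_start():
        # Acquire a Kerberos ticket only on secured clusters, mirroring the
        # security_enabled guard in the patch above.
        if SECURITY_ENABLED:
            subprocess.check_call([KINIT_PATH, "-kt", KEYTAB, PRINCIPAL])

    kinit_before_start()
    # ...the broker start command would follow, as in kafka_broker.py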