Posted to commits@ambari.apache.org by nc...@apache.org on 2017/07/24 15:26:27 UTC

[01/50] [abbrv] ambari git commit: AMBARI-21444. Hive warehouse fixes. (vbrodetskyi)

Repository: ambari
Updated Branches:
  refs/heads/branch-feature-AMBARI-12556 c0201e248 -> 6283ae4f6


AMBARI-21444. Hive warehouse fixes. (vbrodetskyi)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/31b9d777
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/31b9d777
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/31b9d777

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 31b9d7774b22f59a4d7120c9836c73a5216fd529
Parents: 383b8c7
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Wed Jul 12 15:35:53 2017 +0300
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Wed Jul 12 15:35:53 2017 +0300

----------------------------------------------------------------------
 .../HIVE/0.12.0.2.0/package/scripts/hive.py     |  3 +-
 .../0.12.0.2.0/package/scripts/params_linux.py  |  2 ++
 .../services/HIVE/configuration/hive-site.xml   | 35 ++++++++++++++++++++
 .../stacks/2.0.6/HIVE/test_hive_server.py       |  2 ++
 4 files changed, 41 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/31b9d777/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
index 36725c3..8e176b6 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
@@ -224,7 +224,8 @@ def setup_hiveserver2():
                          type="directory",
                           action="create_on_execute",
                           owner=params.hive_user,
-                          mode=0777
+                          group=params.user_group,
+                          mode=params.hive_apps_whs_mode
     )
   else:
     Logger.info(format("Not creating warehouse directory '{hive_apps_whs_dir}', as the location is not in DFS."))

http://git-wip-us.apache.org/repos/asf/ambari/blob/31b9d777/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
index 078076a..21b3d8b 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
@@ -505,6 +505,8 @@ hive_env_sh_template = config['configurations']['hive-env']['content']
 
 hive_hdfs_user_dir = format("/user/{hive_user}")
 hive_hdfs_user_mode = 0755
+#Parameter for custom warehouse directory permissions. Permissions are in octal format and need to be converted to decimal
+hive_apps_whs_mode = int(default('/configurations/hive-site/custom.hive.warehouse.mode', '0777'), 8)
 hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.warehouse.dir"]
 whs_dir_protocol = urlparse(hive_apps_whs_dir).scheme
 hive_exec_scratchdir = config['configurations']['hive-site']["hive.exec.scratchdir"]
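
A note on the octal handling above: default() returns the configuration value as a string, and int(value, 8) parses that octal notation into the plain integer the resource layer works with. A minimal standalone sketch of the same conversion (plain Python; parse_mode is a hypothetical helper, not Ambari code):

    def parse_mode(value, fallback='0777'):
        # int(..., 8) treats the string as base-8: '0777' -> 511 decimal,
        # which renders back as 0777 in octal notation.
        return int(value or fallback, 8)

    print(oct(parse_mode('0770')))   # 0o770 (504 decimal)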

http://git-wip-us.apache.org/repos/asf/ambari/blob/31b9d777/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-site.xml
new file mode 100644
index 0000000..a07c16f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-site.xml
@@ -0,0 +1,35 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<configuration supports_final="true">
+  <property>
+    <name>hive.warehouse.subdir.inherit.perms</name>
+    <value>true</value>
+    <description>Set this to true if table directories should inherit the permissions of the warehouse or database directory instead of being created with permissions derived from dfs umask
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>hive.start.cleanup.scratchdir</name>
+    <value>false</value>
+    <description>To cleanup the hive scratchdir while starting the hive server.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
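
How these settings interact with the warehouse mode above: without inheritance, HDFS derives each new directory's mode from the dfs umask; with hive.warehouse.subdir.inherit.perms=true, table directories copy the warehouse directory's mode instead. A quick worked example of the umask arithmetic (plain Python; values are illustrative):

    umask = 0o022                # a common HDFS default
    derived = 0o777 & ~umask     # 0o755 -- what a table dir gets from the umask
    inherited = 0o777            # with inherit.perms=true under a 0777 warehouse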

http://git-wip-us.apache.org/repos/asf/ambari/blob/31b9d777/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
index ae2ec86..fc6d14e 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
@@ -491,6 +491,7 @@ class TestHiveServer(RMFTestCase):
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='missing_principal', default_fs=default_fs_default,
+        group = 'hadoop',
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', '/user/hive',
@@ -703,6 +704,7 @@ class TestHiveServer(RMFTestCase):
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
+        group = 'hadoop',
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', '/user/hive',


[30/50] [abbrv] ambari git commit: AMBARI-21520. Ambari server logs NPE with no additional stack trace on any host component start/stop command. (vbrodetskyi)

Posted by nc...@apache.org.
AMBARI-21520. Ambari server logs NPE with no additional stack trace on any host component start/stop command. (vbrodetskyi)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/30cd7157
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/30cd7157
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/30cd7157

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 30cd715793183e0c00ea7fdb900482cfcdc13a8a
Parents: 4b189a1
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Wed Jul 19 17:18:27 2017 +0300
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Wed Jul 19 17:19:11 2017 +0300

----------------------------------------------------------------------
 .../AmbariManagementControllerImpl.java         |  3 ++
 .../ambari/server/state/ConfigHelper.java       | 29 ++++++++++++++++++--
 2 files changed, 30 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/30cd7157/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 38842fa..fac7b94 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -2707,6 +2707,9 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
       return requestStages;
     }
 
+    // check all stack configs are present in desired configs
+    configHelper.checkAllStageConfigsPresentInDesiredConfigs(cluster);
+
     // caching upgrade suspended
     boolean isUpgradeSuspended = cluster.isUpgradeSuspended();
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/30cd7157/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
index 2a70ee1..e8250fe 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
@@ -699,7 +699,9 @@ public class ConfigHelper {
     for (PropertyInfo stackProperty : stackProperties) {
       if (stackProperty.getPropertyTypes().contains(propertyType)) {
         String stackPropertyConfigType = fileNameToConfigType(stackProperty.getFilename());
-        result.put(stackProperty, actualConfigs.get(stackPropertyConfigType).getProperties().get(stackProperty.getName()));
+        if (actualConfigs.containsKey(stackPropertyConfigType)) {
+          result.put(stackProperty, actualConfigs.get(stackPropertyConfigType).getProperties().get(stackProperty.getName()));
+        }
       }
     }
 
@@ -776,13 +778,36 @@ public class ConfigHelper {
     for (PropertyInfo stackProperty : stackProperties) {
       if (stackProperty.getPropertyTypes().contains(propertyType)) {
         String stackPropertyConfigType = fileNameToConfigType(stackProperty.getFilename());
-        result.add(actualConfigs.get(stackPropertyConfigType).getProperties().get(stackProperty.getName()));
+        if (actualConfigs.containsKey(stackPropertyConfigType)) {
+          result.add(actualConfigs.get(stackPropertyConfigType).getProperties().get(stackProperty.getName()));
+        }
       }
     }
 
     return result;
   }
 
+  public void checkAllStageConfigsPresentInDesiredConfigs(Cluster cluster) throws AmbariException {
+    StackId stackId = cluster.getDesiredStackVersion();
+    Set<String> stackConfigTypes = ambariMetaInfo.getStack(stackId.getStackName(),
+            stackId.getStackVersion()).getConfigTypeAttributes().keySet();
+    Map<String, Config> actualConfigs = new HashMap<>();
+    Map<String, DesiredConfig> desiredConfigs = cluster.getDesiredConfigs();
+
+    for (Map.Entry<String, DesiredConfig> desiredConfigEntry : desiredConfigs.entrySet()) {
+      String configType = desiredConfigEntry.getKey();
+      DesiredConfig desiredConfig = desiredConfigEntry.getValue();
+      actualConfigs.put(configType, cluster.getConfig(configType, desiredConfig.getTag()));
+    }
+
+    for (String stackConfigType : stackConfigTypes) {
+      if (!actualConfigs.containsKey(stackConfigType)) {
+        LOG.error(String.format("Unable to find stack configuration %s in ambari configs!", stackConfigType));
+      }
+    }
+
+  }
+
   /***
    * Fetch all the config values of a given PropertyType. For eg: Fetch all stack configs that are of type "user"
    * @param cluster
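
The guard added in both hunks is the containsKey-before-get pattern: when a stack defines a config type that is absent from the cluster's desired configs, actualConfigs.get(stackPropertyConfigType) returns null, and the chained .getProperties() call is what throws the bare NPE. The same failure mode and guard, sketched in Python purely for illustration (a dict stand-in, not the Ambari API):

    configs = {'hdfs-site': {'properties': {'dfs.replication': '3'}}}
    config_type = 'hive-site'   # defined by the stack, missing from desired configs

    # Unguarded: configs.get(config_type) is None, and None['properties']
    # raises TypeError -- the Python analogue of the NPE above.
    if config_type in configs:  # the added guard
        value = configs[config_type]['properties'].get('hive.metastore.warehouse.dir')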


[49/50] [abbrv] ambari git commit: AMBARI-21532. Namenode restart - PID file delete happens before the call to check status (dlysnichenko)

Posted by nc...@apache.org.
AMBARI-21532. Namenode restart - PID file delete happens before the call to check status (dlysnichenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f500c9e4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f500c9e4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f500c9e4

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: f500c9e48d8b9ed7b3aebe547720da9dc9fc10e5
Parents: bfe772b
Author: Lisnichenko Dmitro <dl...@hortonworks.com>
Authored: Mon Jul 24 16:59:26 2017 +0300
Committer: Lisnichenko Dmitro <dl...@hortonworks.com>
Committed: Mon Jul 24 16:59:26 2017 +0300

----------------------------------------------------------------------
 .../libraries/functions/check_process_status.py | 20 ++++++++++++++++++++
 .../libraries/script/script.py                  |  2 +-
 .../HDFS/2.1.0.2.0/package/scripts/utils.py     |  4 ++++
 .../HDFS/3.0.0.3.0/package/scripts/utils.py     |  2 ++
 .../0.8/services/HDFS/package/scripts/utils.py  |  3 +++
 5 files changed, 30 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f500c9e4/ambari-common/src/main/python/resource_management/libraries/functions/check_process_status.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/check_process_status.py b/ambari-common/src/main/python/resource_management/libraries/functions/check_process_status.py
index 7961f00..ac54bc9 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/check_process_status.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/check_process_status.py
@@ -59,3 +59,23 @@ def check_process_status(pid_file):
     Logger.info("Process with pid {0} is not running. Stale pid file"
               " at {1}".format(pid, pid_file))
     raise ComponentIsNotRunning()
+
+
+def wait_process_stopped(pid_file):
+  """
+    Waits until component is actually stopped (check is performed using
+    check_process_status() method.
+    """
+  import time
+  component_is_stopped = False
+  counter = 0
+  while not component_is_stopped:
+    try:
+      if counter % 10 == 0:
+        Logger.logger.info("Waiting for actual component stop")
+      check_process_status(pid_file)
+      time.sleep(1)
+      counter += 1
+    except ComponentIsNotRunning, e:
+      Logger.logger.debug(" reports ComponentIsNotRunning")
+      component_is_stopped = True
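
The ordering is the substance of this fix: the PID file used to be deleted as soon as the kill command returned, so a status check racing a slow NameNode shutdown could observe a live process with no PID file. wait_process_stopped() polls check_process_status() once a second until it raises ComponentIsNotRunning, and only then does the caller delete the PID file. The same loop reduced to a standalone sketch (plain Python; is_running is a hypothetical stand-in for check_process_status):

    import time

    def wait_until_stopped(pid_file, is_running, log_every=10):
        # Poll once a second until the process behind pid_file is gone;
        # the caller removes pid_file only after this returns.
        counter = 0
        while is_running(pid_file):
            if counter % log_every == 0:
                print("Waiting for actual component stop")
            time.sleep(1)
            counter += 1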

http://git-wip-us.apache.org/repos/asf/ambari/blob/f500c9e4/ambari-common/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index 2b374c5..c2c89c4 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -411,7 +411,7 @@ class Script(object):
     status_method = getattr(self, 'status')
     component_is_stopped = False
     counter = 0
-    while not component_is_stopped :
+    while not component_is_stopped:
       try:
         if counter % 100 == 0:
           Logger.logger.info("Waiting for actual component stop")

http://git-wip-us.apache.org/repos/asf/ambari/blob/f500c9e4/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
index d861ba9..2535f60 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
@@ -24,6 +24,7 @@ import ambari_simplejson as json # simplejson is much faster comparing to Python
 from resource_management.core.resources.system import Directory, File, Execute
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions import check_process_status
+from resource_management.libraries.functions.check_process_status import wait_process_stopped
 from resource_management.libraries.functions import StackFeature
 from resource_management.libraries.functions.stack_features import check_stack_feature
 from resource_management.core import shell
@@ -281,6 +282,9 @@ def service(action=None, name=None, user=None, options="", create_pid_dir=False,
     except:
       show_logs(log_dir, user)
       raise
+
+    wait_process_stopped(pid_file)
+
     File(pid_file, action="delete")
 
 def get_jmx_data(nn_address, modeler_type, metric, encrypted=False, security_enabled=False):

http://git-wip-us.apache.org/repos/asf/ambari/blob/f500c9e4/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/utils.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/utils.py
index 53774c6..0c28a00 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/utils.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/utils.py
@@ -22,6 +22,7 @@ import urllib2
 import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
 
 from resource_management.core.resources.system import Directory, File, Execute
+from resource_management.libraries.functions.check_process_status import wait_process_stopped
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions import check_process_status
 from resource_management.libraries.functions import StackFeature
@@ -278,6 +279,7 @@ def service(action=None, name=None, user=None, options="", create_pid_dir=False,
     except:
       show_logs(log_dir, user)
       raise
+    wait_process_stopped(pid_file)
     File(pid_file, action="delete")
 
 def get_jmx_data(nn_address, modeler_type, metric, encrypted=False, security_enabled=False):

http://git-wip-us.apache.org/repos/asf/ambari/blob/f500c9e4/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/utils.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/utils.py
index 7dcbca8..f7febb0 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/utils.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/utils.py
@@ -19,6 +19,7 @@ limitations under the License.
 import os
 
 from resource_management import *
+from resource_management.libraries.functions.check_process_status import wait_process_stopped
 import re
 
 
@@ -123,6 +124,8 @@ def service(action=None, name=None, user=None, create_pid_dir=False,
           not_if=service_is_up
   )
   if action == "stop":
+    wait_process_stopped(pid_file)
+
     File(pid_file,
          action="delete",
     )


[14/50] [abbrv] ambari git commit: AMBARI-21482. Blueprints: HSI config 'num_llap_nodes' and 'num_llap_nodes_for_llap_daemons' should be calculated and recommended via Stack Advisor during Blueprint install only if 'num_llap_nodes' config value is not provided in Blueprint.

Posted by nc...@apache.org.
AMBARI-21482. Blueprints: HSI config 'num_llap_nodes' and 'num_llap_nodes_for_llap_daemons' should be calculated and recommended via Stack Advisor during Blueprint install only if 'num_llap_nodes' config value is not provided in Blueprint.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e799f522
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e799f522
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e799f522

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: e799f52268db9330a84b1e982b3b88e591b04649
Parents: c7f4228
Author: Swapan Shridhar <ss...@hortonworks.com>
Authored: Fri Jul 14 18:15:52 2017 -0700
Committer: Swapan Shridhar <ss...@hortonworks.com>
Committed: Fri Jul 14 22:27:56 2017 -0700

----------------------------------------------------------------------
 .../main/resources/stacks/HDP/2.5/services/stack_advisor.py    | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e799f522/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
index 4ca74ee..1c19d8b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
@@ -1013,8 +1013,10 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
       # Set 'num_llap_nodes_requested' for 1st invocation, as it gets passed as 1 otherwise, read from config.
 
       # Check if its : 1. 1st invocation from UI ('enable_hive_interactive' in changed-configurations)
-      # OR 2. 1st invocation from BP (services['changed-configurations'] should be empty in this case)
-      if (changed_configs_has_enable_hive_int or  0 == len(services['changed-configurations'])) \
+      # OR 2. 1st invocation from BP (services['changed-configurations'] should be empty in this case and 'num_llap_nodes' not defined)
+      if (changed_configs_has_enable_hive_int
+          or (0 == len(services['changed-configurations'])
+              and not services['configurations']['hive-interactive-env']['properties']['num_llap_nodes'])) \
         and services['configurations']['hive-interactive-env']['properties']['enable_hive_interactive']:
         num_llap_nodes_requested = min_nodes_required
       else:
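
The added clause changes how a first Blueprint invocation is detected: an empty changed-configurations list alone no longer triggers recalculation; 'num_llap_nodes' must also be unset. Since the value arrives as a string, ordinary Python truthiness drives the branch (illustrative values):

    not ''    # True  -> no value in the Blueprint, recommend via Stack Advisor
    not '3'   # False -> value provided, leave it untouched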


[45/50] [abbrv] ambari git commit: AMBARI-21552 - Pass Repository ID To Upgrade Prechecks (jonathanhurley)

Posted by nc...@apache.org.
AMBARI-21552 - Pass Repository ID To Upgrade Prechecks (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d77bde7f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d77bde7f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d77bde7f

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: d77bde7fde932529dca524bf7112dd64f5c56b4d
Parents: eca5599
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Fri Jul 21 14:55:49 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Fri Jul 21 23:44:39 2017 -0400

----------------------------------------------------------------------
 .../server/controller/PrereqCheckRequest.java   | 28 ++++----
 .../PreUpgradeCheckResourceProvider.java        | 70 ++++++++++++--------
 .../ambari/server/state/UpgradeContext.java     | 15 ++---
 .../ambari/server/state/UpgradeHelper.java      | 59 ++++++++---------
 .../checks/AbstractCheckDescriptorTest.java     | 11 ++-
 .../server/checks/AtlasPresenceCheckTest.java   |  4 +-
 .../checks/ClientRetryPropertyCheckTest.java    | 22 +++++-
 .../checks/ComponentsInstallationCheckTest.java | 16 ++++-
 .../checks/ConfigurationMergeCheckTest.java     | 13 ++--
 .../HiveDynamicServiceDiscoveryCheckTest.java   | 23 +++++--
 .../checks/HiveMultipleMetastoreCheckTest.java  | 26 +++++++-
 .../server/checks/HostsHeartbeatCheckTest.java  | 16 ++++-
 .../checks/HostsMasterMaintenanceCheckTest.java | 27 +++++++-
 .../checks/HostsRepositoryVersionCheckTest.java | 40 +++++++----
 .../server/checks/InstallPackagesCheckTest.java | 18 +++--
 ...duce2JobHistoryStatePreservingCheckTest.java |  7 +-
 .../checks/PreviousUpgradeCompletedTest.java    | 11 ++-
 .../server/checks/ServicePresenceCheckTest.java | 13 +++-
 .../ServicesMaintenanceModeCheckTest.java       | 16 ++++-
 .../ServicesNamenodeTruncateCheckTest.java      | 19 ++++--
 .../server/checks/ServicesUpCheckTest.java      | 17 ++++-
 ...nTimelineServerStatePreservingCheckTest.java |  7 +-
 .../PreUpgradeCheckResourceProviderTest.java    |  6 +-
 .../ambari/server/state/UpgradeHelperTest.java  | 14 ++--
 24 files changed, 349 insertions(+), 149 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/d77bde7f/ambari-server/src/main/java/org/apache/ambari/server/controller/PrereqCheckRequest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/PrereqCheckRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/PrereqCheckRequest.java
index c8c9f9e..bd207ae 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/PrereqCheckRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/PrereqCheckRequest.java
@@ -21,6 +21,7 @@ import java.util.HashMap;
 import java.util.Map;
 
 import org.apache.ambari.server.checks.CheckDescription;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.stack.PrereqCheckStatus;
 import org.apache.ambari.server.state.stack.UpgradePack.PrerequisiteCheckConfig;
@@ -31,9 +32,8 @@ import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
  */
 public class PrereqCheckRequest {
   private String m_clusterName;
-  private String m_repositoryVersion;
   private StackId m_sourceStackId;
-  private StackId m_targetStackId;
+  private RepositoryVersionEntity m_targetRepositoryVersion;
   private PrerequisiteCheckConfig m_prereqCheckConfig;
 
   private UpgradeType m_upgradeType;
@@ -65,11 +65,11 @@ public class PrereqCheckRequest {
   }
 
   public String getRepositoryVersion() {
-    return m_repositoryVersion;
-  }
+    if (null == m_targetRepositoryVersion) {
+      return null;
+    }
 
-  public void setRepositoryVersion(String repositoryVersion) {
-    m_repositoryVersion = repositoryVersion;
+    return m_targetRepositoryVersion.getVersion();
   }
 
   /**
@@ -115,17 +115,21 @@ public class PrereqCheckRequest {
    * @return the targetStackId
    */
   public StackId getTargetStackId() {
-    return m_targetStackId;
+    if (null == m_targetRepositoryVersion) {
+      return null;
+    }
+
+    return m_targetRepositoryVersion.getStackId();
   }
 
   /**
-   * Sets the target stack of the upgrade.
+   * Sets the target of the upgrade.
    *
-   * @param targetStackId
-   *          the targetStackId to set
+   * @param targetRepositoryVersion
+   *          the target repository version
    */
-  public void setTargetStackId(StackId targetStackId) {
-    m_targetStackId = targetStackId;
+  public void setTargetRepositoryVersion(RepositoryVersionEntity targetRepositoryVersion) {
+    m_targetRepositoryVersion = targetRepositoryVersion;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/d77bde7f/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProvider.java
index ea8fb37..24a55c1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProvider.java
@@ -47,11 +47,13 @@ import org.apache.ambari.server.state.CheckHelper;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.ServiceInfo;
+import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.UpgradeHelper;
 import org.apache.ambari.server.state.stack.PrerequisiteCheck;
 import org.apache.ambari.server.state.stack.UpgradePack;
 import org.apache.ambari.server.state.stack.upgrade.Direction;
 import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
+import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -68,20 +70,21 @@ public class PreUpgradeCheckResourceProvider extends ReadOnlyResourceProvider {
 
   //----- Property ID constants ---------------------------------------------
 
-  public static final String UPGRADE_CHECK_ID_PROPERTY_ID                 = PropertyHelper.getPropertyId("UpgradeChecks", "id");
-  public static final String UPGRADE_CHECK_CHECK_PROPERTY_ID              = PropertyHelper.getPropertyId("UpgradeChecks", "check");
-  public static final String UPGRADE_CHECK_STATUS_PROPERTY_ID             = PropertyHelper.getPropertyId("UpgradeChecks", "status");
-  public static final String UPGRADE_CHECK_REASON_PROPERTY_ID             = PropertyHelper.getPropertyId("UpgradeChecks", "reason");
-  public static final String UPGRADE_CHECK_FAILED_ON_PROPERTY_ID          = PropertyHelper.getPropertyId("UpgradeChecks", "failed_on");
-  public static final String UPGRADE_CHECK_FAILED_DETAIL_PROPERTY_ID      = PropertyHelper.getPropertyId("UpgradeChecks", "failed_detail");
-  public static final String UPGRADE_CHECK_CHECK_TYPE_PROPERTY_ID         = PropertyHelper.getPropertyId("UpgradeChecks", "check_type");
-  public static final String UPGRADE_CHECK_CLUSTER_NAME_PROPERTY_ID       = PropertyHelper.getPropertyId("UpgradeChecks", "cluster_name");
-  public static final String UPGRADE_CHECK_UPGRADE_TYPE_PROPERTY_ID       = PropertyHelper.getPropertyId("UpgradeChecks", "upgrade_type");
+  public static final String UPGRADE_CHECK_ID_PROPERTY_ID                  = PropertyHelper.getPropertyId("UpgradeChecks", "id");
+  public static final String UPGRADE_CHECK_CHECK_PROPERTY_ID               = PropertyHelper.getPropertyId("UpgradeChecks", "check");
+  public static final String UPGRADE_CHECK_STATUS_PROPERTY_ID              = PropertyHelper.getPropertyId("UpgradeChecks", "status");
+  public static final String UPGRADE_CHECK_REASON_PROPERTY_ID              = PropertyHelper.getPropertyId("UpgradeChecks", "reason");
+  public static final String UPGRADE_CHECK_FAILED_ON_PROPERTY_ID           = PropertyHelper.getPropertyId("UpgradeChecks", "failed_on");
+  public static final String UPGRADE_CHECK_FAILED_DETAIL_PROPERTY_ID       = PropertyHelper.getPropertyId("UpgradeChecks", "failed_detail");
+  public static final String UPGRADE_CHECK_CHECK_TYPE_PROPERTY_ID          = PropertyHelper.getPropertyId("UpgradeChecks", "check_type");
+  public static final String UPGRADE_CHECK_CLUSTER_NAME_PROPERTY_ID        = PropertyHelper.getPropertyId("UpgradeChecks", "cluster_name");
+  public static final String UPGRADE_CHECK_UPGRADE_TYPE_PROPERTY_ID        = PropertyHelper.getPropertyId("UpgradeChecks", "upgrade_type");
+  public static final String UPGRADE_CHECK_TARGET_REPOSITORY_VERSION_ID_ID = PropertyHelper.getPropertyId("UpgradeChecks", "repository_version_id");
+
   /**
    * Optional parameter to specify the preferred Upgrade Pack to use.
    */
   public static final String UPGRADE_CHECK_UPGRADE_PACK_PROPERTY_ID       = PropertyHelper.getPropertyId("UpgradeChecks", "upgrade_pack");
-  public static final String UPGRADE_CHECK_REPOSITORY_VERSION_PROPERTY_ID = PropertyHelper.getPropertyId("UpgradeChecks", "repository_version");
 
   @Inject
   private static Provider<Clusters> clustersProvider;
@@ -113,8 +116,8 @@ public class PreUpgradeCheckResourceProvider extends ReadOnlyResourceProvider {
       UPGRADE_CHECK_CHECK_TYPE_PROPERTY_ID,
       UPGRADE_CHECK_CLUSTER_NAME_PROPERTY_ID,
       UPGRADE_CHECK_UPGRADE_TYPE_PROPERTY_ID,
-      UPGRADE_CHECK_UPGRADE_PACK_PROPERTY_ID,
-      UPGRADE_CHECK_REPOSITORY_VERSION_PROPERTY_ID);
+      UPGRADE_CHECK_TARGET_REPOSITORY_VERSION_ID_ID,
+      UPGRADE_CHECK_UPGRADE_PACK_PROPERTY_ID);
 
 
   @SuppressWarnings("serial")
@@ -134,6 +137,7 @@ public class PreUpgradeCheckResourceProvider extends ReadOnlyResourceProvider {
     super(propertyIds, keyPropertyIds, managementController);
   }
 
+  @Override
   public Set<Resource> getResources(Request request, Predicate predicate) throws SystemException, UnsupportedPropertyException,
     NoSuchResourceException, NoSuchParentResourceException {
 
@@ -161,19 +165,25 @@ public class PreUpgradeCheckResourceProvider extends ReadOnlyResourceProvider {
         throw new NoSuchResourceException(ambariException.getMessage());
       }
 
-      String stackName = cluster.getCurrentStackVersion().getStackName();
-      String sourceStackVersion = cluster.getCurrentStackVersion().getStackVersion();
+      String repositoryVersionId = (String) propertyMap.get(
+          UPGRADE_CHECK_TARGET_REPOSITORY_VERSION_ID_ID);
+
+      if (StringUtils.isBlank(repositoryVersionId)) {
+        throw new SystemException(
+            String.format("%s is a required property when executing upgrade checks",
+                UPGRADE_CHECK_TARGET_REPOSITORY_VERSION_ID_ID));
+      }
+
+      final PrereqCheckRequest upgradeCheckRequest = new PrereqCheckRequest(clusterName,
+          upgradeType);
 
-      final PrereqCheckRequest upgradeCheckRequest = new PrereqCheckRequest(clusterName, upgradeType);
+      StackId sourceStackId = cluster.getCurrentStackVersion();
       upgradeCheckRequest.setSourceStackId(cluster.getCurrentStackVersion());
 
-      if (propertyMap.containsKey(UPGRADE_CHECK_REPOSITORY_VERSION_PROPERTY_ID)) {
-        String repositoryVersionId = propertyMap.get(UPGRADE_CHECK_REPOSITORY_VERSION_PROPERTY_ID).toString();
-        RepositoryVersionEntity repositoryVersionEntity = repositoryVersionDAO.findByStackNameAndVersion(stackName, repositoryVersionId);
-        // set some required properties on the check request
-        upgradeCheckRequest.setRepositoryVersion(repositoryVersionId);
-        upgradeCheckRequest.setTargetStackId(repositoryVersionEntity.getStackId());
-      }
+      RepositoryVersionEntity repositoryVersion = repositoryVersionDAO.findByPK(
+          Long.valueOf(repositoryVersionId));
+
+      upgradeCheckRequest.setTargetRepositoryVersion(repositoryVersion);
 
       //ambariMetaInfo.getStack(stackName, cluster.getCurrentStackVersion().getStackVersion()).getUpgradePacks()
       // TODO AMBARI-12698, filter the upgrade checks to run based on the stack and upgrade type, or the upgrade pack.
@@ -182,8 +192,9 @@ public class PreUpgradeCheckResourceProvider extends ReadOnlyResourceProvider {
           (String) propertyMap.get(UPGRADE_CHECK_UPGRADE_PACK_PROPERTY_ID) : null;
       try{
         // Hint: PreChecks currently executing only before UPGRADE direction
-        upgradePack = upgradeHelper.get().suggestUpgradePack(clusterName, sourceStackVersion,
-            upgradeCheckRequest.getRepositoryVersion(), Direction.UPGRADE, upgradeType, preferredUpgradePackName);
+        upgradePack = upgradeHelper.get().suggestUpgradePack(clusterName, sourceStackId,
+            repositoryVersion.getStackId(), Direction.UPGRADE, upgradeType,
+            preferredUpgradePackName);
       } catch (AmbariException e) {
         throw new SystemException(e.getMessage(), e);
       }
@@ -199,11 +210,13 @@ public class PreUpgradeCheckResourceProvider extends ReadOnlyResourceProvider {
 
       try {
         // Register all the custom prechecks from the services
-        Map<String, ServiceInfo> services = getManagementController().getAmbariMetaInfo().getServices(stackName, sourceStackVersion);
+        Map<String, ServiceInfo> services = getManagementController().getAmbariMetaInfo().getServices(
+            sourceStackId.getStackName(), sourceStackId.getStackVersion());
+
         List<AbstractCheckDescriptor> serviceLevelUpgradeChecksToRun = upgradeCheckRegistry.getServiceLevelUpgradeChecks(upgradePack, services);
         upgradeChecksToRun.addAll(serviceLevelUpgradeChecksToRun);
       } catch (ParentObjectNotFoundException parentNotFoundException) {
-        LOG.error("Invalid stack version: " + stackName + "-" + sourceStackVersion, parentNotFoundException);
+        LOG.error("Invalid stack version: {}", sourceStackId, parentNotFoundException);
       } catch (AmbariException ambariException) {
         LOG.error("Unable to register all the custom prechecks from the services", ambariException);
       } catch (Exception e) {
@@ -221,9 +234,8 @@ public class PreUpgradeCheckResourceProvider extends ReadOnlyResourceProvider {
         setResourceProperty(resource, UPGRADE_CHECK_CHECK_TYPE_PROPERTY_ID, prerequisiteCheck.getType(), requestedIds);
         setResourceProperty(resource, UPGRADE_CHECK_CLUSTER_NAME_PROPERTY_ID, prerequisiteCheck.getClusterName(), requestedIds);
         setResourceProperty(resource, UPGRADE_CHECK_UPGRADE_TYPE_PROPERTY_ID, upgradeType, requestedIds);
-        if (upgradeCheckRequest.getRepositoryVersion() != null) {
-          setResourceProperty(resource, UPGRADE_CHECK_REPOSITORY_VERSION_PROPERTY_ID, upgradeCheckRequest.getRepositoryVersion(), requestedIds);
-        }
+        setResourceProperty(resource, UPGRADE_CHECK_TARGET_REPOSITORY_VERSION_ID_ID, upgradeCheckRequest.getRepositoryVersion(), requestedIds);
+
         resources.add(resource);
       }
     }
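
With this change UpgradeChecks/repository_version_id is mandatory: a precheck request that omits it now fails fast with a SystemException instead of silently skipping the repository-specific checks. A hedged sketch of a precheck query against the conventional rolling_upgrades_check endpoint (Python with the requests library; the endpoint path, host, cluster name, credentials, and id value are all placeholders/assumptions, not taken from this diff):

    import requests

    resp = requests.get(
        'http://ambari-host:8080/api/v1/clusters/c1/rolling_upgrades_check',
        params={
            'fields': 'UpgradeChecks/*',
            # Required after this commit; a repository version entity ID
            # (primary key), not a version string like '2.5.0.0'.
            'UpgradeChecks/repository_version_id': '51',
        },
        auth=('admin', 'admin'),
    )
    resp.raise_for_status()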

http://git-wip-us.apache.org/repos/asf/ambari/blob/d77bde7f/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
index 1695bd3..0e02c77 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
@@ -353,12 +353,12 @@ public class UpgradeContext {
     String preferredUpgradePackName = (String) upgradeRequestMap.get(UPGRADE_PACK);
 
     @Experimental(feature = ExperimentalFeature.PATCH_UPGRADES, comment="This is wrong")
-    String upgradePackFromVersion = cluster.getService(
-        m_services.iterator().next()).getDesiredRepositoryVersion().getVersion();
+    RepositoryVersionEntity upgradeFromRepositoryVersion = cluster.getService(
+        m_services.iterator().next()).getDesiredRepositoryVersion();
 
     m_upgradePack = m_upgradeHelper.suggestUpgradePack(m_cluster.getClusterName(),
-        upgradePackFromVersion, m_repositoryVersion.getVersion(), m_direction, m_type,
-        preferredUpgradePackName);
+        upgradeFromRepositoryVersion.getStackId(), m_repositoryVersion.getStackId(), m_direction,
+        m_type, preferredUpgradePackName);
 
     // the validator will throw an exception if the upgrade request is not valid
     UpgradeRequestValidator upgradeRequestValidator = buildValidator(m_type);
@@ -955,16 +955,13 @@ public class UpgradeContext {
         return;
       }
 
-      RepositoryVersionEntity repositoryVersion = m_repoVersionDAO.findByPK(
-          Long.valueOf(repositoryVersionId));
-
       // Validate pre-req checks pass
       PreUpgradeCheckResourceProvider provider = (PreUpgradeCheckResourceProvider) AbstractControllerResourceProvider.getResourceProvider(
           Resource.Type.PreUpgradeCheck);
 
       Predicate preUpgradeCheckPredicate = new PredicateBuilder().property(
           PreUpgradeCheckResourceProvider.UPGRADE_CHECK_CLUSTER_NAME_PROPERTY_ID).equals(cluster.getClusterName()).and().property(
-          PreUpgradeCheckResourceProvider.UPGRADE_CHECK_REPOSITORY_VERSION_PROPERTY_ID).equals(repositoryVersion.getVersion()).and().property(
+          PreUpgradeCheckResourceProvider.UPGRADE_CHECK_TARGET_REPOSITORY_VERSION_ID_ID).equals(repositoryVersionId).and().property(
           PreUpgradeCheckResourceProvider.UPGRADE_CHECK_UPGRADE_TYPE_PROPERTY_ID).equals(type).and().property(
           PreUpgradeCheckResourceProvider.UPGRADE_CHECK_UPGRADE_PACK_PROPERTY_ID).equals(preferredUpgradePack).toPredicate();
 
@@ -1122,4 +1119,4 @@ public class UpgradeContext {
       return hostOrderItems;
     }
   }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/d77bde7f/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
index 5fdcd66..7ca6976 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
@@ -50,7 +50,6 @@ import org.apache.ambari.server.controller.utilities.ClusterControllerHelper;
 import org.apache.ambari.server.controller.utilities.PredicateBuilder;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
 import org.apache.ambari.server.events.listeners.upgrade.StackVersionListener;
-import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
 import org.apache.ambari.server.orm.dao.ServiceConfigDAO;
 import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
 import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
@@ -190,9 +189,6 @@ public class UpgradeHelper {
   @Inject
   private Provider<Clusters> m_clusters;
 
-  @Inject
-  private Provider<RepositoryVersionDAO> m_repoVersionProvider;
-
   /**
    * Used to update the configuration properties.
    */
@@ -211,66 +207,69 @@ public class UpgradeHelper {
    *
    * @param clusterName
    *          The name of the cluster
-   * @param upgradeFromVersion
-   *          Current stack version
-   * @param upgradeToVersion
-   *          Target stack version
+   * @param sourceStackId
+   *          the "from" stack for this upgrade/downgrade
+   * @param targetStackId
+   *          the "to" stack for this upgrade/downgrade
    * @param direction
    *          {@code Direction} of the upgrade
    * @param upgradeType
    *          The {@code UpgradeType}
+   * @param targetStackName
+   *          The destination target stack name.
    * @param preferredUpgradePackName
    *          For unit test, need to prefer an upgrade pack since multiple
    *          matches can be found.
    * @return {@code UpgradeType} object
    * @throws AmbariException
    */
-  public UpgradePack suggestUpgradePack(String clusterName, String upgradeFromVersion, String upgradeToVersion,
-    Direction direction, UpgradeType upgradeType, String preferredUpgradePackName) throws AmbariException {
+  public UpgradePack suggestUpgradePack(String clusterName,
+      StackId sourceStackId, StackId targetStackId, Direction direction, UpgradeType upgradeType,
+      String preferredUpgradePackName) throws AmbariException {
 
     // Find upgrade packs based on current stack. This is where to upgrade from
     Cluster cluster = m_clusters.get().getCluster(clusterName);
-    StackId stack =  cluster.getCurrentStackVersion();
+    StackId currentStack = cluster.getCurrentStackVersion();
 
-    String repoVersion = upgradeToVersion;
+    StackId stackForUpgradePack = sourceStackId;
 
-    // TODO AMBARI-12706. Here we need to check, how this would work with SWU Downgrade
-    if (direction.isDowngrade() && null != upgradeFromVersion) {
-      repoVersion = upgradeFromVersion;
+    if (direction.isDowngrade()) {
+      stackForUpgradePack = targetStackId;
     }
 
-    RepositoryVersionEntity versionEntity = m_repoVersionProvider.get().findByStackNameAndVersion(
-        stack.getStackName(), repoVersion);
-
-    if (versionEntity == null) {
-      throw new AmbariException(String.format("Repository version %s was not found", repoVersion));
-    }
+    Map<String, UpgradePack> packs = m_ambariMetaInfoProvider.get().getUpgradePacks(
+        currentStack.getStackName(), currentStack.getStackVersion());
 
-    Map<String, UpgradePack> packs = m_ambariMetaInfoProvider.get().getUpgradePacks(stack.getStackName(), stack.getStackVersion());
     UpgradePack pack = null;
 
     if (StringUtils.isNotEmpty(preferredUpgradePackName) && packs.containsKey(preferredUpgradePackName)) {
       pack = packs.get(preferredUpgradePackName);
-    } else {
-      String repoStackId = versionEntity.getStackId().getStackId();
+    }
+
+    // Best-attempt at picking an upgrade pack assuming within the same stack whose target stack version matches.
+    // If multiple candidates are found, raise an exception.
+    if (null == pack) {
       for (UpgradePack upgradePack : packs.values()) {
-        if (null != upgradePack.getTargetStack() && upgradePack.getTargetStack().equals(repoStackId) &&
-          upgradeType == upgradePack.getType()) {
+        if (null != upgradePack.getTargetStack()
+            && StringUtils.equals(upgradePack.getTargetStack(), stackForUpgradePack.getStackId())
+            && upgradeType == upgradePack.getType()) {
           if (null == pack) {
             // Pick the pack.
             pack = upgradePack;
           } else {
             throw new AmbariException(
-                String.format("Unable to perform %s. Found multiple upgrade packs for type %s and target version %s",
-                    direction.getText(false), upgradeType.toString(), repoVersion));
+                String.format(
+                    "Unable to perform %s. Found multiple upgrade packs for type %s and stack %s",
+                    direction.getText(false), upgradeType.toString(), stackForUpgradePack));
           }
         }
       }
     }
 
     if (null == pack) {
-      throw new AmbariException(String.format("Unable to perform %s. Could not locate %s upgrade pack for version %s",
-          direction.getText(false), upgradeType.toString(), repoVersion));
+      throw new AmbariException(
+          String.format("Unable to perform %s. Could not locate %s upgrade pack for stack %s",
+              direction.getText(false), upgradeType.toString(), stackForUpgradePack));
     }
 
    return pack;

http://git-wip-us.apache.org/repos/asf/ambari/blob/d77bde7f/ambari-server/src/test/java/org/apache/ambari/server/checks/AbstractCheckDescriptorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/AbstractCheckDescriptorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/AbstractCheckDescriptorTest.java
index 01cda02..9fbd012 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/AbstractCheckDescriptorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/AbstractCheckDescriptorTest.java
@@ -17,6 +17,7 @@
  */
 package org.apache.ambari.server.checks;
 
+import static org.easymock.EasyMock.anyLong;
 import static org.easymock.EasyMock.anyString;
 import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.replay;
@@ -229,16 +230,20 @@ public class AbstractCheckDescriptorTest {
     VersionDefinitionXml repoXml = EasyMock.createMock(VersionDefinitionXml.class);
     expect(repoVersion.getType()).andReturn(RepositoryType.PATCH).atLeastOnce();
     expect(repoVersion.getRepositoryXml()).andReturn(repoXml).atLeastOnce();
+    expect(repoVersion.getStackId()).andReturn(new StackId("HDP-2.5")).atLeastOnce();
+    expect(repoVersion.getVersion()).andReturn("2.5.0.0-1234").atLeastOnce();
     expect(repoXml.getAvailableServiceNames()).andReturn(Collections.singleton("SERVICE2")).atLeastOnce();
 
-    expect(repositoryVersionDao.findByStackNameAndVersion(
-        anyString(), anyString())).andReturn(repoVersion).atLeastOnce();
+    expect(repositoryVersionDao.findByPK(anyLong())).andReturn(repoVersion).atLeastOnce();
+
+    expect(repositoryVersionDao.findByStackNameAndVersion(anyString(), anyString())).andReturn(
+        repoVersion).atLeastOnce();
 
     replay(clusters, cluster, repositoryVersionDao, repoVersion, repoXml);
 
     AbstractCheckDescriptor check = new TestCheckImpl(PrereqCheckType.SERVICE);
     PrereqCheckRequest request = new PrereqCheckRequest(clusterName, UpgradeType.ROLLING);
-    request.setTargetStackId(new StackId("HDP-2.5"));
+    request.setTargetRepositoryVersion(repoVersion);
 
     List<String> allServicesList = Arrays.asList("SERVICE1", "SERVICE2");
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/d77bde7f/ambari-server/src/test/java/org/apache/ambari/server/checks/AtlasPresenceCheckTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/AtlasPresenceCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/AtlasPresenceCheckTest.java
index 6cb4e2e..dca14ab 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/AtlasPresenceCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/AtlasPresenceCheckTest.java
@@ -20,9 +20,11 @@ package org.apache.ambari.server.checks;
 import static org.junit.Assert.assertEquals;
 
 import org.apache.ambari.server.controller.PrereqCheckRequest;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.state.stack.PrereqCheckStatus;
 import org.apache.ambari.server.state.stack.PrerequisiteCheck;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 
 public class AtlasPresenceCheckTest {
@@ -32,7 +34,7 @@ public class AtlasPresenceCheckTest {
   public void perform() throws Exception {
     PrerequisiteCheck check = new PrerequisiteCheck(null, null);
     PrereqCheckRequest request = new PrereqCheckRequest("cluster");
-    request.setRepositoryVersion("2.5.0.0");
+    request.setTargetRepositoryVersion(Mockito.mock(RepositoryVersionEntity.class));
     m_check.perform(check, request);
 
     assertEquals(PrereqCheckStatus.FAIL, check.getStatus());

http://git-wip-us.apache.org/repos/asf/ambari/blob/d77bde7f/ambari-server/src/test/java/org/apache/ambari/server/checks/ClientRetryPropertyCheckTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/ClientRetryPropertyCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/ClientRetryPropertyCheckTest.java
index 05a3c11..46a00b8 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/ClientRetryPropertyCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/ClientRetryPropertyCheckTest.java
@@ -22,11 +22,14 @@ import java.util.Map;
 
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.PrereqCheckRequest;
+import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.stack.PrereqCheckStatus;
 import org.apache.ambari.server.state.stack.PrerequisiteCheck;
 import org.junit.Assert;
@@ -44,6 +47,10 @@ public class ClientRetryPropertyCheckTest {
 
   private final ClientRetryPropertyCheck m_check = new ClientRetryPropertyCheck();
 
+  final RepositoryVersionEntity m_repositoryVersion = Mockito.mock(RepositoryVersionEntity.class);
+  private final RepositoryVersionDAO repositoryVersionDAO = Mockito.mock(
+      RepositoryVersionDAO.class);
+
   /**
    *
    */
@@ -58,6 +65,9 @@ public class ClientRetryPropertyCheckTest {
     };
     Configuration config = Mockito.mock(Configuration.class);
     m_check.config = config;
+
+    Mockito.when(m_repositoryVersion.getVersion()).thenReturn("2.3.0.0-1234");
+    Mockito.when(m_repositoryVersion.getStackId()).thenReturn(new StackId("HDP", "2.3"));
   }
 
   /**
@@ -73,11 +83,21 @@ public class ClientRetryPropertyCheckTest {
     Mockito.when(cluster.getServices()).thenReturn(services);
 
     PrereqCheckRequest request = new PrereqCheckRequest("cluster");
-    request.setRepositoryVersion("2.3.0.0");
+    request.setTargetRepositoryVersion(m_repositoryVersion);
 
     // nothing installed
     Assert.assertFalse(m_check.isApplicable(request));
 
+    m_check.repositoryVersionDaoProvider = new Provider<RepositoryVersionDAO>() {
+      @Override
+      public RepositoryVersionDAO get() {
+        return repositoryVersionDAO;
+      }
+    };
+
+    Mockito.when(repositoryVersionDAO.findByStackNameAndVersion(Mockito.anyString(),
+        Mockito.anyString())).thenReturn(m_repositoryVersion);
+
     // HDFS installed
     services.put("HDFS", Mockito.mock(Service.class));
     Assert.assertTrue(m_check.isApplicable(request));

http://git-wip-us.apache.org/repos/asf/ambari/blob/d77bde7f/ambari-server/src/test/java/org/apache/ambari/server/checks/ComponentsInstallationCheckTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/ComponentsInstallationCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/ComponentsInstallationCheckTest.java
index 7cdd121..ef833b0 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/ComponentsInstallationCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/ComponentsInstallationCheckTest.java
@@ -26,6 +26,7 @@ import java.util.Map;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.PrereqCheckRequest;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.orm.models.HostComponentSummary;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
@@ -39,6 +40,7 @@ import org.apache.ambari.server.state.State;
 import org.apache.ambari.server.state.stack.PrereqCheckStatus;
 import org.apache.ambari.server.state.stack.PrerequisiteCheck;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.mockito.Mockito;
@@ -61,12 +63,22 @@ public class ComponentsInstallationCheckTest {
   private final Clusters clusters = Mockito.mock(Clusters.class);
   private AmbariMetaInfo ambariMetaInfo = Mockito.mock(AmbariMetaInfo.class);
 
+  final RepositoryVersionEntity m_repositoryVersion = Mockito.mock(RepositoryVersionEntity.class);
+
+  /**
+   *
+   */
+  @Before
+  public void setup() {
+    Mockito.when(m_repositoryVersion.getVersion()).thenReturn("2.2.0.0-1234");
+    Mockito.when(m_repositoryVersion.getStackId()).thenReturn(new StackId("HDP", "2.2"));
+  }
+
   @Test
   public void testIsApplicable() throws Exception {
     PrereqCheckRequest checkRequest = new PrereqCheckRequest("c1");
-    checkRequest.setRepositoryVersion("HDP-2.2.0.0");
     checkRequest.setSourceStackId(new StackId("HDP", "2.2"));
-    checkRequest.setTargetStackId(new StackId("HDP", "2.2"));
+    checkRequest.setTargetRepositoryVersion(m_repositoryVersion);
     ComponentsInstallationCheck cic = new ComponentsInstallationCheck();
     Configuration config = Mockito.mock(Configuration.class);
     cic.config = config;

http://git-wip-us.apache.org/repos/asf/ambari/blob/d77bde7f/ambari-server/src/test/java/org/apache/ambari/server/checks/ConfigurationMergeCheckTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/ConfigurationMergeCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/ConfigurationMergeCheckTest.java
index 1e0405b..df7bd1f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/ConfigurationMergeCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/ConfigurationMergeCheckTest.java
@@ -44,6 +44,7 @@ import org.easymock.EasyMock;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 import com.google.inject.Provider;
 //
@@ -61,6 +62,8 @@ public class ConfigurationMergeCheckTest {
 
   private static final StackId stackId_1_0 = new StackId("HDP-1.0");
 
+  final RepositoryVersionEntity m_repositoryVersion = Mockito.mock(RepositoryVersionEntity.class);
+
   @Before
   public void before() throws Exception {
     Cluster cluster = EasyMock.createMock(Cluster.class);
@@ -79,6 +82,9 @@ public class ConfigurationMergeCheckTest {
 
     expect(cluster.getDesiredConfigByType(CONFIG_TYPE)).andReturn(config).anyTimes();
 
+    Mockito.when(m_repositoryVersion.getVersion()).thenReturn("1.1.0.0-1234");
+    Mockito.when(m_repositoryVersion.getStackId()).thenReturn(new StackId("HDP", "1.1"));
+
     replay(clusters, cluster, config);
   }
 
@@ -86,7 +92,7 @@ public class ConfigurationMergeCheckTest {
   public void testApplicable() throws Exception {
 
     PrereqCheckRequest request = new PrereqCheckRequest("cluster");
-    request.setTargetStackId(stackId_1_0);
+    request.setTargetRepositoryVersion(m_repositoryVersion);
 
     ConfigurationMergeCheck cmc = new ConfigurationMergeCheck();
     Configuration config = EasyMock.createMock(Configuration.class);
@@ -102,7 +108,7 @@ public class ConfigurationMergeCheckTest {
 
     final RepositoryVersionDAO repositoryVersionDAO = EasyMock.createMock(RepositoryVersionDAO.class);
     expect(repositoryVersionDAO.findByStackNameAndVersion("HDP", "1.0")).andReturn(createFor("1.0")).anyTimes();
-    expect(repositoryVersionDAO.findByStackNameAndVersion("HDP", "1.1")).andReturn(createFor("1.1")).anyTimes();
+    expect(repositoryVersionDAO.findByStackNameAndVersion("HDP", "1.1.0.0-1234")).andReturn(createFor("1.1")).anyTimes();
 
     replay(repositoryVersionDAO);
 
@@ -159,8 +165,7 @@ public class ConfigurationMergeCheckTest {
     replay(ami);
 
     PrereqCheckRequest request = new PrereqCheckRequest("cluster");
-    request.setTargetStackId(stackId_1_0);
-    request.setRepositoryVersion("1.1");
+    request.setTargetRepositoryVersion(m_repositoryVersion);
 
     PrerequisiteCheck check = new PrerequisiteCheck(null, "cluster");
     cmc.perform(check, request);

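Note that this test now mixes frameworks: the existing cluster and config fixtures stay on EasyMock, while the new RepositoryVersionEntity is stubbed with Mockito (hence the added org.mockito.Mockito import). The two coexist because each mock object is created and driven entirely by its own framework.
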
http://git-wip-us.apache.org/repos/asf/ambari/blob/d77bde7f/ambari-server/src/test/java/org/apache/ambari/server/checks/HiveDynamicServiceDiscoveryCheckTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/HiveDynamicServiceDiscoveryCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/HiveDynamicServiceDiscoveryCheckTest.java
index 15e3c6d..14fab7c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/HiveDynamicServiceDiscoveryCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/HiveDynamicServiceDiscoveryCheckTest.java
@@ -22,6 +22,7 @@ import java.util.Map;
 
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.PrereqCheckRequest;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
@@ -45,6 +46,8 @@ public class HiveDynamicServiceDiscoveryCheckTest {
 
   private final HiveDynamicServiceDiscoveryCheck m_check = new HiveDynamicServiceDiscoveryCheck();
 
+  final RepositoryVersionEntity repositoryVersion = Mockito.mock(RepositoryVersionEntity.class);
+
   /**
    *
    */
@@ -90,25 +93,37 @@ public class HiveDynamicServiceDiscoveryCheckTest {
 
     // Check HDP-2.2.x => HDP-2.2.y
     request.setSourceStackId(new StackId("HDP-2.2.4.2"));
-    request.setTargetStackId(new StackId("HDP-2.2.8.4"));
+
+    Mockito.when(repositoryVersion.getVersion()).thenReturn("2.2.8.4");
+    Mockito.when(repositoryVersion.getStackId()).thenReturn(new StackId("HDP", "2.2.8.4"));
+    request.setTargetRepositoryVersion(repositoryVersion);
+
     m_check.perform(check, request);
     Assert.assertEquals(PrereqCheckStatus.WARNING, check.getStatus());
 
     // Check HDP-2.2.x => HDP-2.3.y
     request.setSourceStackId(new StackId("HDP-2.2.4.2"));
-    request.setTargetStackId(new StackId("HDP-2.3.8.4"));
+
+    Mockito.when(repositoryVersion.getVersion()).thenReturn("2.3.8.4");
+    Mockito.when(repositoryVersion.getStackId()).thenReturn(new StackId("HDP", "2.3.8.4"));
+    request.setTargetRepositoryVersion(repositoryVersion);
+
     m_check.perform(check, request);
     Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus());
 
     // Check HDP-2.3.x => HDP-2.3.y
     request.setSourceStackId(new StackId("HDP-2.3.4.2"));
-    request.setTargetStackId(new StackId("HDP-2.3.8.4"));
+    request.setTargetRepositoryVersion(repositoryVersion);
     m_check.perform(check, request);
     Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus());
 
     // Check HDP-2.3.x => HDP-2.4.y
     request.setSourceStackId(new StackId("HDP-2.3.4.2"));
-    request.setTargetStackId(new StackId("HDP-2.4.8.4"));
+
+    Mockito.when(repositoryVersion.getVersion()).thenReturn("2.4.8.4");
+    Mockito.when(repositoryVersion.getStackId()).thenReturn(new StackId("HDP", "2.4.8.4"));
+    request.setTargetRepositoryVersion(repositoryVersion);
+
     m_check.perform(check, request);
     Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus());
 

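The scenarios above reuse one mocked entity by re-stubbing it between perform() calls; with Mockito, a later when(...).thenReturn(...) simply replaces the earlier answer. Condensed from the hunks above:

    Mockito.when(repositoryVersion.getVersion()).thenReturn("2.2.8.4");
    m_check.perform(check, request);   // the check sees 2.2.8.4 -> WARNING

    Mockito.when(repositoryVersion.getVersion()).thenReturn("2.3.8.4");
    m_check.perform(check, request);   // the same mock now answers 2.3.8.4 -> FAIL
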
http://git-wip-us.apache.org/repos/asf/ambari/blob/d77bde7f/ambari-server/src/test/java/org/apache/ambari/server/checks/HiveMultipleMetastoreCheckTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/HiveMultipleMetastoreCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/HiveMultipleMetastoreCheckTest.java
index dafc15f..7a6748b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/HiveMultipleMetastoreCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/HiveMultipleMetastoreCheckTest.java
@@ -23,11 +23,14 @@ import java.util.Map;
 
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.PrereqCheckRequest;
+import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.stack.PrereqCheckStatus;
 import org.apache.ambari.server.state.stack.PrerequisiteCheck;
 import org.junit.Assert;
@@ -43,6 +46,10 @@ import com.google.inject.Provider;
 public class HiveMultipleMetastoreCheckTest {
   private final Clusters m_clusters = Mockito.mock(Clusters.class);
   private final HiveMultipleMetastoreCheck m_check = new HiveMultipleMetastoreCheck();
+  private final RepositoryVersionDAO repositoryVersionDAO = Mockito.mock(
+      RepositoryVersionDAO.class);
+
+  final RepositoryVersionEntity m_repositoryVersion = Mockito.mock(RepositoryVersionEntity.class);
 
   /**
    *
@@ -58,6 +65,9 @@ public class HiveMultipleMetastoreCheckTest {
     };
     Configuration config = Mockito.mock(Configuration.class);
     m_check.config = config;
+
+    Mockito.when(m_repositoryVersion.getVersion()).thenReturn("1.0.0.0-1234");
+    Mockito.when(m_repositoryVersion.getStackId()).thenReturn(new StackId("HDP", "1.0"));
   }
 
   /**
@@ -77,7 +87,7 @@ public class HiveMultipleMetastoreCheckTest {
     services.put("HDFS", Mockito.mock(Service.class));
 
     PrereqCheckRequest request = new PrereqCheckRequest("cluster");
-    request.setRepositoryVersion("2.3.0.0");
+    request.setTargetRepositoryVersion(m_repositoryVersion);
 
     // HIVE not installed
     Assert.assertFalse(m_check.isApplicable(request));
@@ -85,6 +95,16 @@ public class HiveMultipleMetastoreCheckTest {
     // install HIVE
     services.put("HIVE", Mockito.mock(Service.class));
 
+    m_check.repositoryVersionDaoProvider = new Provider<RepositoryVersionDAO>() {
+      @Override
+      public RepositoryVersionDAO get() {
+        return repositoryVersionDAO;
+      }
+    };
+
+    Mockito.when(repositoryVersionDAO.findByStackNameAndVersion(Mockito.anyString(),
+        Mockito.anyString())).thenReturn(m_repositoryVersion);
+
     // HIVE installed
     Assert.assertTrue(m_check.isApplicable(request));
   }
@@ -112,7 +132,7 @@ public class HiveMultipleMetastoreCheckTest {
 
     PrerequisiteCheck check = new PrerequisiteCheck(null, null);
     PrereqCheckRequest request = new PrereqCheckRequest("cluster");
-    request.setRepositoryVersion("2.3.0.0");
+    request.setTargetRepositoryVersion(m_repositoryVersion);
     m_check.perform(check, request);
 
     Assert.assertEquals(PrereqCheckStatus.WARNING, check.getStatus());
@@ -144,7 +164,7 @@ public class HiveMultipleMetastoreCheckTest {
 
     PrerequisiteCheck check = new PrerequisiteCheck(null, null);
     PrereqCheckRequest request = new PrereqCheckRequest("cluster");
-    request.setRepositoryVersion("2.3.0.0");
+    request.setTargetRepositoryVersion(m_repositoryVersion);
     m_check.perform(check, request);
 
     Assert.assertEquals(PrereqCheckStatus.WARNING, check.getStatus());

http://git-wip-us.apache.org/repos/asf/ambari/blob/d77bde7f/ambari-server/src/test/java/org/apache/ambari/server/checks/HostsHeartbeatCheckTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/HostsHeartbeatCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/HostsHeartbeatCheckTest.java
index cc2c276..a15f9c1 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/HostsHeartbeatCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/HostsHeartbeatCheckTest.java
@@ -22,6 +22,7 @@ import java.util.List;
 
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.PrereqCheckRequest;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Host;
@@ -32,6 +33,7 @@ import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.stack.PrereqCheckStatus;
 import org.apache.ambari.server.state.stack.PrerequisiteCheck;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -44,12 +46,22 @@ import com.google.inject.Provider;
 public class HostsHeartbeatCheckTest {
   private final Clusters clusters = Mockito.mock(Clusters.class);
 
+  final RepositoryVersionEntity m_repositoryVersion = Mockito.mock(RepositoryVersionEntity.class);
+
+  /**
+   *
+   */
+  @Before
+  public void setup() {
+    Mockito.when(m_repositoryVersion.getVersion()).thenReturn("2.2.0.0-1234");
+    Mockito.when(m_repositoryVersion.getStackId()).thenReturn(new StackId("HDP", "2.2"));
+  }
+
   @Test
   public void testIsApplicable() throws Exception {
     PrereqCheckRequest checkRequest = new PrereqCheckRequest("c1");
-    checkRequest.setRepositoryVersion("HDP-2.2.0.0");
     checkRequest.setSourceStackId(new StackId("HDP", "2.2"));
-    checkRequest.setTargetStackId(new StackId("HDP", "2.2"));
+    checkRequest.setTargetRepositoryVersion(m_repositoryVersion);
     HostsHeartbeatCheck hhc = new HostsHeartbeatCheck();
     Configuration config = Mockito.mock(Configuration.class);
     hhc.config = config;

http://git-wip-us.apache.org/repos/asf/ambari/blob/d77bde7f/ambari-server/src/test/java/org/apache/ambari/server/checks/HostsMasterMaintenanceCheckTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/HostsMasterMaintenanceCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/HostsMasterMaintenanceCheckTest.java
index 1e87146..6399166 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/HostsMasterMaintenanceCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/HostsMasterMaintenanceCheckTest.java
@@ -24,6 +24,7 @@ import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.PrereqCheckRequest;
 import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Host;
@@ -51,19 +52,29 @@ public class HostsMasterMaintenanceCheckTest {
   private final RepositoryVersionHelper repositoryVersionHelper = Mockito.mock(RepositoryVersionHelper.class);
   private final AmbariMetaInfo ambariMetaInfo = Mockito.mock(AmbariMetaInfo.class);
 
+  final RepositoryVersionEntity repositoryVersion = Mockito.mock(
+      RepositoryVersionEntity.class);
+
   @Test
   public void testIsApplicable() throws Exception {
+    Mockito.when(repositoryVersion.getVersion()).thenReturn("1.0.0.0-1234");
+    Mockito.when(repositoryVersion.getStackId()).thenReturn(new StackId("HDP", "1.0"));
+
     final PrereqCheckRequest request = new PrereqCheckRequest("cluster");
-    request.setRepositoryVersion("not null");
+    request.setTargetRepositoryVersion(repositoryVersion);
     HostsMasterMaintenanceCheck hmmc = new HostsMasterMaintenanceCheck();
     Configuration config = Mockito.mock(Configuration.class);
     hmmc.config = config;
     Assert.assertTrue(hmmc.isApplicable(request));
     Assert.assertTrue(new HostsMasterMaintenanceCheck().isApplicable(request));
+
     HostsMasterMaintenanceCheck hmmc2 = new HostsMasterMaintenanceCheck();
     hmmc2.config = config;
     Assert.assertTrue(hmmc2.isApplicable(request));
-    request.setRepositoryVersion(null);
+    request.setTargetRepositoryVersion(repositoryVersion);
+
+    // reset the mock
+    Mockito.reset(repositoryVersion);
 
     hmmc2.config = config;
     Assert.assertFalse(hmmc2.isApplicable(request));
@@ -71,6 +82,9 @@ public class HostsMasterMaintenanceCheckTest {
 
   @Test
   public void testPerform() throws Exception {
+    Mockito.when(repositoryVersion.getVersion()).thenReturn("1.0.0.0-1234");
+    Mockito.when(repositoryVersion.getStackId()).thenReturn(new StackId("HDP", "1.0"));
+
     final String upgradePackName = "upgrade_pack";
     final HostsMasterMaintenanceCheck hostsMasterMaintenanceCheck = new HostsMasterMaintenanceCheck();
     hostsMasterMaintenanceCheck.clustersProvider = new Provider<Clusters>() {
@@ -108,6 +122,7 @@ public class HostsMasterMaintenanceCheckTest {
     PrerequisiteCheck check = new PrerequisiteCheck(null, null);
     PrereqCheckRequest request = new PrereqCheckRequest("cluster");
     request.setSourceStackId(new StackId("HDP-1.0"));
+    request.setTargetRepositoryVersion(repositoryVersion);
     hostsMasterMaintenanceCheck.perform(check, request);
     Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus());
 
@@ -115,6 +130,10 @@ public class HostsMasterMaintenanceCheckTest {
     Mockito.when(ambariMetaInfo.getUpgradePacks(Mockito.anyString(), Mockito.anyString())).thenReturn(new HashMap<String, UpgradePack>());
 
     check = new PrerequisiteCheck(null, null);
+    request = new PrereqCheckRequest("cluster");
+    request.setSourceStackId(new StackId("HDP-1.0"));
+    request.setTargetRepositoryVersion(repositoryVersion);
+
     hostsMasterMaintenanceCheck.perform(check, request);
     Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus());
 
@@ -128,6 +147,10 @@ public class HostsMasterMaintenanceCheckTest {
     Mockito.when(clusters.getHostsForCluster(Mockito.anyString())).thenReturn(new HashMap<String, Host>());
 
     check = new PrerequisiteCheck(null, null);
+    request = new PrereqCheckRequest("cluster");
+    request.setSourceStackId(new StackId("HDP-1.0"));
+    request.setTargetRepositoryVersion(repositoryVersion);
+
     hostsMasterMaintenanceCheck.perform(check, request);
     Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus());
   }

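The Mockito.reset(repositoryVersion) call above is the entity-based replacement for the old setRepositoryVersion(null) negative case: resetting wipes the stubbed answers, so the mock returns null for getVersion() and getStackId() and the check reports itself as not applicable.

    Mockito.reset(repositoryVersion);                 // stubs gone; getVersion() is now null
    Assert.assertFalse(hmmc2.isApplicable(request));  // same outcome as the old null version
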
http://git-wip-us.apache.org/repos/asf/ambari/blob/d77bde7f/ambari-server/src/test/java/org/apache/ambari/server/checks/HostsRepositoryVersionCheckTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/HostsRepositoryVersionCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/HostsRepositoryVersionCheckTest.java
index 4446cc4..67e8dfa 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/HostsRepositoryVersionCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/HostsRepositoryVersionCheckTest.java
@@ -37,6 +37,7 @@ import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.stack.PrereqCheckStatus;
 import org.apache.ambari.server.state.stack.PrerequisiteCheck;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -51,10 +52,18 @@ public class HostsRepositoryVersionCheckTest {
   private final HostVersionDAO hostVersionDAO = Mockito.mock(HostVersionDAO.class);
   private final RepositoryVersionDAO repositoryVersionDAO = Mockito.mock(RepositoryVersionDAO.class);
 
+  final RepositoryVersionEntity repositoryVersion = Mockito.mock(RepositoryVersionEntity.class);
+
+  @Before
+  public void setup() {
+    Mockito.when(repositoryVersion.getVersion()).thenReturn("1.0.0.0-1234");
+    Mockito.when(repositoryVersion.getStackId()).thenReturn(new StackId("HDP", "1.0"));
+  }
+
   @Test
   public void testIsApplicable() throws Exception {
     final PrereqCheckRequest request = new PrereqCheckRequest("cluster");
-    request.setRepositoryVersion("not null");
+    request.setTargetRepositoryVersion(repositoryVersion);
     HostsRepositoryVersionCheck hrvc = new HostsRepositoryVersionCheck();
     Configuration config = Mockito.mock(Configuration.class);
     hrvc.config = config;
@@ -63,7 +72,9 @@ public class HostsRepositoryVersionCheckTest {
     HostsRepositoryVersionCheck hrvc2 = new HostsRepositoryVersionCheck();
     hrvc2.config = config;
     Assert.assertTrue(hrvc2.isApplicable(request));
-    request.setRepositoryVersion(null);
+
+    Mockito.reset(repositoryVersion);
+    request.setTargetRepositoryVersion(repositoryVersion);
 
     HostsMasterMaintenanceCheck hmmc2 = new HostsMasterMaintenanceCheck();
     hmmc2.config = config;
@@ -119,7 +130,10 @@ public class HostsRepositoryVersionCheckTest {
         null);
 
     PrerequisiteCheck check = new PrerequisiteCheck(null, null);
-    hostsRepositoryVersionCheck.perform(check, new PrereqCheckRequest("cluster"));
+    PrereqCheckRequest checkRequest = new PrereqCheckRequest("cluster");
+    checkRequest.setTargetRepositoryVersion(repositoryVersion);
+
+    hostsRepositoryVersionCheck.perform(check, checkRequest);
     Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus());
 
     StackEntity stackEntity = new StackEntity();
@@ -146,7 +160,10 @@ public class HostsRepositoryVersionCheckTest {
             Mockito.anyString())).thenReturn(hostVersion);
 
     check = new PrerequisiteCheck(null, null);
-    hostsRepositoryVersionCheck.perform(check, new PrereqCheckRequest("cluster"));
+    checkRequest = new PrereqCheckRequest("cluster");
+    checkRequest.setTargetRepositoryVersion(repositoryVersion);
+
+    hostsRepositoryVersionCheck.perform(check, checkRequest);
     Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus());
   }
 
@@ -189,11 +206,8 @@ public class HostsRepositoryVersionCheckTest {
     hosts.put("host3", host3);
     Mockito.when(clusters.getHostsForCluster("cluster")).thenReturn(hosts);
 
-    RepositoryVersionEntity rve = new RepositoryVersionEntity();
-    rve.setVersion("1.1.1");
-
     HostVersionEntity hve = new HostVersionEntity();
-    hve.setRepositoryVersion(rve);
+    hve.setRepositoryVersion(repositoryVersion);
     hve.setState(RepositoryVersionState.INSTALLED);
 
     Mockito.when(
@@ -202,7 +216,8 @@ public class HostsRepositoryVersionCheckTest {
 
     PrerequisiteCheck check = new PrerequisiteCheck(null, null);
     PrereqCheckRequest request = new PrereqCheckRequest("cluster");
-    request.setRepositoryVersion("1.1.1");
+    request.setTargetRepositoryVersion(repositoryVersion);
+
     hostsRepositoryVersionCheck.perform(check, request);
     Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus());
   }
@@ -246,11 +261,8 @@ public class HostsRepositoryVersionCheckTest {
     hosts.put("host3", host3);
     Mockito.when(clusters.getHostsForCluster("cluster")).thenReturn(hosts);
 
-    RepositoryVersionEntity rve = new RepositoryVersionEntity();
-    rve.setVersion("1.1.1");
-
     HostVersionEntity hve = new HostVersionEntity();
-    hve.setRepositoryVersion(rve);
+    hve.setRepositoryVersion(repositoryVersion);
     hve.setState(RepositoryVersionState.NOT_REQUIRED);
 
     Mockito.when(
@@ -259,7 +271,7 @@ public class HostsRepositoryVersionCheckTest {
 
     PrerequisiteCheck check = new PrerequisiteCheck(null, null);
     PrereqCheckRequest request = new PrereqCheckRequest("cluster");
-    request.setRepositoryVersion("1.1.1");
+    request.setTargetRepositoryVersion(repositoryVersion);
     hostsRepositoryVersionCheck.perform(check, request);
     Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus());
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/d77bde7f/ambari-server/src/test/java/org/apache/ambari/server/checks/InstallPackagesCheckTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/InstallPackagesCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/InstallPackagesCheckTest.java
index 986a0f1..9d623ec 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/InstallPackagesCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/InstallPackagesCheckTest.java
@@ -39,6 +39,7 @@ import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.stack.PrereqCheckStatus;
 import org.apache.ambari.server.state.stack.PrerequisiteCheck;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.mockito.Mockito;
@@ -65,12 +66,22 @@ public class InstallPackagesCheckTest {
   private String repositoryVersion = "2.2.6.0-1234";
   private String clusterName = "cluster";
 
+  final RepositoryVersionEntity m_repositoryVersion = Mockito.mock(RepositoryVersionEntity.class);
+
+  /**
+   *
+   */
+  @Before
+  public void setup() {
+    Mockito.when(m_repositoryVersion.getVersion()).thenReturn(repositoryVersion);
+    Mockito.when(m_repositoryVersion.getStackId()).thenReturn(targetStackId);
+  }
+
   @Test
   public void testIsApplicable() throws Exception {
     PrereqCheckRequest checkRequest = new PrereqCheckRequest(clusterName);
-    checkRequest.setRepositoryVersion(repositoryVersion);
     checkRequest.setSourceStackId(sourceStackId);
-    checkRequest.setTargetStackId(targetStackId);
+    checkRequest.setTargetRepositoryVersion(m_repositoryVersion);
     InstallPackagesCheck ipc = new InstallPackagesCheck();
     Configuration config = Mockito.mock(Configuration.class);
     ipc.config = config;
@@ -144,9 +155,8 @@ public class InstallPackagesCheckTest {
     Mockito.when(cluster.getHosts()).thenReturn(hosts);
 
     PrereqCheckRequest checkRequest = new PrereqCheckRequest(clusterName);
-    checkRequest.setRepositoryVersion(repositoryVersion);
     checkRequest.setSourceStackId(sourceStackId);
-    checkRequest.setTargetStackId(targetStackId);
+    checkRequest.setTargetRepositoryVersion(m_repositoryVersion);
 
     // Case 1. Initialize with good values
     PrerequisiteCheck check = new PrerequisiteCheck(null, null);

http://git-wip-us.apache.org/repos/asf/ambari/blob/d77bde7f/ambari-server/src/test/java/org/apache/ambari/server/checks/MapReduce2JobHistoryStatePreservingCheckTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/MapReduce2JobHistoryStatePreservingCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/MapReduce2JobHistoryStatePreservingCheckTest.java
index 016bdd08..32ecc26 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/MapReduce2JobHistoryStatePreservingCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/MapReduce2JobHistoryStatePreservingCheckTest.java
@@ -49,6 +49,8 @@ public class MapReduce2JobHistoryStatePreservingCheckTest {
 
   private final MapReduce2JobHistoryStatePreservingCheck m_check = new MapReduce2JobHistoryStatePreservingCheck();
 
+  final RepositoryVersionEntity m_repositoryVersion = Mockito.mock(RepositoryVersionEntity.class);
+
   /**
    *
    */
@@ -75,6 +77,9 @@ public class MapReduce2JobHistoryStatePreservingCheckTest {
     RepositoryVersionEntity rve = Mockito.mock(RepositoryVersionEntity.class);
     Mockito.when(rve.getType()).thenReturn(RepositoryType.STANDARD);
     Mockito.when(m_repositoryVersionDao.findByStackNameAndVersion(Mockito.anyString(), Mockito.anyString())).thenReturn(rve);
+
+    Mockito.when(m_repositoryVersion.getVersion()).thenReturn("2.3.1.1-1234");
+    Mockito.when(m_repositoryVersion.getStackId()).thenReturn(new StackId("HDP", "2.3"));
   }
 
   /**
@@ -91,8 +96,8 @@ public class MapReduce2JobHistoryStatePreservingCheckTest {
     Mockito.when(cluster.getServices()).thenReturn(services);
 
     PrereqCheckRequest request = new PrereqCheckRequest("cluster");
-    request.setTargetStackId(new StackId("HDP", "2.3.1.1"));
     request.setSourceStackId(new StackId("HDP", "2.3.0.0"));
+    request.setTargetRepositoryVersion(m_repositoryVersion);
 
     // MAPREDUCE2 not installed
     Assert.assertFalse(m_check.isApplicable(request));

http://git-wip-us.apache.org/repos/asf/ambari/blob/d77bde7f/ambari-server/src/test/java/org/apache/ambari/server/checks/PreviousUpgradeCompletedTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/PreviousUpgradeCompletedTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/PreviousUpgradeCompletedTest.java
index 3233e55..a6bdc96 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/PreviousUpgradeCompletedTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/PreviousUpgradeCompletedTest.java
@@ -44,7 +44,6 @@ public class PreviousUpgradeCompletedTest {
   private final Cluster cluster = Mockito.mock(Cluster.class);
   private StackId sourceStackId = new StackId("HDP", "2.2");
   private StackId targetStackId = new StackId("HDP", "2.2");
-  private String sourceRepositoryVersion = "2.2.6.0-1234";
   private String destRepositoryVersion = "2.2.8.0-5678";
   private String clusterName = "cluster";
   private PrereqCheckRequest checkRequest = new PrereqCheckRequest(clusterName);
@@ -67,9 +66,12 @@ public class PreviousUpgradeCompletedTest {
     stack.setStackName(stackId.getStackName());
     stack.setStackVersion(stackId.getStackVersion());
 
-    checkRequest.setRepositoryVersion(sourceRepositoryVersion);
+    toRepsitoryVersion = Mockito.mock(RepositoryVersionEntity.class);
+    Mockito.when(toRepsitoryVersion.getVersion()).thenReturn(destRepositoryVersion);
+    Mockito.when(toRepsitoryVersion.getStackId()).thenReturn(targetStackId);
+
     checkRequest.setSourceStackId(sourceStackId);
-    checkRequest.setTargetStackId(targetStackId);
+    checkRequest.setTargetRepositoryVersion(toRepsitoryVersion);
 
     puc.clustersProvider = new Provider<Clusters>() {
       @Override
@@ -77,9 +79,6 @@ public class PreviousUpgradeCompletedTest {
         return clusters;
       }
     };
-
-    toRepsitoryVersion = Mockito.mock(RepositoryVersionEntity.class);
-    Mockito.when(toRepsitoryVersion.getVersion()).thenReturn(destRepositoryVersion);
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/ambari/blob/d77bde7f/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicePresenceCheckTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicePresenceCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicePresenceCheckTest.java
index ed2fcb9..55c622e 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicePresenceCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicePresenceCheckTest.java
@@ -21,9 +21,11 @@ import java.util.HashMap;
 import java.util.Map;
 
 import org.apache.ambari.server.controller.PrereqCheckRequest;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.stack.PrereqCheckStatus;
 import org.apache.ambari.server.state.stack.PrerequisiteCheck;
 import org.apache.ambari.server.state.stack.UpgradePack.PrerequisiteCheckConfig;
@@ -42,6 +44,8 @@ public class ServicePresenceCheckTest {
 
   private final ServicePresenceCheck m_check = new ServicePresenceCheck();
 
+  final RepositoryVersionEntity m_repositoryVersion = Mockito.mock(RepositoryVersionEntity.class);
+
   /**
    *
    */
@@ -54,6 +58,9 @@ public class ServicePresenceCheckTest {
         return m_clusters;
       }
     };
+
+    Mockito.when(m_repositoryVersion.getVersion()).thenReturn("2.5.0.0-1234");
+    Mockito.when(m_repositoryVersion.getStackId()).thenReturn(new StackId("HDP", "2.5"));
   }
 
   @Test
@@ -74,7 +81,7 @@ public class ServicePresenceCheckTest {
 
     PrerequisiteCheck check = new PrerequisiteCheck(null, null);
     PrereqCheckRequest request = new PrereqCheckRequest("cluster");
-    request.setRepositoryVersion("2.5.0.0");
+    request.setTargetRepositoryVersion(m_repositoryVersion);
     request.setPrerequisiteCheckConfig(prerequisiteCheckConfig);
 
     m_check.perform(check, request);
@@ -138,12 +145,12 @@ public class ServicePresenceCheckTest {
     Mockito.when(cluster.getClusterId()).thenReturn(1L);
     Mockito.when(m_clusters.getCluster("cluster")).thenReturn(cluster);
 
-    Map<String, Service> services = new HashMap<String, Service>();
+    Map<String, Service> services = new HashMap<>();
     services.put("ATLAS", Mockito.mock(Service.class));
     services.put("OLDSERVICE", Mockito.mock(Service.class));
     Mockito.when(cluster.getServices()).thenReturn(services);
 
-    Map<String, String> checkProperties = new HashMap<String, String>();
+    Map<String, String> checkProperties = new HashMap<>();
     checkProperties.put(ServicePresenceCheck.REMOVED_SERVICES_PROPERTY_NAME,"OldService");
 
     PrerequisiteCheckConfig prerequisiteCheckConfig = Mockito.mock(PrerequisiteCheckConfig.class);

http://git-wip-us.apache.org/repos/asf/ambari/blob/d77bde7f/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesMaintenanceModeCheckTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesMaintenanceModeCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesMaintenanceModeCheckTest.java
index a941b7a..636cafe 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesMaintenanceModeCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesMaintenanceModeCheckTest.java
@@ -22,6 +22,7 @@ import java.util.Collections;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.PrereqCheckRequest;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Service;
@@ -30,6 +31,7 @@ import org.apache.ambari.server.state.State;
 import org.apache.ambari.server.state.stack.PrereqCheckStatus;
 import org.apache.ambari.server.state.stack.PrerequisiteCheck;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -42,12 +44,22 @@ import com.google.inject.Provider;
 public class ServicesMaintenanceModeCheckTest {
   private final Clusters clusters = Mockito.mock(Clusters.class);
 
+  final RepositoryVersionEntity m_repositoryVersion = Mockito.mock(RepositoryVersionEntity.class);
+
+  /**
+   *
+   */
+  @Before
+  public void setup() {
+    Mockito.when(m_repositoryVersion.getVersion()).thenReturn("2.2.0.0-1234");
+    Mockito.when(m_repositoryVersion.getStackId()).thenReturn(new StackId("HDP", "2.2"));
+  }
+
   @Test
   public void testIsApplicable() throws Exception {
     PrereqCheckRequest checkRequest = new PrereqCheckRequest("c1");
-    checkRequest.setRepositoryVersion("HDP-2.2.0.0");
     checkRequest.setSourceStackId(new StackId("HDP", "2.2"));
-    checkRequest.setTargetStackId(new StackId("HDP", "2.2"));
+    checkRequest.setTargetRepositoryVersion(m_repositoryVersion);
 
     ServicesMaintenanceModeCheck smmc = new ServicesMaintenanceModeCheck();
     Configuration config = Mockito.mock(Configuration.class);

http://git-wip-us.apache.org/repos/asf/ambari/blob/d77bde7f/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesNamenodeTruncateCheckTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesNamenodeTruncateCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesNamenodeTruncateCheckTest.java
index ca71e3f..d0aad2f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesNamenodeTruncateCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesNamenodeTruncateCheckTest.java
@@ -56,6 +56,8 @@ public class ServicesNamenodeTruncateCheckTest {
   private final Map<String, String> m_configMap = new HashMap<>();
   private RepositoryVersionDAO m_repositoryVersionDAO = EasyMock.createMock(RepositoryVersionDAO.class);
 
+  final RepositoryVersionEntity repositoryVersion = Mockito.mock(RepositoryVersionEntity.class);
+
   @Before
   public void setup() throws Exception {
     Cluster cluster = EasyMock.createMock(Cluster.class);
@@ -97,6 +99,8 @@ public class ServicesNamenodeTruncateCheckTest {
     expect(m_repositoryVersionDAO.findByStackNameAndVersion(EasyMock.anyString(), EasyMock.anyString())).andReturn(rve).anyTimes();
     replay(m_repositoryVersionDAO, rve);
 
+    Mockito.when(repositoryVersion.getVersion()).thenReturn("HDP-2.2.0.0");
+    Mockito.when(repositoryVersion.getStackId()).thenReturn(new StackId("HDP", "2.2.0"));
   }
 
 
@@ -104,9 +108,8 @@ public class ServicesNamenodeTruncateCheckTest {
   public void testIsApplicable() throws Exception {
 
     PrereqCheckRequest checkRequest = new PrereqCheckRequest("c1");
-    checkRequest.setRepositoryVersion("HDP-2.2.0.0");
     checkRequest.setSourceStackId(new StackId("HDP", "2.2"));
-    checkRequest.setTargetStackId(new StackId("HDP", "2.2"));
+    checkRequest.setTargetRepositoryVersion(repositoryVersion);
 
     Assert.assertTrue(m_check.isApplicable(checkRequest));
   }
@@ -121,7 +124,11 @@ public class ServicesNamenodeTruncateCheckTest {
     // Check HDP-2.2.x => HDP-2.2.y is FAIL
     m_configMap.put("dfs.allow.truncate", "true");
     request.setSourceStackId(new StackId("HDP-2.2.4.2"));
-    request.setTargetStackId(new StackId("HDP-2.2.8.4"));
+
+    Mockito.when(repositoryVersion.getVersion()).thenReturn("2.2.8.4");
+    Mockito.when(repositoryVersion.getStackId()).thenReturn(new StackId("HDP", "2.2.8.4"));
+    request.setTargetRepositoryVersion(repositoryVersion);
+
     check = new PrerequisiteCheck(null, null);
     m_check.perform(check, request);
     assertEquals(PrereqCheckStatus.FAIL, check.getStatus());
@@ -134,7 +141,11 @@ public class ServicesNamenodeTruncateCheckTest {
     // Check HDP-2.2.x => HDP-2.3.y is FAIL
     m_configMap.put("dfs.allow.truncate", "true");
     request.setSourceStackId(new StackId("HDP-2.2.4.2"));
-    request.setTargetStackId(new StackId("HDP-2.3.8.4"));
+
+    Mockito.when(repositoryVersion.getVersion()).thenReturn("2.3.8.4");
+    Mockito.when(repositoryVersion.getStackId()).thenReturn(new StackId("HDP", "2.3.8.4"));
+    request.setTargetRepositoryVersion(repositoryVersion);
+
     check = new PrerequisiteCheck(null, null);
     m_check.perform(check, request);
     assertEquals(PrereqCheckStatus.FAIL, check.getStatus());

http://git-wip-us.apache.org/repos/asf/ambari/blob/d77bde7f/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesUpCheckTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesUpCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesUpCheckTest.java
index 45c24d3..ba0f701 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesUpCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesUpCheckTest.java
@@ -26,6 +26,7 @@ import java.util.Map;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.PrereqCheckRequest;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.orm.models.HostComponentSummary;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
@@ -39,6 +40,7 @@ import org.apache.ambari.server.state.State;
 import org.apache.ambari.server.state.stack.PrereqCheckStatus;
 import org.apache.ambari.server.state.stack.PrerequisiteCheck;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.mockito.Mockito;
@@ -61,12 +63,23 @@ public class ServicesUpCheckTest {
   private final Clusters clusters = Mockito.mock(Clusters.class);
   private AmbariMetaInfo ambariMetaInfo = Mockito.mock(AmbariMetaInfo.class);
 
+  final RepositoryVersionEntity m_repositoryVersion = Mockito.mock(RepositoryVersionEntity.class);
+
+  /**
+   *
+   */
+  @Before
+  public void setup() {
+    Mockito.when(m_repositoryVersion.getVersion()).thenReturn("2.2.0.0-1234");
+    Mockito.when(m_repositoryVersion.getStackId()).thenReturn(new StackId("HDP", "2.2"));
+  }
+
   @Test
   public void testIsApplicable() throws Exception {
     PrereqCheckRequest checkRequest = new PrereqCheckRequest("c1");
-    checkRequest.setRepositoryVersion("HDP-2.2.0.0");
     checkRequest.setSourceStackId(new StackId("HDP", "2.2"));
-    checkRequest.setTargetStackId(new StackId("HDP", "2.2"));
+    checkRequest.setTargetRepositoryVersion(m_repositoryVersion);
+
     ServicesUpCheck suc = new ServicesUpCheck();
     Configuration config = Mockito.mock(Configuration.class);
     suc.config = config;

http://git-wip-us.apache.org/repos/asf/ambari/blob/d77bde7f/ambari-server/src/test/java/org/apache/ambari/server/checks/YarnTimelineServerStatePreservingCheckTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/YarnTimelineServerStatePreservingCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/YarnTimelineServerStatePreservingCheckTest.java
index 5c423b5..5cb666b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/YarnTimelineServerStatePreservingCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/YarnTimelineServerStatePreservingCheckTest.java
@@ -50,6 +50,8 @@ public class YarnTimelineServerStatePreservingCheckTest {
 
   private final YarnTimelineServerStatePreservingCheck m_check = new YarnTimelineServerStatePreservingCheck();
 
+  final RepositoryVersionEntity m_repositoryVersion = Mockito.mock(RepositoryVersionEntity.class);
+
   /**
    *
    */
@@ -64,6 +66,9 @@ public class YarnTimelineServerStatePreservingCheckTest {
     };
     Configuration config = Mockito.mock(Configuration.class);
     m_check.config = config;
+
+    Mockito.when(m_repositoryVersion.getVersion()).thenReturn("2.3.0.0-1234");
+    Mockito.when(m_repositoryVersion.getStackId()).thenReturn(new StackId("HDP", "2.3"));
   }
 
   /**
@@ -89,7 +94,7 @@ public class YarnTimelineServerStatePreservingCheckTest {
         m_check.getClass().getName())).thenReturn(checkProperties);
 
     PrereqCheckRequest request = new PrereqCheckRequest("cluster");
-    request.setRepositoryVersion("2.3.0.0");
+    request.setTargetRepositoryVersion(m_repositoryVersion);
     request.setPrerequisiteCheckConfig(prerequisiteCheckConfig);
 
     // YARN not installed

http://git-wip-us.apache.org/repos/asf/ambari/blob/d77bde7f/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProviderTest.java
index 2a48fa6..3de20e4 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProviderTest.java
@@ -121,9 +121,9 @@ public class PreUpgradeCheckResourceProviderTest {
     expect(targetStackId.getStackName()).andReturn("Stack100").anyTimes();
     expect(targetStackId.getStackVersion()).andReturn("1.1").anyTimes();
 
-    expect(repoDao.findByStackNameAndVersion("Stack100", "Repo100")).andReturn(repo).anyTimes();
+    expect(repoDao.findByPK(1L)).andReturn(repo).anyTimes();
     expect(repo.getStackId()).andReturn(targetStackId).atLeastOnce();
-    expect(upgradeHelper.suggestUpgradePack("Cluster100", "1.0", "Repo100", Direction.UPGRADE, UpgradeType.NON_ROLLING, "upgrade_pack11")).andReturn(upgradePack);
+    expect(upgradeHelper.suggestUpgradePack("Cluster100", currentStackId, targetStackId, Direction.UPGRADE, UpgradeType.NON_ROLLING, "upgrade_pack11")).andReturn(upgradePack);
 
     List<AbstractCheckDescriptor> upgradeChecksToRun = new LinkedList<>();
     List<String> prerequisiteChecks = new LinkedList<>();
@@ -147,7 +147,7 @@ public class PreUpgradeCheckResourceProviderTest {
     Predicate predicate = builder.property(PreUpgradeCheckResourceProvider.UPGRADE_CHECK_CLUSTER_NAME_PROPERTY_ID).equals("Cluster100").and()
         .property(PreUpgradeCheckResourceProvider.UPGRADE_CHECK_UPGRADE_PACK_PROPERTY_ID).equals("upgrade_pack11").and()
         .property(PreUpgradeCheckResourceProvider.UPGRADE_CHECK_UPGRADE_TYPE_PROPERTY_ID).equals(UpgradeType.NON_ROLLING).and()
-        .property(PreUpgradeCheckResourceProvider.UPGRADE_CHECK_REPOSITORY_VERSION_PROPERTY_ID).equals("Repo100").toPredicate();
+        .property(PreUpgradeCheckResourceProvider.UPGRADE_CHECK_TARGET_REPOSITORY_VERSION_ID_ID).equals("1").toPredicate();
 
 
     System.out.println("PreUpgradeCheckResourceProvider - " + provider);

http://git-wip-us.apache.org/repos/asf/ambari/blob/d77bde7f/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
index 921322b..aa81614 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
@@ -191,15 +191,15 @@ public class UpgradeHelperTest extends EasyMockSupport {
   @Test
   public void testSuggestUpgradePack() throws Exception{
     final String clusterName = "c1";
-    final String upgradeFromVersion = "2.1.1";
-    final String upgradeToVersion = "2.2.0";
+    final StackId sourceStackId = new StackId("HDP", "2.1.1");
+    final StackId targetStackId = new StackId("HDP", "2.2.0");
     final Direction upgradeDirection = Direction.UPGRADE;
     final UpgradeType upgradeType = UpgradeType.ROLLING;
 
     makeCluster();
     try {
       String preferredUpgradePackName = "upgrade_test";
-      UpgradePack up = m_upgradeHelper.suggestUpgradePack(clusterName, upgradeFromVersion, upgradeToVersion, upgradeDirection, upgradeType, preferredUpgradePackName);
+      UpgradePack up = m_upgradeHelper.suggestUpgradePack(clusterName, sourceStackId, targetStackId, upgradeDirection, upgradeType, preferredUpgradePackName);
       assertEquals(upgradeType, up.getType());
     } catch (AmbariException e){
       assertTrue(false);
@@ -1734,8 +1734,8 @@ public class UpgradeHelperTest extends EasyMockSupport {
   @Test
   public void testRollingUpgradesCanUseAdvancedGroupings() throws Exception {
     final String clusterName = "c1";
-    final String upgradeFromVersion = "2.1.1";
-    final String upgradeToVersion = "2.2.0";
+    final StackId sourceStackId = new StackId("HDP", "2.1.1");
+    final StackId targetStackId = new StackId("HDP", "2.2.0");
     final Direction upgradeDirection = Direction.UPGRADE;
     final UpgradeType upgradeType = UpgradeType.ROLLING;
 
@@ -1743,8 +1743,8 @@ public class UpgradeHelperTest extends EasyMockSupport {
 
     // grab the right pack
     String preferredUpgradePackName = "upgrade_grouping_rolling";
-    UpgradePack upgradePack = m_upgradeHelper.suggestUpgradePack(clusterName, upgradeFromVersion,
-        upgradeToVersion, upgradeDirection, upgradeType, preferredUpgradePackName);
+    UpgradePack upgradePack = m_upgradeHelper.suggestUpgradePack(clusterName, sourceStackId,
+        targetStackId, upgradeDirection, upgradeType, preferredUpgradePackName);
 
     assertEquals(upgradeType, upgradePack.getType());
 

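Both tests exercise the same revised helper signature; condensed, the new call shape is (values taken from the first hunk):

    // suggestUpgradePack now takes StackId objects for the source and
    // target stacks instead of bare version strings.
    StackId sourceStackId = new StackId("HDP", "2.1.1");
    StackId targetStackId = new StackId("HDP", "2.2.0");

    UpgradePack up = m_upgradeHelper.suggestUpgradePack("c1", sourceStackId,
        targetStackId, Direction.UPGRADE, UpgradeType.ROLLING, "upgrade_test");
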

[46/50] [abbrv] ambari git commit: AMBARI-21157. Logging cleanup around reading config properties file

Posted by nc...@apache.org.
AMBARI-21157. Logging cleanup around reading config properties file


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/495a3f46
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/495a3f46
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/495a3f46

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 495a3f467edc4e19e9da76db10a1499e514c80a5
Parents: d77bde7
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Sat Jul 22 18:09:54 2017 -0700
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Sat Jul 22 18:10:42 2017 -0700

----------------------------------------------------------------------
 .../org/apache/ambari/server/configuration/Configuration.java  | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/495a3f46/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
index 28f9d64..1b4d741 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
@@ -4064,7 +4064,11 @@ public class Configuration {
       if (result != null) {
         password = new String(result);
       } else {
-        LOG.error("Cannot read password for alias = " + aliasStr);
+        if (CredentialProvider.isAliasString(aliasStr)) {
+          LOG.error("Cannot read password for alias = " + aliasStr);
+        } else {
+          LOG.warn("Raw password provided, not an alias. It cannot be read from credential store.");
+        }
       }
     }
     return password;

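The distinction matters because these properties can hold either a credential-store alias or a literal password: previously a literal value was logged as a lookup error even though it was never supposed to come from the store. CredentialProvider.isAliasString now gates the error path, so only values that actually look like aliases (presumably the ${alias=...} wrapper; the matching rule is not shown in this diff) are reported at ERROR level.
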

[16/50] [abbrv] ambari git commit: AMBARI-21478. Wrong string quoting in get_stack_version

Posted by nc...@apache.org.
AMBARI-21478. Wrong string quoting in get_stack_version


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/56462b22
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/56462b22
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/56462b22

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 56462b222f528fd076e64da608d67ea39dab4580
Parents: f072dd2
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Fri Jul 14 18:13:22 2017 +0200
Committer: Attila Doroszlai <ad...@hortonworks.com>
Committed: Sat Jul 15 20:54:32 2017 +0200

----------------------------------------------------------------------
 .../resource_management/libraries/functions/get_stack_version.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/56462b22/ambari-common/src/main/python/resource_management/libraries/functions/get_stack_version.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/get_stack_version.py b/ambari-common/src/main/python/resource_management/libraries/functions/get_stack_version.py
index 463d61f..49416af 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/get_stack_version.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/get_stack_version.py
@@ -68,7 +68,7 @@ def get_stack_version(package_name):
   stack_selector_path = stack_tools.get_stack_tool_path(stack_tools.STACK_SELECTOR_NAME)
 
   if not os.path.exists(stack_selector_path):
-    Logger.info('Skipping get_stack_version since " + stack_selector_tool + " is not yet available')
+    Logger.info('Skipping get_stack_version since ' + stack_selector_path + ' is not yet available')
     return None # lazy fail
   
   try:
@@ -77,7 +77,7 @@ def get_stack_version(package_name):
     return_code, stack_output = shell.call(command, timeout=20)
   except Exception, e:
     Logger.error(str(e))
-    raise Fail('Unable to execute " + stack_selector_path + " command to retrieve the version.')
+    raise Fail('Unable to execute ' + stack_selector_path + ' command to retrieve the version.')
 
   if return_code != 0:
     raise Fail(

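The bug was a quoting mix-up: the double quotes and + operators sat inside single-quoted string literals, so the messages logged the expressions verbatim (" + stack_selector_tool + " and " + stack_selector_path + ") instead of the tool's path. The fix closes the quoting and also switches the first message to stack_selector_path, the variable the surrounding check actually uses.
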

[10/50] [abbrv] ambari git commit: AMBARI-21470 : Kafka Sink does not exclude excluded metrics of type 'gauge'. (Qin Liu via avijayan)

Posted by nc...@apache.org.
AMBARI-21470 : Kafka Sink does not exclude excluded metrics of type 'gauge'. (Qin Liu via avijayan)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9bfea653
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9bfea653
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9bfea653

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 9bfea6530bc9759f518fd616e15bb08244152ab1
Parents: f7fac03
Author: Aravindan Vijayan <av...@hortonworks.com>
Authored: Fri Jul 14 09:47:47 2017 -0700
Committer: Aravindan Vijayan <av...@hortonworks.com>
Committed: Fri Jul 14 09:47:47 2017 -0700

----------------------------------------------------------------------
 .../metrics2/sink/kafka/KafkaTimelineMetricsReporter.java      | 6 ++++--
 .../common-services/KAFKA/0.9.0/configuration/kafka-broker.xml | 2 +-
 2 files changed, 5 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9bfea653/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java b/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
index 6f5e9e0..e126016 100644
--- a/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
+++ b/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
@@ -406,8 +406,10 @@ public class KafkaTimelineMetricsReporter extends AbstractTimelineMetricsSink
       final String sanitizedName = sanitizeName(name);
 
       try {
-        cacheSanitizedTimelineMetric(currentTimeMillis, sanitizedName, "", Double.parseDouble(String.valueOf(gauge.value())));
-        populateMetricsList(context, MetricType.GAUGE, sanitizedName);
+        if (!isExcludedMetric(sanitizedName)) {
+          cacheSanitizedTimelineMetric(currentTimeMillis, sanitizedName, "", Double.parseDouble(String.valueOf(gauge.value())));
+          populateMetricsList(context, MetricType.GAUGE, sanitizedName);
+        }
       } catch (NumberFormatException ex) {
         LOG.debug(ex.getMessage());
       }

http://git-wip-us.apache.org/repos/asf/ambari/blob/9bfea653/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/kafka-broker.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/kafka-broker.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/kafka-broker.xml
index 73a5eff..4cd2b0d 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/kafka-broker.xml
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/kafka-broker.xml
@@ -138,7 +138,7 @@
   </property>
   <property>
     <name>external.kafka.metrics.exclude.prefix</name>
-    <value>kafka.network.RequestMetrics,kafka.server.DelayedOperationPurgatory,kafka.server.BrokerTopicMetrics.BytesRejectedPerSec</value>
+    <value>kafka.network.RequestMetrics,kafka.server.DelayedOperationPurgatory,kafka.server.BrokerTopicMetrics.BytesRejectedPerSec,kafka.server.KafkaServer.ClusterId</value>
     <description>
       Exclude metrics starting with these prefixes from being collected.
     </description>

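Before this change, gauges bypassed the exclusion check entirely: an excluded numeric gauge was still cached and reported, and a non-numeric gauge such as kafka.server.KafkaServer.ClusterId only dropped out via the NumberFormatException fallback. Guarding with isExcludedMetric first, and adding the ClusterId prefix to the stack default, filters gauges the same way as the other metric types.
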

[21/50] [abbrv] ambari git commit: AMBARI-21488 Default Base URL should be there for OS_TYPE=redhat-ppc6 in IBM Power and change the OS_TYPE to redhat7-ppc64. (atkach)

Posted by nc...@apache.org.
AMBARI-21488 Default Base URL should be there for OS_TYPE=redhat-ppc6 in IBM Power and change the OS_TYPE to redhat7-ppc64. (atkach)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ba2a29fd
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ba2a29fd
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ba2a29fd

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: ba2a29fd1da5aa264e7873e306c9320e0d3b2a45
Parents: cc412e6
Author: Andrii Tkach <at...@apache.org>
Authored: Mon Jul 17 15:20:36 2017 +0300
Committer: Andrii Tkach <at...@apache.org>
Committed: Mon Jul 17 16:05:39 2017 +0300

----------------------------------------------------------------------
 .../scripts/controllers/stackVersions/StackVersionsCreateCtrl.js  | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/ba2a29fd/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
index 70f6658..9d17075 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
@@ -195,8 +195,7 @@ angular.module('ambariAdminConsole')
           if (!existingOSHash[stackOs.OperatingSystems.os_type]) {
             stackOs.selected = false;
             stackOs.repositories.forEach(function(repo) {
-              repo.Repositories.base_url = '';
-              repo.Repositories.initial_base_url = '';
+              repo.Repositories.initial_base_url = repo.Repositories.default_base_url;
             });
             $scope.osList.push(stackOs);
           }
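
In effect, repositories for operating systems not yet registered now seed initial_base_url from default_base_url instead of blanking both fields. A rough Python rendering of the new behavior (the field names come from the diff; the surrounding objects and the example URL are assumptions):

# Rough sketch of the repository URL seeding after this change; the dict
# layout imitates the Repositories objects in the diff above.
def seed_initial_urls(stack_os, existing_os_types, os_list):
    if stack_os["os_type"] not in existing_os_types:
        stack_os["selected"] = False
        for repo in stack_os["repositories"]:
            # Before: base_url and initial_base_url were both reset to ''.
            # After: the default base URL becomes the initial value, so
            # rows such as redhat7-ppc64 come pre-filled instead of empty.
            repo["initial_base_url"] = repo["default_base_url"]
        os_list.append(stack_os)

os_list = []
seed_initial_urls(
    {"os_type": "redhat7-ppc64", "selected": True,
     "repositories": [{"default_base_url": "http://example.com/hdp",  # hypothetical URL
                       "initial_base_url": ""}]},
    existing_os_types={}, os_list=os_list)
assert os_list[0]["repositories"][0]["initial_base_url"] == "http://example.com/hdp"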


[03/50] [abbrv] ambari git commit: AMBARI-21445. Fixes the following bugs: (1). Make Hive Kerberos keytab files group non-readable (2). HiveServer2 Authentication via LDAP to work correctly (3). Remove leading white spaces for the hive-env and hive-inte

Posted by nc...@apache.org.
AMBARI-21445. Fixes the following bugs: (1) Make Hive Kerberos keytab files group non-readable. (2) Make HiveServer2 authentication via LDAP work correctly. (3) Remove leading white spaces from the hive-env and hive-interactive-env templates.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/eb3d3ea6
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/eb3d3ea6
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/eb3d3ea6

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: eb3d3ea6e5eb9464a135f851658d4aa5b3988efa
Parents: 9f788c3
Author: Swapan Shridhar <ss...@hortonworks.com>
Authored: Tue Jul 11 15:37:08 2017 -0700
Committer: Swapan Shridhar <ss...@hortonworks.com>
Committed: Wed Jul 12 11:55:44 2017 -0700

----------------------------------------------------------------------
 .../0.12.0.2.0/package/scripts/params_linux.py  |   4 +
 .../0.12.0.2.0/package/scripts/service_check.py |   3 +-
 .../services/HIVE/configuration/hive-env.xml    |  78 +++++-----
 .../HIVE/configuration/hive-interactive-env.xml |  62 ++++----
 .../stacks/HDP/2.6/services/HIVE/kerberos.json  | 151 +++++++++++++++++++
 .../stacks/HDP/2.6/services/YARN/kerberos.json  |   2 +-
 6 files changed, 228 insertions(+), 72 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/eb3d3ea6/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
index 21b3d8b..9939536 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
@@ -849,3 +849,7 @@ ranger_hive_metastore_lookup = default('/configurations/ranger-hive-plugin-prope
 
 if security_enabled:
   hive_metastore_principal_with_host = hive_metastore_principal.replace('_HOST', hostname.lower())
+
+# For ldap - hive_check
+hive_ldap_user= config['configurations']['hive-env'].get('alert_ldap_username','')
+hive_ldap_passwd=config['configurations']['hive-env'].get('alert_ldap_password','')
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb3d3ea6/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py
index d144c34..271fff9 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py
@@ -123,7 +123,8 @@ class HiveServiceCheckDefault(HiveServiceCheck):
                                params.hive_server_principal, kinit_cmd, params.smokeuser,
                                transport_mode=params.hive_transport_mode, http_endpoint=params.hive_http_endpoint,
                                ssl=params.hive_ssl, ssl_keystore=ssl_keystore,
-                               ssl_password=ssl_password)
+                               ssl_password=ssl_password, ldap_username=params.hive_ldap_user,
+                               ldap_password=params.hive_ldap_passwd)
         Logger.info("Successfully connected to {0} on port {1}".format(address, server_port))
         workable_server_available = True
       except:
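
The two new parameters default to empty strings, so clusters without LDAP alert credentials behave exactly as before; only when alert_ldap_username is set does the check authenticate over LDAP. A condensed sketch of the lookup and hand-off (the hive-env keys are from the diff; the check function, host, and the "hive_probe" value are stand-ins):

# Condensed sketch of the LDAP credential hand-off added above.
# config mimics the command JSON; check_thrift_port_sasl is a stand-in
# for the real helper invoked by the service check.
config = {"configurations": {"hive-env": {"alert_ldap_username": "hive_probe"}}}

hive_ldap_user = config["configurations"]["hive-env"].get("alert_ldap_username", "")
hive_ldap_passwd = config["configurations"]["hive-env"].get("alert_ldap_password", "")

def check_thrift_port_sasl(address, port, ldap_username="", ldap_password=""):
    # With empty credentials the connection attempt is unchanged; with a
    # username set, the check can bind as that user when HiveServer2 runs
    # hive.server2.authentication=LDAP.
    mode = "ldap" if ldap_username else "default"
    print("connecting to %s:%s (%s auth)" % (address, port, mode))

check_thrift_port_sasl("c6401.ambari.apache.org", 10000,
                       ldap_username=hive_ldap_user, ldap_password=hive_ldap_passwd)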

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb3d3ea6/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-env.xml
index a6cf1bc..929c10d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-env.xml
@@ -60,56 +60,56 @@
     <display-name>hive-env template</display-name>
     <description>This is the jinja template for hive-env.sh file</description>
     <value>
-      export HADOOP_USER_CLASSPATH_FIRST=true  #this prevents old metrics libs from mapreduce lib from bringing in old jar deps overriding HIVE_LIB
-      if [ "$SERVICE" = "cli" ]; then
-      if [ -z "$DEBUG" ]; then
-      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseNUMA -XX:+UseParallelGC -XX:-UseGCOverheadLimit"
-      else
-      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
-      fi
-      fi
+export HADOOP_USER_CLASSPATH_FIRST=true  #this prevents old metrics libs from mapreduce lib from bringing in old jar deps overriding HIVE_LIB
+if [ "$SERVICE" = "cli" ]; then
+  if [ -z "$DEBUG" ]; then
+    export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseNUMA -XX:+UseParallelGC -XX:-UseGCOverheadLimit"
+  else
+    export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+  fi
+fi
 
-      # The heap size of the jvm stared by hive shell script can be controlled via:
+# The heap size of the jvm stared by hive shell script can be controlled via:
 
-      if [ "$SERVICE" = "metastore" ]; then
-      export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore
-      else
-      export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client
-      fi
+if [ "$SERVICE" = "metastore" ]; then
+  export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore
+else
+  export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client
+fi
 
-      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
-      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS{{heap_dump_opts}}"
+export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
+export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS{{heap_dump_opts}}"
 
-      # Larger heap size may be required when running queries over large number of files or partitions.
-      # By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
-      # appropriate for hive server (hwi etc).
+# Larger heap size may be required when running queries over large number of files or partitions.
+# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
+# appropriate for hive server (hwi etc).
 
 
-      # Set HADOOP_HOME to point to a specific hadoop install directory
-      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
 
-      export HIVE_HOME=${HIVE_HOME:-{{hive_home_dir}}}
+export HIVE_HOME=${HIVE_HOME:-{{hive_home_dir}}}
 
-      # Hive Configuration Directory can be controlled by:
-      export HIVE_CONF_DIR=${HIVE_CONF_DIR:-{{hive_config_dir}}}
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR=${HIVE_CONF_DIR:-{{hive_config_dir}}}
 
-      # Folder containing extra libraries required for hive compilation/execution can be controlled by:
-      if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
-        if [ -f "${HIVE_AUX_JARS_PATH}" ]; then
-          export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
-        elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then
-          export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar
-        fi
-      elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then
-        export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar
-      fi
+# Folder containing extra libraries required for hive compilation/execution can be controlled by:
+if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
+  if [ -f "${HIVE_AUX_JARS_PATH}" ]; then
+    export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
+  elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then
+    export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar
+  fi
+elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then
+  export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar
+fi
 
-      export METASTORE_PORT={{hive_metastore_port}}
+export METASTORE_PORT={{hive_metastore_port}}
 
-      {% if sqla_db_used or lib_dir_available %}
-      export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:{{jdbc_libs_dir}}"
-      export JAVA_LIBRARY_PATH="$JAVA_LIBRARY_PATH:{{jdbc_libs_dir}}"
-      {% endif %}
+{% if sqla_db_used or lib_dir_available %}
+export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:{{jdbc_libs_dir}}"
+export JAVA_LIBRARY_PATH="$JAVA_LIBRARY_PATH:{{jdbc_libs_dir}}"
+{% endif %}
     </value>
     <value-attributes>
       <type>content</type>
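
The reindentation matters because Jinja preserves literal whitespace: every leading space inside the <value> element lands verbatim in the rendered hive-env.sh. A small demonstration of that behavior (requires the jinja2 package; the one-line template is a toy stand-in for the content above):

# Small demonstration that template indentation leaks into the output.
# Requires: pip install jinja2. The template is a toy stand-in.
from jinja2 import Template

indented = Template("      export HADOOP_HEAPSIZE={{ hive_heapsize }}")
flush = Template("export HADOOP_HEAPSIZE={{ hive_heapsize }}")

print(repr(indented.render(hive_heapsize=1024)))  # '      export HADOOP_HEAPSIZE=1024'
print(repr(flush.render(hive_heapsize=1024)))     # 'export HADOOP_HEAPSIZE=1024'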

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb3d3ea6/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-env.xml
index ada4859..86720f4 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-env.xml
@@ -100,47 +100,47 @@
     <display-name>hive-interactive-env template</display-name>
     <description>This is the jinja template for hive-env.sh file</description>
     <value>
-      if [ "$SERVICE" = "cli" ]; then
-      if [ -z "$DEBUG" ]; then
-      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
-      else
-      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
-      fi
-      fi
+if [ "$SERVICE" = "cli" ]; then
+  if [ -z "$DEBUG" ]; then
+    export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
+  else
+    export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+  fi
+fi
 
-      # The heap size of the jvm stared by hive shell script can be controlled via:
+# The heap size of the jvm stared by hive shell script can be controlled via:
 
-      if [ "$SERVICE" = "metastore" ]; then
-      export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore
-      else
-      export HADOOP_HEAPSIZE={{hive_interactive_heapsize}} # Setting for HiveServer2 and Client
-      fi
+if [ "$SERVICE" = "metastore" ]; then
+  export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore
+else
+  export HADOOP_HEAPSIZE={{hive_interactive_heapsize}} # Setting for HiveServer2 and Client
+fi
 
-      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
-      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS{{heap_dump_opts}}"
+export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
+export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS{{heap_dump_opts}}"
 
-      # Larger heap size may be required when running queries over large number of files or partitions.
-      # By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
-      # appropriate for hive server (hwi etc).
+# Larger heap size may be required when running queries over large number of files or partitions.
+# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
+# appropriate for hive server (hwi etc).
 
 
-      # Set HADOOP_HOME to point to a specific hadoop install directory
-      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
 
-      # Hive Configuration Directory can be controlled by:
-      export HIVE_CONF_DIR={{hive_server_interactive_conf_dir}}
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR={{hive_server_interactive_conf_dir}}
 
-      # Add additional hcatalog jars
-      if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
-        export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
-      else
-        export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-server2-hive2/lib/hive-hcatalog-core.jar
-      fi
+# Add additional hcatalog jars
+if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
+  export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
+else
+  export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-server2-hive2/lib/hive-hcatalog-core.jar
+fi
 
-      export METASTORE_PORT={{hive_metastore_port}}
+export METASTORE_PORT={{hive_metastore_port}}
 
-      # Spark assembly contains a conflicting copy of HiveConf from hive-1.2
-      export HIVE_SKIP_SPARK_ASSEMBLY=true
+# Spark assembly contains a conflicting copy of HiveConf from hive-1.2
+export HIVE_SKIP_SPARK_ASSEMBLY=true
 
     </value>
     <value-attributes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb3d3ea6/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/kerberos.json
new file mode 100644
index 0000000..b6e57e1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/kerberos.json
@@ -0,0 +1,151 @@
+{
+  "services": [
+    {
+      "name": "HIVE",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "hive-site": {
+            "hive.metastore.sasl.enabled": "true",
+            "hive.server2.authentication": "KERBEROS"
+          }
+        },
+        {
+          "ranger-hive-audit": {
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "HIVE_METASTORE",
+          "identities": [
+            {
+              "name": "/HIVE/HIVE_SERVER/hive_server_hive",
+              "principal": {
+                "configuration": "hive-site/hive.metastore.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "hive-site/hive.metastore.kerberos.keytab.file"
+              }
+            }
+          ]
+        },
+        {
+          "name": "HIVE_SERVER",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "hive_server_hive",
+              "principal": {
+                "value": "hive/_HOST@${realm}",
+                "type": "service",
+                "configuration": "hive-site/hive.server2.authentication.kerberos.principal",
+                "local_username": "${hive-env/hive_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hive.service.keytab",
+                "owner": {
+                  "name": "${hive-env/hive_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hive-site/hive.server2.authentication.kerberos.keytab"
+              }
+            },
+            {
+              "name": "atlas_kafka",
+              "reference": "/HIVE/HIVE_SERVER/hive_server_hive",
+              "principal": {
+                "configuration": "hive-atlas-application.properties/atlas.jaas.KafkaClient.option.principal"
+              },
+              "keytab": {
+                "configuration": "hive-atlas-application.properties/atlas.jaas.KafkaClient.option.keyTab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hive-site/hive.server2.authentication.spnego.principal"
+              },
+              "keytab": {
+                "configuration": "hive-site/hive.server2.authentication.spnego.keytab"
+              }
+            },
+            {
+              "name": "ranger_audit",
+              "reference": "/HIVE/HIVE_SERVER/hive_server_hive",
+              "principal": {
+                "configuration": "ranger-hive-audit/xasecure.audit.jaas.Client.option.principal"
+              },
+              "keytab": {
+                "configuration": "ranger-hive-audit/xasecure.audit.jaas.Client.option.keyTab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "HIVE_SERVER_INTERACTIVE",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "/HIVE/HIVE_SERVER/hive_server_hive"
+            },
+            {
+              "name": "/HIVE/HIVE_SERVER/spnego"
+            },
+            {
+              "name": "/YARN/NODEMANAGER/llap_zk_hive"
+            }
+          ]
+        },
+        {
+          "name": "WEBHCAT_SERVER",
+          "identities": [
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "webhcat-site/templeton.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "webhcat-site/templeton.kerberos.keytab"
+              }
+            }
+          ],
+          "configurations": [
+            {
+              "core-site": {
+                "hadoop.proxyuser.HTTP.hosts": "${clusterHostInfo/webhcat_server_host|append(core-site/hadoop.proxyuser.HTTP.hosts, \\\\,, true)}"
+              }
+            },
+            {
+              "webhcat-site": {
+                "templeton.kerberos.secret": "secret",
+                "templeton.hive.properties": "hive.metastore.local=false,hive.metastore.uris=${clusterHostInfo/hive_metastore_host|each(thrift://%s:9083, \\\\,, \\s*\\,\\s*)},hive.metastore.sasl.enabled=true,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse,hive.exec.mode.local.auto=false,hive.metastore.kerberos.principal=hive/_HOST@${realm}"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb3d3ea6/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
index b1501b8..60d50eb 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
@@ -117,7 +117,7 @@
                 },
                 "group": {
                   "name": "${cluster-env/user_group}",
-                  "access": "r"
+                  "access": ""
                 },
                 "configuration": "hive-interactive-site/hive.llap.zk.sm.keytab.file"
               },
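
Both kerberos.json changes give the keytab's group an empty access string, which is what makes the keytab group non-readable. A speculative sketch of how such access strings could translate into a POSIX mode (the mapping function is an assumption for illustration, not Ambari's actual implementation):

# Speculative helper translating kerberos.json access strings into a
# POSIX mode; illustrative only, not Ambari's actual implementation.
def keytab_mode(owner_access, group_access):
    mode = 0
    if "r" in owner_access: mode |= 0o400
    if "w" in owner_access: mode |= 0o200
    if "r" in group_access: mode |= 0o040
    if "w" in group_access: mode |= 0o020
    return mode

# Owner "r", group "" (the new setting): group members cannot read.
print(oct(keytab_mode("r", "")))   # 0o400
# Owner "r", group "r" (the old YARN setting above): 0o440.
print(oct(keytab_mode("r", "r")))  # 0o440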


[33/50] [abbrv] ambari git commit: AMBARI-21092. HSI start failed due to Unrecognized VM option 'UseParallelGC-Xss512k' during EU (missing space). (sseth via Swapan Shridhar)

Posted by nc...@apache.org.
AMBARI-21092. HSI start failed due to Unrecognized VM option 'UseParallelGC-Xss512k' during EU (missing space). (sseth via Swapan Shridhar)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/da816585
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/da816585
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/da816585

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: da8165851c5e81433ba843375bfee25c794ef6a4
Parents: b55e457
Author: Swapan Shridhar <ss...@hortonworks.com>
Authored: Wed Jul 19 14:57:40 2017 -0700
Committer: Swapan Shridhar <ss...@hortonworks.com>
Committed: Wed Jul 19 14:57:40 2017 -0700

----------------------------------------------------------------------
 .../src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/da816585/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
index db3ef59..ec6ee55 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
@@ -523,7 +523,7 @@
 
           <definition xsi:type="configure" id="llap_append_stack_size_java_opts" summary="Update JVM stack size for LLAP">
             <type>hive-interactive-env</type>
-            <insert key="llap_java_opts" value="-Xss512k" insert-type="append" newline-before="false" newline-after="false" />
+            <insert key="llap_java_opts" value=" -Xss512k " insert-type="append" newline-before="false" newline-after="false" />
           </definition>
 
           <definition xsi:type="configure" id="llap_update_shuffle_parallel_copies" summary="Update tez shuffle parallel copies for LLAP">
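
The padding spaces matter because insert-type="append" does plain string concatenation: without them, the appended flag fuses with whatever currently ends llap_java_opts, producing exactly the 'UseParallelGC-Xss512k' failure from the summary. A two-line illustration:

# Why the padding spaces matter: append is plain string concatenation.
current = "-XX:+UseParallelGC"
print(current + "-Xss512k")    # -XX:+UseParallelGC-Xss512k  -> unrecognized VM option
print(current + " -Xss512k ")  # -XX:+UseParallelGC -Xss512k -> parsed as two options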


[35/50] [abbrv] ambari git commit: AMBARI-21526 - ZKFC Doesn't Update its Version On Some Upgrades (jonathanhurley)

Posted by nc...@apache.org.
AMBARI-21526 - ZKFC Doesn't Update its Version On Some Upgrades (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4fdca575
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4fdca575
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4fdca575

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 4fdca575bcf61e5d8d85a4ff6b7fc08d61dfb5e4
Parents: 6b4d093
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Wed Jul 19 16:49:01 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Wed Jul 19 20:30:27 2017 -0400

----------------------------------------------------------------------
 .../libraries/functions/stack_features.py       |  2 +-
 .../listeners/upgrade/StackVersionListener.java | 33 +++++++++--
 .../2.1.0.2.0/package/scripts/zkfc_slave.py     |  8 +--
 .../upgrade/StackVersionListenerTest.java       | 58 ++++++++++++--------
 4 files changed, 65 insertions(+), 36 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/4fdca575/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
index 24201dd..b241288 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
@@ -123,7 +123,7 @@ def get_stack_feature_version(config):
   # if this is not an upgrade, then we take the simple path
   if upgrade_direction is None:
     Logger.info(
-      "Stack Feature Version Info: Cluster Stack={0}, Cluster Current Version={1}, Command Stack={2}, Command Version={3}-> {4}".format(
+      "Stack Feature Version Info: Cluster Stack={0}, Cluster Current Version={1}, Command Stack={2}, Command Version={3} -> {4}".format(
         stack_version, current_cluster_version, command_stack, command_version, version_for_stack_feature_checks))
 
     return version_for_stack_feature_checks

http://git-wip-us.apache.org/repos/asf/ambari/blob/4fdca575/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/StackVersionListener.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/StackVersionListener.java b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/StackVersionListener.java
index f8d5a5f..b812476 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/StackVersionListener.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/StackVersionListener.java
@@ -19,13 +19,16 @@ package org.apache.ambari.server.events.listeners.upgrade;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.EagerSingleton;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.events.HostComponentVersionAdvertisedEvent;
 import org.apache.ambari.server.events.publishers.VersionEventPublisher;
 import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
 import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.ComponentInfo;
 import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.State;
 import org.apache.ambari.server.state.UpgradeState;
 import org.apache.commons.lang.StringUtils;
@@ -34,6 +37,7 @@ import org.slf4j.LoggerFactory;
 
 import com.google.common.eventbus.Subscribe;
 import com.google.inject.Inject;
+import com.google.inject.Provider;
 import com.google.inject.Singleton;
 
 /**
@@ -55,6 +59,13 @@ public class StackVersionListener {
   private RepositoryVersionDAO repositoryVersionDAO;
 
   /**
+   * Used for looking up a component's advertising version status given a stack
+   * and name.
+   */
+  @Inject
+  private Provider<AmbariMetaInfo> ambariMetaInfoProvider;
+
+  /**
    * Constructor.
    *
    * @param eventPublisher  the publisher
@@ -95,20 +106,30 @@ public class StackVersionListener {
 
     // Update host component version value if needed
     try {
-      ServiceComponent sc = cluster.getService(sch.getServiceName()).getServiceComponent(
-          sch.getServiceComponentName());
+      // get the component information for the desired stack; if a component
+      // moves from UNKNOWN to providing a version, we must do the version
+      // advertised check against the target stack
+      StackId desiredStackId = sch.getDesiredStackId();
+
+      AmbariMetaInfo ambariMetaInfo = ambariMetaInfoProvider.get();
+      ComponentInfo componentInfo = ambariMetaInfo.getComponent(desiredStackId.getStackName(),
+          desiredStackId.getStackVersion(), sch.getServiceName(), sch.getServiceComponentName());
 
       // not advertising a version, do nothing
-      if (!sc.isVersionAdvertised()) {
+      if (!componentInfo.isVersionAdvertised()) {
         // that's odd; a version came back - log it and still do nothing
         if (!StringUtils.equalsIgnoreCase(UNKNOWN_VERSION, newVersion)) {
-          LOG.debug(
+          LOG.warn(
               "ServiceComponent {} doesn't advertise version, however ServiceHostComponent {} on host {} advertised version as {}. Skipping version update",
-              sc.getName(), sch.getServiceComponentName(), sch.getHostName(), newVersion);
+              sch.getServiceComponentName(), sch.getServiceComponentName(), sch.getHostName(),
+              newVersion);
         }
         return;
       }
 
+      ServiceComponent sc = cluster.getService(sch.getServiceName()).getServiceComponent(
+          sch.getServiceComponentName());
+
       // proces the UNKNOWN version
       if (StringUtils.equalsIgnoreCase(UNKNOWN_VERSION, newVersion)) {
         processUnknownDesiredVersion(cluster, sc, sch, newVersion);
@@ -215,4 +236,4 @@ public class StackVersionListener {
     sch.setUpgradeState(upgradeState);
     sch.recalculateHostVersionState();
   }
-}
+}
\ No newline at end of file
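
The listener now resolves whether a component advertises a version from the stack definition for the host component's desired stack, rather than from the live ServiceComponent, so a component moving out of UNKNOWN during an upgrade is checked against the target stack. A loose Python paraphrase of the new control flow (names mirror the Java above; the lookup objects are stand-ins, not real Ambari APIs):

UNKNOWN_VERSION = "UNKNOWN"

def on_version_advertised(cluster, sch, new_version, ambari_meta_info, log):
    # Loose paraphrase of the Java above; ambari_meta_info and sch are
    # stand-in objects for illustration only.
    desired_stack = sch.get_desired_stack_id()
    component_info = ambari_meta_info.get_component(
        desired_stack.name, desired_stack.version,
        sch.service_name, sch.component_name)

    if not component_info.is_version_advertised():
        if new_version.upper() != UNKNOWN_VERSION:
            # Odd: a version came back anyway; warn and skip the update.
            log.warn("component does not advertise a version; skipping update")
        return

    sc = cluster.get_service(sch.service_name).get_component(sch.component_name)
    # ... UNKNOWN handling and version update proceed as before ...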

http://git-wip-us.apache.org/repos/asf/ambari/blob/4fdca575/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py
index ca5f605..cd47109 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py
@@ -30,11 +30,6 @@ from resource_management.core import shell
 from resource_management.libraries.functions import conf_select, stack_select
 from resource_management.libraries.functions.constants import StackFeature
 from resource_management.libraries.functions.check_process_status import check_process_status
-from resource_management.libraries.functions.security_commons import build_expectations
-from resource_management.libraries.functions.security_commons import cached_kinit_executor
-from resource_management.libraries.functions.security_commons import get_params_from_filesystem
-from resource_management.libraries.functions.security_commons import validate_security_config_properties
-from resource_management.libraries.functions.security_commons import FILE_TYPE_XML
 from resource_management.libraries.functions.stack_features import check_stack_feature
 from resource_management.libraries.script import Script
 from resource_management.core.resources.zkmigrator import ZkMigrator
@@ -144,8 +139,7 @@ class ZkfcSlaveDefault(ZkfcSlave):
     Logger.info("Executing Stack Upgrade pre-restart")
     import params
     env.set_params(params)
-    if params.version and check_stack_feature(StackFeature.ZKFC_VERSION_ADVERTISED, params.version) \
-        and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+    if check_stack_feature(StackFeature.ZKFC_VERSION_ADVERTISED, params.version_for_stack_feature_checks):
       conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hadoop-hdfs-zkfc", params.version)
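
The guard collapses to a single feature check keyed on version_for_stack_feature_checks, which stack_features.py derives for both upgrade and non-upgrade commands, so ZKFC now reports its version on express upgrades as well. A trimmed sketch of the gate (mirrors the diff; check_stack_feature here is a stub, and the version string is an assumed example):

# Trimmed sketch of the new gate; check_stack_feature is a stub standing
# in for resource_management.libraries.functions.check_stack_feature.
def check_stack_feature(feature, version):
    # The real implementation consults the stack's feature metadata; this
    # stub just pretends every 2.x stack advertises the ZKFC version.
    return version is not None and version.startswith("2.")

ZKFC_VERSION_ADVERTISED = "zkfc_version_advertised"
version_for_stack_feature_checks = "2.4.0.0-169"  # assumed example value

if check_stack_feature(ZKFC_VERSION_ADVERTISED, version_for_stack_feature_checks):
    # conf_select.select / stack_select.select would run here.
    print("selecting hadoop-hdfs-zkfc for %s" % version_for_stack_feature_checks)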
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4fdca575/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/StackVersionListenerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/StackVersionListenerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/StackVersionListenerTest.java
index efe594a..ff6fab9 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/StackVersionListenerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/StackVersionListenerTest.java
@@ -65,16 +65,15 @@ public class StackVersionListenerTest extends EasyMockSupport {
   private static final RepositoryVersionEntity DUMMY_REPOSITORY_VERSION_ENTITY = new RepositoryVersionEntity();
   private static final HostVersionEntity DUMMY_HOST_VERSION_ENTITY = new HostVersionEntity();
   private static final UpgradeEntity DUMMY_UPGRADE_ENTITY = new UpgradeEntity();
-  public static final String STACK_NAME = "HDP-2.4.0.0";
-  public static final String STACK_VERSION = "2.4.0.0";
+  public static final String STACK_NAME = "HDP";
+  public static final String STACK_VERSION = "2.4";
 
   private Cluster cluster;
   private ServiceComponentHost sch;
   private Service service;
   private ServiceComponent serviceComponent;
   private VersionEventPublisher publisher = new VersionEventPublisher();
-  private ComponentInfo componentInfo;
-  private StackId stackId;
+  private StackId stackId = new StackId(STACK_NAME, STACK_VERSION);
 
   @TestSubject
   private StackVersionListener listener = new StackVersionListener(publisher);
@@ -82,6 +81,12 @@ public class StackVersionListenerTest extends EasyMockSupport {
   @Mock
   private Provider<AmbariMetaInfo> ambariMetaInfoProvider;
 
+  @Mock
+  private ComponentInfo componentInfo;
+
+  @Mock
+  private AmbariMetaInfo ambariMetaInfo;
+
   @Before
   public void setup() throws Exception {
     cluster = createNiceMock(Cluster.class);
@@ -89,20 +94,27 @@ public class StackVersionListenerTest extends EasyMockSupport {
     service = createNiceMock(Service.class);
     serviceComponent = createNiceMock(ServiceComponent.class);
     componentInfo = createNiceMock(ComponentInfo.class);
-    stackId = createNiceMock(StackId.class);
 
     expect(cluster.getClusterId()).andReturn(CLUSTER_ID);
 
-    expect(cluster.getService(SERVICE_NAME)).andReturn(service).atLeastOnce();
-    expect(service.getServiceComponent(SERVICE_COMPONENT_NAME)).andReturn(serviceComponent).atLeastOnce();
+    expect(cluster.getService(SERVICE_NAME)).andReturn(service).anyTimes();
+    expect(service.getServiceComponent(SERVICE_COMPONENT_NAME)).andReturn(
+        serviceComponent).anyTimes();
+    expect(sch.getDesiredStackId()).andReturn(stackId).atLeastOnce();
     expect(sch.getServiceName()).andReturn(SERVICE_NAME).atLeastOnce();
     expect(sch.getServiceComponentName()).andReturn(SERVICE_COMPONENT_NAME).atLeastOnce();
+
+    expect(ambariMetaInfoProvider.get()).andReturn(ambariMetaInfo).atLeastOnce();
+    expect(ambariMetaInfo.getComponent(STACK_NAME, STACK_VERSION, SERVICE_NAME,
+        SERVICE_COMPONENT_NAME)).andReturn(componentInfo).atLeastOnce();
+
+    injectMocks(listener);
   }
 
   @Test
   public void testRecalculateHostVersionStateWhenVersionIsNullAndNewVersionIsNotBlank() throws AmbariException {
     expect(sch.getVersion()).andReturn(null);
-    expect(serviceComponent.isVersionAdvertised()).andReturn(Boolean.TRUE);
+    expect(componentInfo.isVersionAdvertised()).andReturn(true).once();
     sch.setVersion(INVALID_NEW_VERSION);
     expectLastCall().once();
     expect(sch.recalculateHostVersionState()).andReturn(null).once();
@@ -115,7 +127,7 @@ public class StackVersionListenerTest extends EasyMockSupport {
   @Test
   public void testRecalculateHostVersionStateWhenVersionIsUnknownAndNewVersionIsNotBlank() throws AmbariException {
     expect(sch.getVersion()).andReturn(UNKNOWN_VERSION);
-    expect(serviceComponent.isVersionAdvertised()).andReturn(Boolean.TRUE);
+    expect(componentInfo.isVersionAdvertised()).andReturn(true).once();
     sch.setVersion(INVALID_NEW_VERSION);
     expectLastCall().once();
     expect(sch.recalculateHostVersionState()).andReturn(null).once();
@@ -128,7 +140,7 @@ public class StackVersionListenerTest extends EasyMockSupport {
   @Test
   public void testRecalculateClusterVersionStateWhenVersionIsNullAndNewVersionIsValid() throws AmbariException {
     expect(sch.getVersion()).andReturn(null);
-    expect(serviceComponent.isVersionAdvertised()).andReturn(Boolean.TRUE);
+    expect(componentInfo.isVersionAdvertised()).andReturn(true).once();
     sch.setVersion(VALID_NEW_VERSION);
     expectLastCall().once();
     expect(sch.recalculateHostVersionState()).andReturn(DUMMY_HOST_VERSION_ENTITY).once();
@@ -141,7 +153,7 @@ public class StackVersionListenerTest extends EasyMockSupport {
   @Test
   public void testRecalculateClusterVersionStateWhenVersionIsUnknownAndNewVersionIsValid() throws AmbariException {
     expect(sch.getVersion()).andReturn(UNKNOWN_VERSION);
-    expect(serviceComponent.isVersionAdvertised()).andReturn(Boolean.TRUE);
+    expect(componentInfo.isVersionAdvertised()).andReturn(true).once();
     sch.setVersion(VALID_NEW_VERSION);
     expectLastCall().once();
     expect(sch.recalculateHostVersionState()).andReturn(DUMMY_HOST_VERSION_ENTITY).once();
@@ -154,7 +166,7 @@ public class StackVersionListenerTest extends EasyMockSupport {
   @Test
   public void testRecalculateHostVersionStateWhenComponentDesiredVersionIsUnknownAndNewVersionIsNotValid() throws AmbariException {
     expect(serviceComponent.getDesiredVersion()).andReturn(UNKNOWN_VERSION);
-    expect(serviceComponent.isVersionAdvertised()).andReturn(Boolean.TRUE);
+    expect(componentInfo.isVersionAdvertised()).andReturn(true).once();
     sch.setUpgradeState(UpgradeState.NONE);
     expectLastCall().once();
     sch.setVersion(INVALID_NEW_VERSION);
@@ -169,7 +181,7 @@ public class StackVersionListenerTest extends EasyMockSupport {
   @Test
   public void testRecalculateClusterVersionStateWhenComponentDesiredVersionIsUnknownAndNewVersionIsValid() throws AmbariException {
     expect(serviceComponent.getDesiredVersion()).andReturn(UNKNOWN_VERSION);
-    expect(serviceComponent.isVersionAdvertised()).andReturn(Boolean.TRUE);
+    expect(componentInfo.isVersionAdvertised()).andReturn(true).once();
     sch.setUpgradeState(UpgradeState.NONE);
     expectLastCall().once();
     sch.setVersion(VALID_NEW_VERSION);
@@ -183,7 +195,7 @@ public class StackVersionListenerTest extends EasyMockSupport {
 
   @Test
   public void testRecalculateClusterVersionStateWhenVersionNotAdvertised() throws AmbariException {
-    expect(serviceComponent.isVersionAdvertised()).andReturn(Boolean.FALSE);
+    expect(componentInfo.isVersionAdvertised()).andReturn(false).once();
     replayAll();
     sendEventAndVerify(VALID_NEW_VERSION);
   }
@@ -191,7 +203,7 @@ public class StackVersionListenerTest extends EasyMockSupport {
 
   @Test
   public void testNoActionTakenOnNullVersion() {
-    expect(serviceComponent.isVersionAdvertised()).andReturn(true);
+    expect(componentInfo.isVersionAdvertised()).andReturn(true).once();
     resetAll();
     replayAll();
 
@@ -204,7 +216,7 @@ public class StackVersionListenerTest extends EasyMockSupport {
 
     expect(sch.getVersion()).andReturn(VALID_PREVIOUS_VERSION);
     expect(sch.getUpgradeState()).andReturn(UpgradeState.IN_PROGRESS);
-    expect(serviceComponent.isVersionAdvertised()).andReturn(Boolean.TRUE);
+    expect(componentInfo.isVersionAdvertised()).andReturn(true).once();
     sch.setUpgradeState(UpgradeState.COMPLETE);
     expectLastCall().once();
 
@@ -218,7 +230,7 @@ public class StackVersionListenerTest extends EasyMockSupport {
   public void testSetUpgradeStateToNoneWhenNoUpgradeAndNewVersionIsEqualToComponentDesiredVersion() {
     expect(sch.getVersion()).andReturn(VALID_PREVIOUS_VERSION);
     expect(sch.getUpgradeState()).andReturn(UpgradeState.IN_PROGRESS);
-    expect(serviceComponent.isVersionAdvertised()).andReturn(Boolean.TRUE);
+    expect(componentInfo.isVersionAdvertised()).andReturn(true).once();
     sch.setUpgradeState(UpgradeState.NONE);
     expectLastCall().once();
 
@@ -232,7 +244,7 @@ public class StackVersionListenerTest extends EasyMockSupport {
   public void testSetUpgradeStateToVersionMismatchWhenUpgradeIsInProgressAndNewVersionIsNotEqualToComponentDesiredVersion() {
     expect(sch.getVersion()).andReturn(VALID_PREVIOUS_VERSION);
     expect(sch.getUpgradeState()).andReturn(UpgradeState.IN_PROGRESS);
-    expect(serviceComponent.isVersionAdvertised()).andReturn(Boolean.TRUE);
+    expect(componentInfo.isVersionAdvertised()).andReturn(true).once();
     sch.setUpgradeState(UpgradeState.VERSION_MISMATCH);
     expectLastCall().once();
 
@@ -251,7 +263,7 @@ public class StackVersionListenerTest extends EasyMockSupport {
     expect(sch.getUpgradeState()).andReturn(UpgradeState.VERSION_MISMATCH);
     expect(cluster.getUpgradeInProgress()).andReturn(DUMMY_UPGRADE_ENTITY);
     expect(serviceComponent.getDesiredVersion()).andStubReturn(VALID_NEW_VERSION);
-    expect(serviceComponent.isVersionAdvertised()).andReturn(Boolean.TRUE);
+    expect(componentInfo.isVersionAdvertised()).andReturn(true).once();
     sch.setUpgradeState(UpgradeState.COMPLETE);
     expectLastCall().once();
 
@@ -265,7 +277,7 @@ public class StackVersionListenerTest extends EasyMockSupport {
     expect(sch.getVersion()).andReturn(VALID_PREVIOUS_VERSION);
     expect(sch.getUpgradeState()).andReturn(UpgradeState.VERSION_MISMATCH);
     expect(serviceComponent.getDesiredVersion()).andStubReturn(VALID_NEW_VERSION);
-    expect(serviceComponent.isVersionAdvertised()).andReturn(Boolean.TRUE);
+    expect(componentInfo.isVersionAdvertised()).andReturn(true).once();
     sch.setUpgradeState(UpgradeState.NONE);
     expectLastCall().once();
 
@@ -277,7 +289,7 @@ public class StackVersionListenerTest extends EasyMockSupport {
   @Test
   public void testSetUpgradeStateToVersionMismatchByDefaultWhenHostAndNewVersionsAreValid() {
     expect(sch.getVersion()).andReturn(VALID_PREVIOUS_VERSION);
-    expect(serviceComponent.isVersionAdvertised()).andReturn(Boolean.TRUE);
+    expect(componentInfo.isVersionAdvertised()).andReturn(true).once();
     sch.setUpgradeState(UpgradeState.VERSION_MISMATCH);
     expectLastCall().once();
 
@@ -289,7 +301,7 @@ public class StackVersionListenerTest extends EasyMockSupport {
   @Test
   public void testSetRepositoryVersion() throws Exception {
     expect(sch.getVersion()).andReturn(UNKNOWN_VERSION);
-    expect(serviceComponent.isVersionAdvertised()).andReturn(Boolean.TRUE);
+    expect(componentInfo.isVersionAdvertised()).andReturn(true).once();
 
     RepositoryVersionDAO dao = createNiceMock(RepositoryVersionDAO.class);
     RepositoryVersionEntity entity = createNiceMock(RepositoryVersionEntity.class);
@@ -321,6 +333,8 @@ public class StackVersionListenerTest extends EasyMockSupport {
    */
   @Test
   public void testRepositoryVersionNotSetDuringUpgrade() throws Exception {
+    expect(componentInfo.isVersionAdvertised()).andReturn(true).once();
+
     // this call will make it seem like there is an upgrade in progress
     expect(cluster.getUpgradeInProgress()).andReturn(createNiceMock(UpgradeEntity.class));
 


[19/50] [abbrv] ambari git commit: AMBARI-21345 Add host doesn't fully add a node when include/exclude files are used (dsen)

Posted by nc...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json
index 2ea07e4..f50a207 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json
@@ -33,7 +33,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "true",
+        "update_files_only" : "true",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/default_with_bucket.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_with_bucket.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_with_bucket.json
index 5080d30..c1eb868 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_with_bucket.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_with_bucket.json
@@ -38,7 +38,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json b/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json
index 6ec9ec9..c99d10b 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json
@@ -32,7 +32,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "output_file":"HDFS_CLIENT-configs.tar.gz"

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/flume_only.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/flume_only.json b/ambari-server/src/test/python/stacks/2.0.6/configs/flume_only.json
index 1550715..1a4d676 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/flume_only.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/flume_only.json
@@ -31,7 +31,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json
index f572413..52a1fde 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json
@@ -35,7 +35,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json
index 9979e9d..7283bf5 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json
@@ -47,7 +47,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json b/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json
index 8d12b98..4ffa29f 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json
@@ -34,7 +34,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/repository_file.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/repository_file.json b/ambari-server/src/test/python/stacks/2.0.6/configs/repository_file.json
index 1554f1b..7efb7d9 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/repository_file.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/repository_file.json
@@ -77,7 +77,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
index 5327865..76a110e 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
@@ -38,7 +38,7 @@
         "script": "scripts/yarn_client.py",
         "excluded_hosts": "host1",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false"
+        "update_files_only" : "false"
     },
     "taskId": 186, 
     "public_hostname": "c6401.ambari.apache.org",

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.3/configs/ats_1_5.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/configs/ats_1_5.json b/ambari-server/src/test/python/stacks/2.3/configs/ats_1_5.json
index b4342ad..475a6f9 100644
--- a/ambari-server/src/test/python/stacks/2.3/configs/ats_1_5.json
+++ b/ambari-server/src/test/python/stacks/2.3/configs/ats_1_5.json
@@ -31,7 +31,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.5/configs/hsi_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/configs/hsi_default.json b/ambari-server/src/test/python/stacks/2.5/configs/hsi_default.json
index 9dcb451..7622212 100644
--- a/ambari-server/src/test/python/stacks/2.5/configs/hsi_default.json
+++ b/ambari-server/src/test/python/stacks/2.5/configs/hsi_default.json
@@ -38,7 +38,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.5/configs/hsi_default_for_restart.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/configs/hsi_default_for_restart.json b/ambari-server/src/test/python/stacks/2.5/configs/hsi_default_for_restart.json
index f6de1c4..c2320ba 100644
--- a/ambari-server/src/test/python/stacks/2.5/configs/hsi_default_for_restart.json
+++ b/ambari-server/src/test/python/stacks/2.5/configs/hsi_default_for_restart.json
@@ -39,7 +39,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.5/configs/hsi_ha.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/configs/hsi_ha.json b/ambari-server/src/test/python/stacks/2.5/configs/hsi_ha.json
index 3fd9f72..7b79d84 100644
--- a/ambari-server/src/test/python/stacks/2.5/configs/hsi_ha.json
+++ b/ambari-server/src/test/python/stacks/2.5/configs/hsi_ha.json
@@ -38,7 +38,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/configuration/yarn-site.xml b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/configuration/yarn-site.xml
index 59ff82b..f3ea462 100755
--- a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/configuration/yarn-site.xml
+++ b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/configuration/yarn-site.xml
@@ -388,6 +388,12 @@
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
+    <name>manage.include.files</name>
+    <value>false</value>
+    <description>If true, Ambari will manage the include file when yarn.resourcemanager.nodes.include-path is configured.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
     <name>yarn.http.policy</name>
     <value>HTTP_ONLY</value>
     <description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_linux.py b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_linux.py
index 4d42861..da54b7c 100755
--- a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_linux.py
+++ b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_linux.py
@@ -233,6 +233,13 @@ has_ats = not len(ats_host) == 0
 
 nm_hosts = default("/clusterHostInfo/nm_hosts", [])
 
+# include file
+include_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.include-path", None)
+include_hosts = None
+manage_include_files = default("/configurations/yarn-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+  include_hosts = list(set(nm_hosts) - set(exclude_hosts))
+
 # don't use len(nm_hosts) here, because the check can take too much time on large clusters
 number_of_nm = 1
 
@@ -315,7 +322,7 @@ HdfsResource = functools.partial(
   immutable_paths = get_not_managed_resources(),
   dfs_type = dfs_type
  )
-update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
+update_files_only = default("/commandParams/update_files_only",False)
 
 mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_windows.py b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_windows.py
index 0f8ce73..cac93ee 100755
--- a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_windows.py
+++ b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_windows.py
@@ -56,4 +56,12 @@ hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
 
 exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
 exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
-update_exclude_file_only = config['commandParams']['update_exclude_file_only']
+update_files_only = default("/commandParams/update_files_only", False)
+
+nm_hosts = default("/clusterHostInfo/nm_hosts", [])
+# include file
+include_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.include-path", None)
+include_hosts = None
+manage_include_files = default("/configurations/yarn-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+  include_hosts = list(set(nm_hosts) - set(exclude_hosts))
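
The include list computed above is simply the NodeManager hosts minus the
decommissioned hosts. A minimal standalone sketch of that derivation, using
hypothetical host names (Ambari populates the real lists from /clusterHostInfo
in the command JSON):

  # Hypothetical inputs; Ambari reads these from /clusterHostInfo.
  nm_hosts = ["c6401.ambari.apache.org", "c6402.ambari.apache.org", "c6403.ambari.apache.org"]
  exclude_hosts = ["c6403.ambari.apache.org"]          # decommissioned NodeManagers

  include_file_path = "/etc/hadoop/conf/yarn.include"  # yarn.resourcemanager.nodes.include-path
  manage_include_files = True                          # manage.include.files

  include_hosts = None
  if include_file_path and manage_include_files:
      # same set difference as in params_linux.py/params_windows.py above;
      # sorted() only to make the output deterministic
      include_hosts = sorted(set(nm_hosts) - set(exclude_hosts))

  print(include_hosts)  # ['c6401.ambari.apache.org', 'c6402.ambari.apache.org']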

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/resourcemanager.py b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/resourcemanager.py
index 6a7eea7..71c7bc1 100755
--- a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/resourcemanager.py
+++ b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/resourcemanager.py
@@ -91,7 +91,14 @@ class ResourcemanagerWindows(Resourcemanager):
          mode="f"
     )
 
-    if params.update_exclude_file_only == False:
+    if params.include_hosts:
+      File(params.include_file_path,
+           content=Template("include_hosts_list.j2"),
+           owner=yarn_user,
+           mode="f"
+           )
+
+    if params.update_files_only == False:
       Execute(yarn_refresh_cmd, user=yarn_user)
 
 
@@ -219,7 +226,14 @@ class ResourcemanagerDefault(Resourcemanager):
          group=user_group
     )
 
-    if params.update_exclude_file_only == False:
+    if params.include_hosts:
+      File(params.include_file_path,
+           content=Template("include_hosts_list.j2"),
+           owner=yarn_user,
+           group=user_group
+           )
+
+    if params.update_files_only == False:
       Execute(yarn_refresh_cmd,
             environment= {'PATH' : params.execute_path },
             user=yarn_user)
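
In both branches above the File resources only materialize the host lists; the
node refresh itself is the separate Execute of yarn_refresh_cmd, which
update_files_only now suppresses. A rough standalone equivalent, assuming the
stock "yarn rmadmin -refreshNodes" CLI and a hypothetical file path, without
the resource_management library:

  import subprocess

  def write_lists_and_refresh(include_hosts, include_file_path, update_files_only=False):
      # Materialize the include file only when an include list was computed.
      if include_hosts:
          with open(include_file_path, "w") as f:
              f.write("\n".join(include_hosts) + "\n")
      # update_files_only=True rewrites the files but defers the RM refresh.
      if not update_files_only:
          subprocess.check_call(["yarn", "rmadmin", "-refreshNodes"])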

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/templates/include_hosts_list.j2 b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/templates/include_hosts_list.j2
new file mode 100644
index 0000000..42e33c0
--- /dev/null
+++ b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in include_hosts %}
+{{host}}
+{% endfor %}
\ No newline at end of file
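
The template emits one host per line. To preview the rendered file outside
Ambari (requires the jinja2 package; the host list is hypothetical):

  from jinja2 import Template

  # Equivalent to the loop in include_hosts_list.j2, minus the license header.
  template = Template("{% for host in include_hosts %}{{ host }}\n{% endfor %}")
  print(template.render(include_hosts=["c6401.ambari.apache.org",
                                       "c6402.ambari.apache.org"]), end="")
  # c6401.ambari.apache.org
  # c6402.ambari.apache.org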


[34/50] [abbrv] ambari git commit: AMBARI-21522 - Installation Commands On New Clusters Don't Send Down Correct Versions (jonathanhurley)

Posted by nc...@apache.org.
AMBARI-21522 - Installation Commands On New Clusters Don't Send Down Correct Versions (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6b4d0930
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6b4d0930
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6b4d0930

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 6b4d0930ae2acf09c9933f35f7dcf007b6ee0f19
Parents: da81658
Author: Dmytro Sen <ds...@apache.org>
Authored: Wed Jul 19 18:38:00 2017 +0300
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Wed Jul 19 20:16:47 2017 -0400

----------------------------------------------------------------------
 .../ATLAS/0.1.0.2.3/package/scripts/atlas_client.py                | 2 +-
 .../common-services/ATLAS/0.1.0.2.3/package/scripts/params.py      | 2 ++
 2 files changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/6b4d0930/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/atlas_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/atlas_client.py b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/atlas_client.py
index 26742ae..2414fff 100644
--- a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/atlas_client.py
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/atlas_client.py
@@ -37,7 +37,7 @@ class AtlasClient(Script):
     import params
     env.set_params(params)
 
-    if check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT, params.version):
+    if check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT, params.version_for_stack_feature_checks):
       conf_select.select(params.stack_name, "atlas", params.version)
       stack_select.select("atlas-client", params.version)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6b4d0930/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
index 0c84b5c..3ed469a 100644
--- a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
@@ -30,6 +30,7 @@ from resource_management.libraries.functions.default import default
 # Local Imports
 from status_params import *
 from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.stack_features import get_stack_feature_version
 from resource_management.libraries.functions import StackFeature
 from resource_management.libraries.functions.is_empty import is_empty
 from resource_management.libraries.functions.expect import expect
@@ -100,6 +101,7 @@ if security_enabled:
 
 # New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
 version = default("/commandParams/version", None)
+version_for_stack_feature_checks = get_stack_feature_version(config)
 
 # stack version
 stack_version_unformatted = config['hostLevelParams']['stack_version']
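
The fix swaps params.version, which is None on a fresh cluster install, for the
value computed by get_stack_feature_version, which always resolves to a usable
version. A toy illustration of why the gate changed (check_stack_feature here
is a trivial stub, not the real resource_management implementation):

  def check_stack_feature(feature, version):
      # Stub: the real function looks the feature's minimum version up in
      # stack_features.json.
      return version is not None and version.startswith("2.")

  version = None                                     # /commandParams/version on a new install
  version_for_stack_feature_checks = "2.5.0.0-1234"  # from get_stack_feature_version(config)

  print(check_stack_feature("atlas_upgrade_support", version))                           # False
  print(check_stack_feature("atlas_upgrade_support", version_for_stack_feature_checks))  # True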


[04/50] [abbrv] ambari git commit: AMBARI-21451 - Expected Values Like original_stack Are Missing On Downgrades (jonathanhurley)

Posted by nc...@apache.org.
AMBARI-21451 - Expected Values Like original_stack Are Missing On Downgrades (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f27f3aff
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f27f3aff
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f27f3aff

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: f27f3affbb4c7f49944dcefc7581ac228b103e3f
Parents: eb3d3ea
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Wed Jul 12 13:30:16 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Wed Jul 12 19:26:37 2017 -0400

----------------------------------------------------------------------
 .../libraries/functions/stack_features.py       |  28 ++--
 .../controller/ActionExecutionContext.java      |  30 ++--
 .../controller/AmbariActionExecutionHelper.java |  15 +-
 .../ClusterStackVersionResourceProvider.java    |   2 +-
 .../upgrades/UpgradeUserKerberosDescriptor.java | 142 +++++++------------
 .../ambari/server/state/UpgradeContext.java     |  16 ++-
 .../SPARK/1.2.1/package/scripts/params.py       |  11 +-
 .../SPARK/1.2.1/package/scripts/setup_spark.py  |   6 +-
 .../1.2.1/package/scripts/spark_service.py      |   6 +-
 .../UpgradeUserKerberosDescriptorTest.java      |  59 ++++++--
 .../src/test/python/TestStackFeature.py         |  44 ++++--
 .../test/python/stacks/2.0.6/configs/nn_eu.json |   2 +-
 .../stacks/2.0.6/configs/nn_eu_standby.json     |   2 +-
 .../2.1/configs/hive-metastore-upgrade.json     |   2 +-
 .../python/stacks/2.2/configs/knox_upgrade.json |   2 +-
 15 files changed, 199 insertions(+), 168 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
index 576c138..24201dd 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
@@ -105,7 +105,10 @@ def get_stack_feature_version(config):
 
   # something like 2.4.0.0-1234; represents the version for the command
   # (or None if this is a cluster install and it hasn't been calculated yet)
-  version = default("/commandParams/version", None)
+  # this is always guaranteed to be the correct version for the command, even in
+  # upgrade and downgrade scenarios
+  command_version = default("/commandParams/version", None)
+  command_stack = default("/commandParams/target_stack", None)
 
   # something like 2.4.0.0-1234
   # (or None if this is a cluster install and it hasn't been calculated yet)
@@ -115,13 +118,13 @@ def get_stack_feature_version(config):
   upgrade_direction = default("/commandParams/upgrade_direction", None)
 
   # start out with the value that's right 99% of the time
-  version_for_stack_feature_checks = version if version is not None else stack_version
+  version_for_stack_feature_checks = command_version if command_version is not None else stack_version
 
   # if this is not an upgrade, then we take the simple path
   if upgrade_direction is None:
     Logger.info(
-      "Stack Feature Version Info: stack_version={0}, version={1}, current_cluster_version={2} -> {3}".format(
-        stack_version, version, current_cluster_version, version_for_stack_feature_checks))
+      "Stack Feature Version Info: Cluster Stack={0}, Cluster Current Version={1}, Command Stack={2}, Command Version={3}-> {4}".format(
+        stack_version, current_cluster_version, command_stack, command_version, version_for_stack_feature_checks))
 
     return version_for_stack_feature_checks
 
@@ -130,15 +133,12 @@ def get_stack_feature_version(config):
   is_stop_command = _is_stop_command(config)
   if not is_stop_command:
     Logger.info(
-      "Stack Feature Version Info: stack_version={0}, version={1}, current_cluster_version={2}, upgrade_direction={3} -> {4}".format(
-        stack_version, version, current_cluster_version, upgrade_direction,
+      "Stack Feature Version Info: Cluster Stack={0}, Cluster Current Version={1}, Command Stack={2}, Command Version={3}, Upgrade Direction={4} -> {5}".format(
+        stack_version, current_cluster_version, command_stack, command_version, upgrade_direction,
         version_for_stack_feature_checks))
 
     return version_for_stack_feature_checks
 
-  original_stack = default("/commandParams/original_stack", None)
-  target_stack = default("/commandParams/target_stack", None)
-
   # something like 2.5.0.0-5678 (or None)
   downgrade_from_version = default("/commandParams/downgrade_from_version", None)
 
@@ -154,15 +154,13 @@ def get_stack_feature_version(config):
     # UPGRADE
     if current_cluster_version is not None:
       version_for_stack_feature_checks = current_cluster_version
-    elif original_stack is not None:
-      version_for_stack_feature_checks = format_stack_version(original_stack)
     else:
-      version_for_stack_feature_checks = version if version is not None else stack_version
+      version_for_stack_feature_checks = command_version if command_version is not None else stack_version
 
   Logger.info(
-    "Stack Feature Version Info: stack_version={0}, version={1}, current_cluster_version={2}, upgrade_direction={3}, original_stack={4}, target_stack={5}, downgrade_from_version={6}, stop_command={7} -> {8}".format(
-      stack_version, version, current_cluster_version, upgrade_direction, original_stack,
-      target_stack, downgrade_from_version, is_stop_command, version_for_stack_feature_checks))
+    "Stack Feature Version Info: Cluster Stack={0}, Cluster Current Version={1}, Command Stack={2}, Command Version={3}, Upgrade Direction={4}, stop_command={5} -> {6}".format(
+      stack_version, current_cluster_version, command_stack, command_version, upgrade_direction,
+      is_stop_command, version_for_stack_feature_checks))
 
   return version_for_stack_feature_checks
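
Condensed, the precedence the reworked function applies is: the command's own
version first, the cluster stack version as a fallback, and, for STOP commands
issued mid-upgrade, the cluster's current version. A sketch covering only the
branches visible in this hunk (the downgrade-specific handling around
downgrade_from_version is elided here, as it is in the diff):

  def pick_feature_version(command_version, stack_version, current_cluster_version,
                           upgrade_direction, is_stop_command):
      # Right 99% of the time: the version carried on the command itself.
      version = command_version if command_version is not None else stack_version
      if upgrade_direction is None:   # not an upgrade: simple path
          return version
      if not is_stop_command:         # upgrade, but not a STOP command
          return version
      # STOP during an upgrade: the component stops on the version it is
      # currently running, so prefer the cluster's current version.
      if current_cluster_version is not None:
          return current_cluster_version
      return version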
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
index 34d6db9..5d71869 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
@@ -27,7 +27,7 @@ import org.apache.ambari.server.actionmanager.TargetHostType;
 import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.controller.internal.RequestOperationLevel;
 import org.apache.ambari.server.controller.internal.RequestResourceFilter;
-import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 
 /**
  * The context required to create tasks and stages for a custom action
@@ -44,7 +44,7 @@ public class ActionExecutionContext {
   private String expectedComponentName;
   private boolean hostsInMaintenanceModeExcluded = true;
   private boolean allowRetry = false;
-  private StackId stackId;
+  private RepositoryVersionEntity repositoryVersion;
 
   private List<ExecutionCommandVisitor> m_visitors = new ArrayList<>();
 
@@ -175,27 +175,29 @@ public class ActionExecutionContext {
   }
 
   /**
-   * Gets the stack to use for generating stack-associated values for a command.
-   * In some cases the cluster's stack is not the correct one to use, such as
-   * when distributing a repository.
+   * Gets the stack/version to use for generating stack-associated values for a
+   * command. In some cases the cluster's stack is not the correct one to use,
+   * such as when distributing a repository.
    *
-   * @return the stackId the stack to use when generating stack-specific content
-   *         for the command.
+   * @return the repository for the stack/version to use when generating
+   *         stack-specific content for the command.
    */
-  public StackId getStackId() {
-    return stackId;
+  public RepositoryVersionEntity getRepositoryVersion() {
+    return repositoryVersion;
   }
 
   /**
-   * Sets the stack to use for generating stack-associated values for a command.
-   * In some cases the cluster's stack is not the correct one to use, such as
-   * when distributing a repository.
+   * Sets the stack/version to use for generating stack-associated values for a
+   * command. In some cases the cluster's stack is not the correct one to use,
+   * such as when distributing a repository.
    *
    * @param stackId
    *          the stackId to use for stack-based properties on the command.
    */
-  public void setStackId(StackId stackId) {
-    this.stackId = stackId;
+  public void setRepositoryVersion(RepositoryVersionEntity repositoryVersion) {
+    this.repositoryVersion = repositoryVersion;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
index 391daa9..55356c7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
@@ -539,13 +539,18 @@ public class AmbariActionExecutionHelper {
     // if the repo is null, see if any values from the context should go on the
     // host params and then return
     if (null == repositoryVersion) {
-      if (null != actionContext.getStackId()) {
-        StackId stackId = actionContext.getStackId();
+      // see if the action context has a repository set to use for the command
+      if (null != actionContext.getRepositoryVersion()) {
+        StackId stackId = actionContext.getRepositoryVersion().getStackId();
         hostLevelParams.put(STACK_NAME, stackId.getStackName());
         hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
       }
 
       return;
+    } else {
+      StackId stackId = repositoryVersion.getStackId();
+      hostLevelParams.put(STACK_NAME, stackId.getStackName());
+      hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
     }
 
     JsonObject rootJsonObject = new JsonObject();
@@ -569,11 +574,5 @@ public class AmbariActionExecutionHelper {
     }
 
     hostLevelParams.put(REPO_INFO, rootJsonObject.toString());
-
-    // set the host level params if not already set by whoever is creating this command
-    if (!hostLevelParams.containsKey(STACK_NAME) || !hostLevelParams.containsKey(STACK_VERSION)) {
-      hostLevelParams.put(STACK_NAME, repositoryVersion.getStackName());
-      hostLevelParams.put(STACK_VERSION, repositoryVersion.getStackVersion());
-    }
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
index c4fce8a..9ecea95 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
@@ -613,7 +613,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
     ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
         INSTALL_PACKAGES_ACTION, Collections.singletonList(filter), roleParams);
 
-    actionContext.setStackId(stackId);
+    actionContext.setRepositoryVersion(repoVersion);
     actionContext.setTimeout(Short.valueOf(configuration.getDefaultAgentTaskTimeout(true)));
 
     repoVersionHelper.addCommandRepository(actionContext, osFamily, repoVersion, repoInfo);

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptor.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptor.java
index 59690a3..78aaa77 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptor.java
@@ -22,6 +22,8 @@ import java.util.List;
 import java.util.TreeMap;
 import java.util.concurrent.ConcurrentMap;
 
+import org.apache.ambari.annotations.Experimental;
+import org.apache.ambari.annotations.ExperimentalFeature;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleCommand;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
@@ -29,10 +31,10 @@ import org.apache.ambari.server.agent.CommandReport;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.orm.dao.ArtifactDAO;
 import org.apache.ambari.server.orm.entities.ArtifactEntity;
-import org.apache.ambari.server.serveraction.AbstractServerAction;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.UpgradeContext;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptorUpdateHelper;
@@ -48,34 +50,9 @@ import com.google.inject.Inject;
  *
  * @see org.apache.ambari.server.state.kerberos.KerberosDescriptorUpdateHelper
  */
-public class UpgradeUserKerberosDescriptor extends AbstractServerAction {
+public class UpgradeUserKerberosDescriptor extends AbstractUpgradeServerAction {
   private static final Logger LOG = LoggerFactory.getLogger(UpgradeUserKerberosDescriptor.class);
 
-  /**
-   * The upgrade direction.
-   *
-   * @see Direction
-   */
-  private static final String UPGRADE_DIRECTION_KEY = "upgrade_direction";
-
-  /**
-   * The original "current" stack of the cluster before the upgrade started.
-   * This is the same regardless of whether the current direction is
-   * {@link Direction#UPGRADE} or {@link Direction#DOWNGRADE}.
-   *
-   * @see Direction
-   */
-  private static final String ORIGINAL_STACK_KEY = "original_stack";
-
-  /**
-   * The target upgrade stack before the upgrade started. This is the same
-   * regardless of whether the current direction is {@link Direction#UPGRADE} or
-   * {@link Direction#DOWNGRADE}.
-   *
-   * @see Direction
-   */
-  private static final String TARGET_STACK_KEY = "target_stack";
-
   private final static String KERBEROS_DESCRIPTOR_NAME = "kerberos_descriptor";
   private final static String KERBEROS_DESCRIPTOR_BACKUP_NAME = "kerberos_descriptor_backup";
 
@@ -108,70 +85,73 @@ public class UpgradeUserKerberosDescriptor extends AbstractServerAction {
     List<String> messages = new ArrayList<>();
     List<String> errorMessages = new ArrayList<>();
 
-    if (cluster != null) {
-      logMessage(messages, "Obtaining the user-defined Kerberos descriptor");
+    UpgradeContext upgradeContext = getUpgradeContext(cluster);
 
-      TreeMap<String, String> foreignKeys = new TreeMap<>();
-      foreignKeys.put("cluster", String.valueOf(cluster.getClusterId()));
+    logMessage(messages, "Obtaining the user-defined Kerberos descriptor");
 
-      ArtifactEntity entity = artifactDAO.findByNameAndForeignKeys("kerberos_descriptor", foreignKeys);
-      KerberosDescriptor userDescriptor = (entity == null) ? null : kerberosDescriptorFactory.createInstance(entity.getArtifactData());
+    TreeMap<String, String> foreignKeys = new TreeMap<>();
+    foreignKeys.put("cluster", String.valueOf(cluster.getClusterId()));
 
-      if (userDescriptor != null) {
-        StackId originalStackId = getStackIdFromCommandParams(ORIGINAL_STACK_KEY);
-        StackId targetStackId = getStackIdFromCommandParams(TARGET_STACK_KEY);
+    ArtifactEntity entity = artifactDAO.findByNameAndForeignKeys("kerberos_descriptor", foreignKeys);
+    KerberosDescriptor userDescriptor = (entity == null) ? null : kerberosDescriptorFactory.createInstance(entity.getArtifactData());
 
-        if (isDowngrade()) {
-          restoreDescriptor(foreignKeys, messages, errorMessages);
-        } else {
-          backupDescriptor(foreignKeys, messages, errorMessages);
+    if (userDescriptor != null) {
+
+      @Experimental(
+          feature = ExperimentalFeature.PATCH_UPGRADES,
+          comment = "This needs to be correctly done per-service")
+
+      StackId originalStackId = cluster.getCurrentStackVersion();
+      StackId targetStackId = upgradeContext.getRepositoryVersion().getStackId();
+
+      if (upgradeContext.getDirection() == Direction.DOWNGRADE) {
+        restoreDescriptor(foreignKeys, messages, errorMessages);
+      } else {
+        backupDescriptor(foreignKeys, messages, errorMessages);
 
-          KerberosDescriptor newDescriptor = null;
-          KerberosDescriptor previousDescriptor = null;
+        KerberosDescriptor newDescriptor = null;
+        KerberosDescriptor previousDescriptor = null;
 
-          if (targetStackId == null) {
-            logErrorMessage(messages, errorMessages, "The new stack version information was not found.");
-          } else {
-            logMessage(messages, String.format("Obtaining new stack Kerberos descriptor for %s.", targetStackId.toString()));
-            newDescriptor = ambariMetaInfo.getKerberosDescriptor(targetStackId.getStackName(), targetStackId.getStackVersion());
+        if (targetStackId == null) {
+          logErrorMessage(messages, errorMessages, "The new stack version information was not found.");
+        } else {
+          logMessage(messages, String.format("Obtaining new stack Kerberos descriptor for %s.", targetStackId.toString()));
+          newDescriptor = ambariMetaInfo.getKerberosDescriptor(targetStackId.getStackName(), targetStackId.getStackVersion());
 
-            if (newDescriptor == null) {
-              logErrorMessage(messages, errorMessages, String.format("The Kerberos descriptor for the new stack version, %s, was not found.", targetStackId.toString()));
-            }
+          if (newDescriptor == null) {
+            logErrorMessage(messages, errorMessages, String.format("The Kerberos descriptor for the new stack version, %s, was not found.", targetStackId.toString()));
           }
+        }
 
-          if (originalStackId == null) {
-            logErrorMessage(messages, errorMessages, "The previous stack version information was not found.");
-          } else {
-            logMessage(messages, String.format("Obtaining previous stack Kerberos descriptor for %s.", originalStackId.toString()));
-            previousDescriptor = ambariMetaInfo.getKerberosDescriptor(originalStackId.getStackName(), originalStackId.getStackVersion());
+        if (originalStackId == null) {
+          logErrorMessage(messages, errorMessages, "The previous stack version information was not found.");
+        } else {
+          logMessage(messages, String.format("Obtaining previous stack Kerberos descriptor for %s.", originalStackId.toString()));
+          previousDescriptor = ambariMetaInfo.getKerberosDescriptor(originalStackId.getStackName(), originalStackId.getStackVersion());
 
-            if (newDescriptor == null) {
-              logErrorMessage(messages, errorMessages, String.format("The Kerberos descriptor for the previous stack version, %s, was not found.", originalStackId.toString()));
-            }
+          if (newDescriptor == null) {
+            logErrorMessage(messages, errorMessages, String.format("The Kerberos descriptor for the previous stack version, %s, was not found.", originalStackId.toString()));
           }
+        }
 
-          if (errorMessages.isEmpty()) {
-            logMessage(messages, "Updating the user-specified Kerberos descriptor.");
+        if (errorMessages.isEmpty()) {
+          logMessage(messages, "Updating the user-specified Kerberos descriptor.");
 
-            KerberosDescriptor updatedDescriptor = KerberosDescriptorUpdateHelper.updateUserKerberosDescriptor(
-                previousDescriptor,
-                newDescriptor,
-                userDescriptor);
+          KerberosDescriptor updatedDescriptor = KerberosDescriptorUpdateHelper.updateUserKerberosDescriptor(
+              previousDescriptor,
+              newDescriptor,
+              userDescriptor);
 
-            logMessage(messages, "Storing updated user-specified Kerberos descriptor.");
+          logMessage(messages, "Storing updated user-specified Kerberos descriptor.");
 
-            entity.setArtifactData(updatedDescriptor.toMap());
-            artifactDAO.merge(entity);
+          entity.setArtifactData(updatedDescriptor.toMap());
+          artifactDAO.merge(entity);
 
-            logMessage(messages, "Successfully updated the user-specified Kerberos descriptor.");
-          }
+          logMessage(messages, "Successfully updated the user-specified Kerberos descriptor.");
         }
-      } else {
-        logMessage(messages, "A user-specified Kerberos descriptor was not found. No updates are necessary.");
       }
     } else {
-      logErrorMessage(messages, errorMessages, String.format("The cluster named %s was not found.", clusterName));
+      logMessage(messages, "A user-specified Kerberos descriptor was not found. No updates are necessary.");
     }
 
     if (!errorMessages.isEmpty()) {
@@ -181,24 +161,6 @@ public class UpgradeUserKerberosDescriptor extends AbstractServerAction {
     return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", StringUtils.join(messages, "\n"), StringUtils.join(errorMessages, "\n"));
   }
 
-  /**
-   * Determines if upgrade direction is {@link Direction#UPGRADE} or {@link Direction#DOWNGRADE}.
-   *
-   * @return {@code true} if {@link Direction#DOWNGRADE}; {@code false} if {@link Direction#UPGRADE}
-   */
-  private boolean isDowngrade() {
-    return Direction.DOWNGRADE.name().equalsIgnoreCase(getCommandParameterValue(UPGRADE_DIRECTION_KEY));
-  }
-
-  private StackId getStackIdFromCommandParams(String commandParamKey) {
-    String stackId = getCommandParameterValue(commandParamKey);
-    if (stackId == null) {
-      return null;
-    } else {
-      return new StackId(stackId);
-    }
-  }
-
   private void logMessage(List<String> messages, String message) {
     LOG.info(message);
     messages.add(message);

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
index 3ecf64d..1695bd3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
@@ -99,7 +99,13 @@ public class UpgradeContext {
   public static final String COMMAND_PARAM_TASKS = "tasks";
   public static final String COMMAND_PARAM_STRUCT_OUT = "structured_out";
 
-  /**
+  @Deprecated
+  @Experimental(
+      feature = ExperimentalFeature.PATCH_UPGRADES,
+      comment = "This isn't needed anymore, but many python classes still use it")
+  public static final String COMMAND_PARAM_DOWNGRADE_FROM_VERSION = "downgrade_from_version";
+
+  /*
    * The cluster that the upgrade is for.
    */
   final private Cluster m_cluster;
@@ -744,6 +750,7 @@ public class UpgradeContext {
    * <ul>
    * <li>{@link #COMMAND_PARAM_CLUSTER_NAME}
    * <li>{@link #COMMAND_PARAM_DIRECTION}
+   * <li>{@link #COMMAND_PARAM_DOWNGRADE_FROM_VERSION}
    * <li>{@link #COMMAND_PARAM_UPGRADE_TYPE}
    * <li>{@link KeyNames#REFRESH_CONFIG_TAGS_BEFORE_EXECUTION} - necessary in
    * order to have the commands contain the correct configurations. Otherwise,
@@ -758,8 +765,13 @@ public class UpgradeContext {
   public Map<String, String> getInitializedCommandParameters() {
     Map<String, String> parameters = new HashMap<>();
 
+    Direction direction = getDirection();
     parameters.put(COMMAND_PARAM_CLUSTER_NAME, m_cluster.getClusterName());
-    parameters.put(COMMAND_PARAM_DIRECTION, getDirection().name().toLowerCase());
+    parameters.put(COMMAND_PARAM_DIRECTION, direction.name().toLowerCase());
+
+    if (direction == Direction.DOWNGRADE) {
+      parameters.put(COMMAND_PARAM_DOWNGRADE_FROM_VERSION, m_repositoryVersion.getVersion());
+    }
 
     if (null != getType()) {
       // use the serialized attributes of the enum to convert it to a string,
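
Net effect for the agents: every downgrade command once again carries
downgrade_from_version, which the deprecated-but-still-used Python consumers
read. Illustrative commandParams for a downgrade after this change (keys as
used elsewhere in the Ambari command params; values hypothetical):

  command_params = {
      "clusterName": "c1",
      "upgrade_direction": "downgrade",          # Direction.DOWNGRADE, lowercased
      "downgrade_from_version": "2.5.9.9-9999",  # m_repositoryVersion.getVersion()
  }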

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py
index 74fd76a..93b4944 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py
@@ -23,6 +23,7 @@ import status_params
 
 from setup_spark import *
 from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.stack_features import get_stack_feature_version
 from resource_management.libraries.functions.constants import StackFeature
 from resource_management.libraries.functions import conf_select, stack_select
 from resource_management.libraries.functions.get_stack_version import get_stack_version
@@ -56,10 +57,8 @@ upgrade_direction = default("/commandParams/upgrade_direction", None)
 java_home = config['hostLevelParams']['java_home']
 stack_name = status_params.stack_name
 stack_root = Script.get_stack_root()
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-if upgrade_direction == Direction.DOWNGRADE:
-  stack_version_unformatted = config['commandParams']['original_stack'].split("-")[1]
-stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+version_for_stack_feature_checks = get_stack_feature_version(config)
 
 sysprep_skip_copy_tarballs_hdfs = get_sysprep_skip_copy_tarballs_hdfs()
 
@@ -70,7 +69,7 @@ spark_conf = '/etc/spark/conf'
 hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
 hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
 
-if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
+if check_stack_feature(StackFeature.ROLLING_UPGRADE, version_for_stack_feature_checks):
   hadoop_home = stack_select.get_hadoop_dir("home")
   spark_conf = format("{stack_root}/current/{component_directory}/conf")
   spark_log_dir = config['configurations']['spark-env']['spark_log_dir']
@@ -211,7 +210,7 @@ dfs_type = default("/commandParams/dfs_type", "")
 # livy is only supported from HDP 2.5
 has_livyserver = False
 
-if stack_version_formatted and check_stack_feature(StackFeature.SPARK_LIVY, stack_version_formatted) and "livy-env" in config['configurations']:
+if check_stack_feature(StackFeature.SPARK_LIVY, version_for_stack_feature_checks) and "livy-env" in config['configurations']:
   livy_component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "LIVY_SERVER")
   livy_conf = format("{stack_root}/current/{livy_component_directory}/conf")
   livy_log_dir = config['configurations']['livy-env']['livy_log_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/setup_spark.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/setup_spark.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/setup_spark.py
index 50c1555..53c8f9e 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/setup_spark.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/setup_spark.py
@@ -118,11 +118,11 @@ def setup_spark(env, type, upgrade_type=None, action=None, config_dir=None):
       mode=0644
     )
 
-  effective_version = params.version if upgrade_type is not None else params.stack_version_formatted
+  effective_version = params.version if upgrade_type is not None else params.version_for_stack_feature_checks
   if effective_version:
     effective_version = format_stack_version(effective_version)
 
-  if effective_version and check_stack_feature(StackFeature.SPARK_JAVA_OPTS_SUPPORT, effective_version):
+  if check_stack_feature(StackFeature.SPARK_JAVA_OPTS_SUPPORT, effective_version):
     File(os.path.join(params.spark_conf, 'java-opts'),
       owner=params.spark_user,
       group=params.spark_group,
@@ -134,7 +134,7 @@ def setup_spark(env, type, upgrade_type=None, action=None, config_dir=None):
       action="delete"
     )
 
-  if params.spark_thrift_fairscheduler_content and effective_version and check_stack_feature(StackFeature.SPARK_16PLUS, effective_version):
+  if params.spark_thrift_fairscheduler_content and check_stack_feature(StackFeature.SPARK_16PLUS, effective_version):
     # create spark-thrift-fairscheduler.xml
     File(os.path.join(config_dir,"spark-thrift-fairscheduler.xml"),
       owner=params.spark_user,

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/spark_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/spark_service.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/spark_service.py
index 31a296a..2838186 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/spark_service.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/spark_service.py
@@ -34,11 +34,11 @@ def spark_service(name, upgrade_type=None, action=None):
 
   if action == 'start':
 
-    effective_version = params.version if upgrade_type is not None else params.stack_version_formatted
+    effective_version = params.version if upgrade_type is not None else params.version_for_stack_feature_checks
     if effective_version:
       effective_version = format_stack_version(effective_version)
 
-    if name == 'jobhistoryserver' and effective_version and check_stack_feature(StackFeature.SPARK_16PLUS, effective_version):
+    if name == 'jobhistoryserver' and check_stack_feature(StackFeature.SPARK_16PLUS, effective_version):
       # copy spark-hdp-assembly.jar to hdfs
       copy_to_hdfs("spark", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
       # create spark history directory
@@ -58,7 +58,7 @@ def spark_service(name, upgrade_type=None, action=None):
 
     # Spark 1.3.1.2.3, and higher, which was included in HDP 2.3, does not have a dependency on Tez, so it does not
     # need to copy the tarball, otherwise, copy it.
-    if params.stack_version_formatted and check_stack_feature(StackFeature.TEZ_FOR_SPARK, params.stack_version_formatted):
+    if check_stack_feature(StackFeature.TEZ_FOR_SPARK, params.version_for_stack_feature_checks):
       resource_created = copy_to_hdfs("tez", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
       if resource_created:
         params.HdfsResource(None, action="execute")
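
Both Spark call sites now use the same fallback: during an upgrade the
command's explicit version wins, otherwise the feature-check version derived
from the config. In isolation (hypothetical values):

  # Sketch of the effective-version fallback in setup_spark.py / spark_service.py.
  version = "2.6.0.0-334"                            # /commandParams/version (set during upgrades)
  version_for_stack_feature_checks = "2.5.0.0-1234"  # from get_stack_feature_version(config)

  upgrade_type = None  # a plain start, not an upgrade
  effective_version = version if upgrade_type is not None else version_for_stack_feature_checks
  print(effective_version)  # 2.5.0.0-1234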

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptorTest.java
index ed92955..86f6d3b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptorTest.java
@@ -36,11 +36,17 @@ import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.orm.dao.ArtifactDAO;
 import org.apache.ambari.server.orm.entities.ArtifactEntity;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
+import org.apache.ambari.server.orm.entities.UpgradeEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.UpgradeContext;
+import org.apache.ambari.server.state.UpgradeContextFactory;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptorUpdateHelper;
+import org.apache.ambari.server.state.stack.upgrade.Direction;
 import org.easymock.Capture;
 import org.easymock.EasyMock;
 import org.junit.Before;
@@ -58,23 +64,34 @@ import org.powermock.modules.junit4.PowerMockRunner;
 public class UpgradeUserKerberosDescriptorTest {
   private Clusters clusters;
   private Cluster cluster;
+  private UpgradeEntity upgrade;
+  private UpgradeContext upgradeContext;
   private AmbariMetaInfo ambariMetaInfo;
   private KerberosDescriptorFactory kerberosDescriptorFactory;
   private ArtifactDAO artifactDAO;
+  private UpgradeContextFactory upgradeContextFactory;
 
   private TreeMap<String, Field> fields = new TreeMap<>();
+  private StackId HDP_24 = new StackId("HDP", "2.4");
 
   @Before
   public void setup() throws Exception {
     clusters = EasyMock.createMock(Clusters.class);
     cluster = EasyMock.createMock(Cluster.class);
+    upgrade = EasyMock.createNiceMock(UpgradeEntity.class);
     kerberosDescriptorFactory = EasyMock.createNiceMock(KerberosDescriptorFactory.class);
     ambariMetaInfo = EasyMock.createMock(AmbariMetaInfo.class);
     artifactDAO = EasyMock.createNiceMock(ArtifactDAO.class);
+    upgradeContextFactory = EasyMock.createNiceMock(UpgradeContextFactory.class);
+    upgradeContext = EasyMock.createNiceMock(UpgradeContext.class);
 
     expect(clusters.getCluster((String) anyObject())).andReturn(cluster).anyTimes();
     expect(cluster.getClusterId()).andReturn(1l).atLeastOnce();
-    replay(clusters, cluster);
+    expect(cluster.getCurrentStackVersion()).andReturn(HDP_24).atLeastOnce();
+    expect(cluster.getUpgradeInProgress()).andReturn(upgrade).atLeastOnce();
+    expect(upgradeContextFactory.create(cluster, upgrade)).andReturn(upgradeContext).atLeastOnce();
+
+    replay(clusters, cluster, upgradeContextFactory, upgrade);
 
     prepareFields();
 
@@ -82,12 +99,16 @@ public class UpgradeUserKerberosDescriptorTest {
 
   @Test
   public void testUpgrade() throws Exception {
+    StackId stackId = new StackId("HDP", "2.5");
+    RepositoryVersionEntity repositoryVersion = EasyMock.createNiceMock(RepositoryVersionEntity.class);
+    expect(repositoryVersion.getStackId()).andReturn(stackId).atLeastOnce();
+
+    expect(upgradeContext.getDirection()).andReturn(Direction.UPGRADE).atLeastOnce();
+    expect(upgradeContext.getRepositoryVersion()).andReturn(repositoryVersion).atLeastOnce();
+    replay(repositoryVersion, upgradeContext);
 
     Map<String, String> commandParams = new HashMap<>();
     commandParams.put("clusterName", "c1");
-    commandParams.put("upgrade_direction", "UPGRADE");
-    commandParams.put("original_stack", "HDP-2.4");
-    commandParams.put("target_stack", "HDP-2.5");
 
     ExecutionCommand executionCommand = new ExecutionCommand();
     executionCommand.setCommandParams(commandParams);
@@ -140,12 +161,16 @@ public class UpgradeUserKerberosDescriptorTest {
 
   @Test
   public void testDowngrade() throws Exception {
+    StackId stackId = new StackId("HDP", "2.5");
+    RepositoryVersionEntity repositoryVersion = EasyMock.createNiceMock(RepositoryVersionEntity.class);
+    expect(repositoryVersion.getStackId()).andReturn(stackId).atLeastOnce();
+
+    expect(upgradeContext.getDirection()).andReturn(Direction.DOWNGRADE).atLeastOnce();
+    expect(upgradeContext.getRepositoryVersion()).andReturn(repositoryVersion).atLeastOnce();
+    replay(repositoryVersion, upgradeContext);
 
     Map<String, String> commandParams = new HashMap<>();
     commandParams.put("clusterName", "c1");
-    commandParams.put("upgrade_direction", "DOWNGRADE");
-    commandParams.put("original_stack", "HDP-2.4");
-    commandParams.put("target_stack", "HDP-2.5");
 
     ExecutionCommand executionCommand = new ExecutionCommand();
     executionCommand.setCommandParams(commandParams);
@@ -188,12 +213,19 @@ public class UpgradeUserKerberosDescriptorTest {
   }
 
   private void prepareFields() throws NoSuchFieldException {
-    String[] fieldsNames = {"artifactDAO","clusters","ambariMetaInfo","kerberosDescriptorFactory"};
-    for(String fieldName : fieldsNames)
-    {
-      Field clustersField = UpgradeUserKerberosDescriptor.class.getDeclaredField(fieldName);
-      clustersField.setAccessible(true);
-      fields.put(fieldName, clustersField);
+    String[] fieldsNames = { "artifactDAO", "clusters", "ambariMetaInfo",
+        "kerberosDescriptorFactory", "m_upgradeContextFactory" };
+
+    for (String fieldName : fieldsNames) {
+      try {
+        Field clustersField = UpgradeUserKerberosDescriptor.class.getDeclaredField(fieldName);
+        clustersField.setAccessible(true);
+        fields.put(fieldName, clustersField);
+      } catch( NoSuchFieldException noSuchFieldException ){
+        Field clustersField = UpgradeUserKerberosDescriptor.class.getSuperclass().getDeclaredField(fieldName);
+        clustersField.setAccessible(true);
+        fields.put(fieldName, clustersField);        
+      }
     }
   }
   private void injectFields(UpgradeUserKerberosDescriptor action) throws IllegalAccessException {
@@ -201,5 +233,6 @@ public class UpgradeUserKerberosDescriptorTest {
     fields.get("clusters").set(action, clusters);
     fields.get("ambariMetaInfo").set(action, ambariMetaInfo);
     fields.get("kerberosDescriptorFactory").set(action, kerberosDescriptorFactory);
+    fields.get("m_upgradeContextFactory").set(action, upgradeContextFactory);
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/test/python/TestStackFeature.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/TestStackFeature.py b/ambari-server/src/test/python/TestStackFeature.py
index 0116a7a..230734c 100644
--- a/ambari-server/src/test/python/TestStackFeature.py
+++ b/ambari-server/src/test/python/TestStackFeature.py
@@ -28,6 +28,32 @@ from unittest import TestCase
 Logger.initialize_logger()
 
 class TestStackFeature(TestCase):
+  """
+  EU Upgrade (HDP 2.5 to HDP 2.6)
+    - STOP
+      hostLevelParams/stack_name = HDP
+      hostLevelParams/stack_version = 2.5
+      hostLevelParams/current_version = 2.5.0.0-1237
+      commandParams/version = 2.5.0.0-1237
+    - START
+      hostLevelParams/stack_name = HDP
+      hostLevelParams/stack_version = 2.6
+      hostLevelParams/current_version = 2.5.0.0-1237
+      commandParams/version = 2.6.0.0-334
+
+  EU Downgrade (HDP 2.6 to HDP 2.5)
+    - STOP
+    hostLevelParams/stack_name = HDP
+    hostLevelParams/stack_version = 2.6
+    hostLevelParams/current_version = 2.5.0.0-1237
+    commandParams/version = 2.6.0.0-334
+    - START
+    hostLevelParams/stack_name = HDP
+    hostLevelParams/stack_version = 2.5
+    hostLevelParams/current_version = 2.5.0.0-1237
+    commandParams/version = 2.5.0.0-1237
+  """
+
   def test_get_stack_feature_version_missing_params(self):
     try:
       stack_feature_version = get_stack_feature_version({})
@@ -122,7 +148,7 @@ class TestStackFeature(TestCase):
         "current_version":  "2.4.0.0-1234"
       },
       "commandParams": {
-        "original_stack": "2.4",
+        "source_stack": "2.4",
         "target_stack": "2.5",
         "upgrade_direction": "upgrade",
         "version": "2.5.9.9-9999"
@@ -143,8 +169,8 @@ class TestStackFeature(TestCase):
         "current_version":"2.4.0.0-1234"
       },
       "commandParams":{
-        "original_stack":"2.4",
-        "target_stack":"2.5",
+        "source_stack":"2.5",
+        "target_stack":"2.4",
         "upgrade_direction":"downgrade",
         "version":"2.4.0.0-1234",
         "downgrade_from_version": "2.5.9.9-9999"
@@ -166,10 +192,10 @@ class TestStackFeature(TestCase):
         "current_version":"2.4.0.0-1234"
       },
       "commandParams":{
-        "original_stack":"2.4",
-        "target_stack":"2.5",
+        "source_stack":"2.5",
+        "target_stack":"2.4",
         "upgrade_direction":"downgrade",
-        "version":"2.4.0.0-1234",
+        "version":"2.5.9.9-9999",
         "downgrade_from_version":"2.5.9.9-9999"
       }
     }
@@ -189,10 +215,10 @@ class TestStackFeature(TestCase):
         "custom_command":"STOP"
       },
       "commandParams":{
-        "original_stack":"2.4",
-        "target_stack":"2.5",
+        "source_stack":"2.5",
+        "target_stack":"2.4",
         "upgrade_direction":"downgrade",
-        "version":"2.4.0.0-1234",
+        "version":"2.5.9.9-9999",
         "downgrade_from_version":"2.5.9.9-9999"
       }
     }
\ No newline at end of file
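
To make the downgrade fixtures concrete: after this change a downgrade command reports the version being moved away from in commandParams/version (equal to downgrade_from_version), with source_stack and target_stack swapped accordingly. Below is a purely illustrative Python sketch of a resolver consistent with these fixtures; it is not the real get_stack_feature_version, and the precedence shown (commandParams/version first, then hostLevelParams/current_version) is an assumption drawn from the test data.

def resolve_feature_version(command_json):
  """Illustrative only: pick the version a stack-feature check could use."""
  params = command_json.get("commandParams", {})
  host_params = command_json.get("hostLevelParams", {})
  # The fixtures carry commandParams/version on upgrade/downgrade commands...
  if "version" in params:
    return params["version"]
  # ...and otherwise fall back to the currently installed version.
  return host_params.get("current_version")

downgrade_stop = {
  "hostLevelParams": {"stack_name": "HDP", "current_version": "2.4.0.0-1234"},
  "commandParams": {
    "source_stack": "2.5",
    "target_stack": "2.4",
    "upgrade_direction": "downgrade",
    "version": "2.5.9.9-9999",
    "downgrade_from_version": "2.5.9.9-9999"
  }
}
assert resolve_feature_version(downgrade_stop) == "2.5.9.9-9999"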

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json
index 7f77d83..3aadf2c 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json
@@ -25,7 +25,7 @@
         "upgrade_type": "nonrolling_upgrade",
         "version": "2.3.2.0-2844", 
         "forceRefreshConfigTagsBeforeExecution": "*", 
-        "original_stack": "HDP-2.2", 
+        "source_stack": "HDP-2.2",
         "command_timeout": "1200", 
         "target_stack": "HDP-2.3", 
         "desired_namenode_role": "standby", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json
index 87b18af..2d48ff6 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json
@@ -25,7 +25,7 @@
         "upgrade_type": "nonrolling_upgrade",
         "version": "2.3.2.0-2844", 
         "forceRefreshConfigTagsBeforeExecution": "*", 
-        "original_stack": "HDP-2.2", 
+        "source_stack": "HDP-2.2",
         "command_timeout": "1200", 
         "target_stack": "HDP-2.3", 
         "desired_namenode_role": "standby", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json b/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json
index 99fcba0..021695b 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json
@@ -13,7 +13,7 @@
         "upgrade_type": "nonrolling_upgrade",
         "version": "2.3.2.0-2950", 
         "forceRefreshConfigTagsBeforeExecution": "*", 
-        "original_stack": "HDP-2.3", 
+        "source_stack": "HDP-2.3",
         "command_timeout": "1200", 
         "target_stack": "HDP-2.3", 
         "script_type": "PYTHON"

http://git-wip-us.apache.org/repos/asf/ambari/blob/f27f3aff/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json b/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json
index a9db11c..1805c3b 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json
@@ -22,7 +22,7 @@
         "upgrade_type": "rolling_upgrade",
         "command_retry_max_attempt_count": "3", 
         "version": "2.3.0.0-2096", 
-        "original_stack": "HDP-2.3", 
+        "source_stack": "HDP-2.3",
         "command_retry_enabled": "false", 
         "command_timeout": "1200", 
         "target_stack": "HDP-2.3", 


[29/50] [abbrv] ambari git commit: Reverting the accidental commit of a rust file (smohanty)

Posted by nc...@apache.org.
Reverting the accidental commit of a rust file (smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4b189a11
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4b189a11
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4b189a11

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 4b189a1131a387f1a74e45624af95525e984d30a
Parents: ba977e5
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Wed Jul 19 07:15:05 2017 -0700
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Wed Jul 19 07:15:05 2017 -0700

----------------------------------------------------------------------
 ambari-server/src/main/java/org/apache/ambari/server/agent/hello.rs | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/4b189a11/ambari-server/src/main/java/org/apache/ambari/server/agent/hello.rs
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/hello.rs b/ambari-server/src/main/java/org/apache/ambari/server/agent/hello.rs
deleted file mode 100644
index e69de29..0000000


[23/50] [abbrv] ambari git commit: AMBARI-21501. Make HSI's 'hive.llap.zk.sm.keytab' and 'hive.service.keytab' group readable.

Posted by nc...@apache.org.
AMBARI-21501. Make HSI's 'hive.llap.zk.sm.keytab' and 'hive.service.keytab' group readable.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f450eba5
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f450eba5
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f450eba5

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: f450eba5c23c0d35ab9181d531d9e1ef84cbf3e8
Parents: 01d60f4
Author: Swapan Shridhar <ss...@hortonworks.com>
Authored: Mon Jul 17 15:04:37 2017 -0700
Committer: Swapan Shridhar <ss...@hortonworks.com>
Committed: Mon Jul 17 15:04:37 2017 -0700

----------------------------------------------------------------------
 .../stacks/HDP/2.6/services/HIVE/kerberos.json  | 151 -------------------
 .../stacks/HDP/2.6/services/YARN/kerberos.json  |   2 +-
 2 files changed, 1 insertion(+), 152 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f450eba5/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/kerberos.json
deleted file mode 100644
index b6e57e1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/kerberos.json
+++ /dev/null
@@ -1,151 +0,0 @@
-{
-  "services": [
-    {
-      "name": "HIVE",
-      "identities": [
-        {
-          "name": "/spnego"
-        },
-        {
-          "name": "/smokeuser"
-        }
-      ],
-      "configurations": [
-        {
-          "hive-site": {
-            "hive.metastore.sasl.enabled": "true",
-            "hive.server2.authentication": "KERBEROS"
-          }
-        },
-        {
-          "ranger-hive-audit": {
-            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
-            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
-            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
-            "xasecure.audit.jaas.Client.option.storeKey": "false",
-            "xasecure.audit.jaas.Client.option.serviceName": "solr",
-            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
-          }
-        }
-      ],
-      "components": [
-        {
-          "name": "HIVE_METASTORE",
-          "identities": [
-            {
-              "name": "/HIVE/HIVE_SERVER/hive_server_hive",
-              "principal": {
-                "configuration": "hive-site/hive.metastore.kerberos.principal"
-              },
-              "keytab": {
-                "configuration": "hive-site/hive.metastore.kerberos.keytab.file"
-              }
-            }
-          ]
-        },
-        {
-          "name": "HIVE_SERVER",
-          "identities": [
-            {
-              "name": "/HDFS/NAMENODE/hdfs"
-            },
-            {
-              "name": "hive_server_hive",
-              "principal": {
-                "value": "hive/_HOST@${realm}",
-                "type": "service",
-                "configuration": "hive-site/hive.server2.authentication.kerberos.principal",
-                "local_username": "${hive-env/hive_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/hive.service.keytab",
-                "owner": {
-                  "name": "${hive-env/hive_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "hive-site/hive.server2.authentication.kerberos.keytab"
-              }
-            },
-            {
-              "name": "atlas_kafka",
-              "reference": "/HIVE/HIVE_SERVER/hive_server_hive",
-              "principal": {
-                "configuration": "hive-atlas-application.properties/atlas.jaas.KafkaClient.option.principal"
-              },
-              "keytab": {
-                "configuration": "hive-atlas-application.properties/atlas.jaas.KafkaClient.option.keyTab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "hive-site/hive.server2.authentication.spnego.principal"
-              },
-              "keytab": {
-                "configuration": "hive-site/hive.server2.authentication.spnego.keytab"
-              }
-            },
-            {
-              "name": "ranger_audit",
-              "reference": "/HIVE/HIVE_SERVER/hive_server_hive",
-              "principal": {
-                "configuration": "ranger-hive-audit/xasecure.audit.jaas.Client.option.principal"
-              },
-              "keytab": {
-                "configuration": "ranger-hive-audit/xasecure.audit.jaas.Client.option.keyTab"
-              }
-            }
-          ]
-        },
-        {
-          "name": "HIVE_SERVER_INTERACTIVE",
-          "identities": [
-            {
-              "name": "/HDFS/NAMENODE/hdfs"
-            },
-            {
-              "name": "/HIVE/HIVE_SERVER/hive_server_hive"
-            },
-            {
-              "name": "/HIVE/HIVE_SERVER/spnego"
-            },
-            {
-              "name": "/YARN/NODEMANAGER/llap_zk_hive"
-            }
-          ]
-        },
-        {
-          "name": "WEBHCAT_SERVER",
-          "identities": [
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "webhcat-site/templeton.kerberos.principal"
-              },
-              "keytab": {
-                "configuration": "webhcat-site/templeton.kerberos.keytab"
-              }
-            }
-          ],
-          "configurations": [
-            {
-              "core-site": {
-                "hadoop.proxyuser.HTTP.hosts": "${clusterHostInfo/webhcat_server_host|append(core-site/hadoop.proxyuser.HTTP.hosts, \\\\,, true)}"
-              }
-            },
-            {
-              "webhcat-site": {
-                "templeton.kerberos.secret": "secret",
-                "templeton.hive.properties": "hive.metastore.local=false,hive.metastore.uris=${clusterHostInfo/hive_metastore_host|each(thrift://%s:9083, \\\\,, \\s*\\,\\s*)},hive.metastore.sasl.enabled=true,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse,hive.exec.mode.local.auto=false,hive.metastore.kerberos.principal=hive/_HOST@${realm}"
-              }
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/f450eba5/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
index 60d50eb..b1501b8 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
@@ -117,7 +117,7 @@
                 },
                 "group": {
                   "name": "${cluster-env/user_group}",
-                  "access": ""
+                  "access": "r"
                 },
                 "configuration": "hive-interactive-site/hive.llap.zk.sm.keytab.file"
               },
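
The one-character change above widens the llap_zk_hive keytab from owner-only to group-readable. As a rough illustration of what such owner/group access strings imply for an on-disk file mode, here is a small Python sketch; the digit mapping (r adds 4, w adds 2 per class) is an assumption for illustration, not a quote of Ambari's keytab-distribution code.

def access_to_mode(owner_access, group_access, other_access=""):
  """Translate 'r'/'w' access strings into an octal POSIX mode (illustrative)."""
  def bits(access):
    return (4 if "r" in access else 0) | (2 if "w" in access else 0)
  return (bits(owner_access) << 6) | (bits(group_access) << 3) | bits(other_access)

# Before the patch: owner "r", group ""  -> 0o400 (owner-only read).
assert access_to_mode("r", "") == 0o400
# After the patch:  owner "r", group "r" -> 0o440 (group-readable).
assert access_to_mode("r", "r") == 0o440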


[41/50] [abbrv] ambari git commit: AMBARI-21528. Zookeeper server has incorrect memory setting, missing m in Xmx value (alejandro)

Posted by nc...@apache.org.
AMBARI-21528. Zookeeper server has incorrect memory setting, missing m in Xmx value (alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2a298a3f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2a298a3f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2a298a3f

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 2a298a3f707c4a3702d0f70e927946540661c916
Parents: e87a3e3
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Thu Jul 20 14:24:18 2017 -0700
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Thu Jul 20 14:24:18 2017 -0700

----------------------------------------------------------------------
 .../ZOOKEEPER/3.4.5/package/scripts/params_linux.py             | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/2a298a3f/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/package/scripts/params_linux.py
index 0780d2e..b8e8f78 100644
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/package/scripts/params_linux.py
@@ -68,7 +68,10 @@ zk_log_dir = config['configurations']['zookeeper-env']['zk_log_dir']
 zk_data_dir = config['configurations']['zoo.cfg']['dataDir']
 zk_pid_dir = status_params.zk_pid_dir
 zk_pid_file = status_params.zk_pid_file
-zk_server_heapsize_value = default('configurations/zookeeper-env/zk_server_heapsize', "1024m")
+zk_server_heapsize_value = str(default('configurations/zookeeper-env/zk_server_heapsize', "1024"))
+zk_server_heapsize_value = zk_server_heapsize_value.strip()
+if len(zk_server_heapsize_value) > 0 and zk_server_heapsize_value[-1].isdigit():
+  zk_server_heapsize_value = zk_server_heapsize_value + "m"
 zk_server_heapsize = format("-Xmx{zk_server_heapsize_value}")
 
 client_port = default('/configurations/zoo.cfg/clientPort', None)
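
The fix above only appends "m" when the configured value ends in a digit, so values that already carry a unit suffix are left alone. A standalone sketch of the same normalization, runnable outside Ambari's params machinery (the default of "1024" matches the patched code):

def normalize_zk_heapsize(raw_value, default="1024"):
  """Append 'm' when the configured ZooKeeper heapsize is a bare number."""
  value = str(raw_value if raw_value is not None else default).strip()
  if value and value[-1].isdigit():
    value += "m"
  return value

assert normalize_zk_heapsize("1024") == "1024m"
assert normalize_zk_heapsize("1024m") == "1024m"   # already has a unit
assert normalize_zk_heapsize("2g") == "2g"         # other units untouched
print("-Xmx" + normalize_zk_heapsize(" 512 "))     # -Xmx512m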


[38/50] [abbrv] ambari git commit: AMBARI-21498. DB consistency checker throws errors for missing 'product-info' configs after Ambari upgrade (dlysnichenko)

Posted by nc...@apache.org.
AMBARI-21498. DB consistency checker throws errors for missing 'product-info' configs after Ambari upgrade (dlysnichenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d999343f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d999343f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d999343f

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: d999343f97fe4a92625327b6f6e48c0c7c3f3ecf
Parents: 8c15965
Author: Lisnichenko Dmitro <dl...@hortonworks.com>
Authored: Thu Jul 20 13:35:19 2017 +0300
Committer: Lisnichenko Dmitro <dl...@hortonworks.com>
Committed: Thu Jul 20 13:35:19 2017 +0300

----------------------------------------------------------------------
 .../java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java    | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/d999343f/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
index 0656f68..2227675 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
@@ -91,6 +91,7 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
    */
   @Override
   protected void executeDMLUpdates() throws AmbariException, SQLException {
+    addNewConfigurationsFromXml();
     resetStackToolsAndFeatures();
   }
 


[20/50] [abbrv] ambari git commit: AMBARI-21345 Add host doesn't fully add a node when include/exclude files are used (dsen)

Posted by nc...@apache.org.
AMBARI-21345 Add host doesn't fully add a node when include/exclude files are used (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/cc412e66
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/cc412e66
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/cc412e66

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: cc412e66156d5a887a725015537dcb75b0caf986
Parents: 93fe848
Author: Dmytro Sen <ds...@apache.org>
Authored: Mon Jul 17 13:36:58 2017 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Mon Jul 17 13:36:58 2017 +0300

----------------------------------------------------------------------
 .../AmbariCustomCommandExecutionHelper.java     |  14 ++-
 .../AmbariManagementControllerImpl.java         | 121 ++++++++++++++-----
 .../internal/HostResourceProvider.java          |   1 +
 .../HDFS/2.1.0.2.0/configuration/hdfs-site.xml  |   6 +
 .../HDFS/2.1.0.2.0/package/scripts/hdfs.py      |   8 ++
 .../2.1.0.2.0/package/scripts/hdfs_namenode.py  |  25 +++-
 .../2.1.0.2.0/package/scripts/params_linux.py   |   9 +-
 .../2.1.0.2.0/package/scripts/params_windows.py |   7 ++
 .../package/templates/include_hosts_list.j2     |  21 ++++
 .../HDFS/3.0.0.3.0/package/scripts/hdfs.py      |   8 ++
 .../3.0.0.3.0/package/scripts/hdfs_namenode.py  |  17 ++-
 .../3.0.0.3.0/package/scripts/params_linux.py   |   8 +-
 .../3.0.0.3.0/package/scripts/params_windows.py |   7 ++
 .../package/templates/include_hosts_list.j2     |  21 ++++
 .../YARN/2.1.0.2.0/configuration/yarn-site.xml  |   6 +
 .../2.1.0.2.0/package/scripts/params_linux.py   |  12 +-
 .../2.1.0.2.0/package/scripts/params_windows.py |  10 +-
 .../package/scripts/resourcemanager.py          |  18 ++-
 .../package/templates/include_hosts_list.j2     |  21 ++++
 .../YARN/3.0.0.3.0/configuration/yarn-site.xml  |   6 +
 .../3.0.0.3.0/package/scripts/params_linux.py   |  11 +-
 .../3.0.0.3.0/package/scripts/params_windows.py |  10 +-
 .../package/scripts/resourcemanager.py          |  18 ++-
 .../package/templates/include_hosts_list.j2     |  21 ++++
 .../services/HDFS/configuration/hdfs-site.xml   |   6 +
 .../HDFS/package/scripts/hdfs_namenode.py       |  12 +-
 .../0.8/services/HDFS/package/scripts/params.py |  11 +-
 .../package/templates/include_hosts_list.j2     |  21 ++++
 .../services/YARN/configuration/yarn-site.xml   |   6 +
 .../0.8/services/YARN/package/scripts/params.py |  10 +-
 .../YARN/package/scripts/resourcemanager.py     |   9 +-
 .../package/templates/include_hosts_list.j2     |  21 ++++
 .../services/YARN/configuration/yarn-site.xml   |   6 +
 .../services/YARN/package/scripts/params.py     |   9 +-
 .../YARN/package/scripts/resourcemanager.py     |   9 +-
 .../package/templates/exclude_hosts_list.j2     |  21 ++++
 .../package/templates/include_hosts_list.j2     |  21 ++++
 .../services/YARN/configuration/yarn-site.xml   |   6 +
 .../services/YARN/package/scripts/params.py     |  10 +-
 .../YARN/package/scripts/resourcemanager.py     |   9 +-
 .../package/templates/include_hosts_list.j2     |  21 ++++
 .../AmbariManagementControllerTest.java         |   8 +-
 .../python/stacks/2.0.6/HDFS/test_namenode.py   |   2 +-
 .../stacks/2.0.6/configs/altfs_plus_hdfs.json   |   2 +-
 .../python/stacks/2.0.6/configs/default.json    |   2 +-
 .../2.0.6/configs/default_ams_embedded.json     |   2 +-
 .../2.0.6/configs/default_hive_nn_ha.json       |   2 +-
 .../2.0.6/configs/default_hive_nn_ha_2.json     |   2 +-
 .../2.0.6/configs/default_hive_non_hdfs.json    |   2 +-
 .../2.0.6/configs/default_no_install.json       |   2 +-
 .../2.0.6/configs/default_oozie_mysql.json      |   2 +-
 .../default_update_exclude_file_only.json       |   2 +-
 .../2.0.6/configs/default_with_bucket.json      |   2 +-
 .../python/stacks/2.0.6/configs/flume_22.json   |   2 +-
 .../python/stacks/2.0.6/configs/flume_only.json |   2 +-
 .../stacks/2.0.6/configs/hbase_no_phx.json      |   2 +-
 .../stacks/2.0.6/configs/hbase_with_phx.json    |   2 +-
 .../2.0.6/configs/oozie_existing_sqla.json      |   2 +-
 .../stacks/2.0.6/configs/repository_file.json   |   2 +-
 .../python/stacks/2.0.6/configs/secured.json    |   2 +-
 .../test/python/stacks/2.3/configs/ats_1_5.json |   2 +-
 .../python/stacks/2.5/configs/hsi_default.json  |   2 +-
 .../2.5/configs/hsi_default_for_restart.json    |   2 +-
 .../test/python/stacks/2.5/configs/hsi_ha.json  |   2 +-
 .../services/YARN/configuration/yarn-site.xml   |   6 +
 .../YARN/package/scripts/params_linux.py        |   9 +-
 .../YARN/package/scripts/params_windows.py      |  10 +-
 .../YARN/package/scripts/resourcemanager.py     |  18 ++-
 .../package/templates/include_hosts_list.j2     |  21 ++++
 69 files changed, 638 insertions(+), 92 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
index 28aa4e4..aeb5a9c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
@@ -137,7 +137,7 @@ public class AmbariCustomCommandExecutionHelper {
   public final static String DECOM_EXCLUDED_HOSTS = "excluded_hosts";
   public final static String DECOM_SLAVE_COMPONENT = "slave_type";
   public final static String HBASE_MARK_DRAINING_ONLY = "mark_draining_only";
-  public final static String UPDATE_EXCLUDE_FILE_ONLY = "update_exclude_file_only";
+  public final static String UPDATE_FILES_ONLY = "update_files_only";
 
   private final static String ALIGN_MAINTENANCE_STATE = "align_maintenance_state";
 
@@ -910,9 +910,9 @@ public class AmbariCustomCommandExecutionHelper {
               @Override
               public boolean shouldHostBeRemoved(final String hostname)
               throws AmbariException {
-                //Get UPDATE_EXCLUDE_FILE_ONLY parameter as string
+                //Get UPDATE_FILES_ONLY parameter as string
                 String upd_excl_file_only_str = actionExecutionContext.getParameters()
-                .get(UPDATE_EXCLUDE_FILE_ONLY);
+                .get(UPDATE_FILES_ONLY);
 
                 String decom_incl_hosts_str = actionExecutionContext.getParameters()
                 .get(DECOM_INCLUDED_HOSTS);
@@ -986,15 +986,17 @@ public class AmbariCustomCommandExecutionHelper {
         listOfExcludedHosts.add(sch.getHostName());
         if (alignMtnState) {
           sch.setMaintenanceState(MaintenanceState.ON);
+          LOG.info("marking Maintenance=ON on " + sch.getHostName());
         }
-        LOG.info("Decommissioning " + slaveCompType + " and marking Maintenance=ON on " + sch.getHostName());
+        LOG.info("Decommissioning " + slaveCompType + " on " + sch.getHostName());
       }
       if (filteredIncludedHosts.contains(sch.getHostName())) {
         sch.setComponentAdminState(HostComponentAdminState.INSERVICE);
         if (alignMtnState) {
           sch.setMaintenanceState(MaintenanceState.OFF);
+          LOG.info("marking Maintenance=OFF on " + sch.getHostName());
         }
-        LOG.info("Recommissioning " + slaveCompType + " and marking Maintenance=OFF on " + sch.getHostName());
+        LOG.info("Recommissioning " + slaveCompType + " on " + sch.getHostName());
       }
     }
 
@@ -1048,7 +1050,7 @@ public class AmbariCustomCommandExecutionHelper {
       }
 
       if (!serviceName.equals(Service.Type.HBASE.name()) || hostName.equals(primaryCandidate)) {
-        commandParams.put(UPDATE_EXCLUDE_FILE_ONLY, "false");
+        commandParams.put(UPDATE_FILES_ONLY, "false");
         addCustomCommandAction(commandContext, commandFilter, stage, commandParams, commandDetail.toString(), null);
       }
     }

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 5639dc1..433ed56 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -42,6 +42,7 @@ import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.UNLIMITED
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.USER_GROUPS;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.USER_LIST;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.VERSION;
+import static org.apache.ambari.server.controller.AmbariCustomCommandExecutionHelper.masterToSlaveMappingForDecom;
 
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -3334,17 +3335,49 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
       }
     }
 
+    Map<String, String> serviceMasterForDecommissionMap = new HashMap<>();
     for (Map<State, List<ServiceComponentHost>> stateScHostMap :
         changedScHosts.values()) {
       for (Entry<State, List<ServiceComponentHost>> entry :
           stateScHostMap.entrySet()) {
         State newState = entry.getKey();
         for (ServiceComponentHost sch : entry.getValue()) {
+          String componentName = sch.getServiceComponentName();
+          //Create map for include/exclude files refresh
+          if (masterToSlaveMappingForDecom.containsValue(componentName) &&
+            sch.getState() == State.INIT && newState == State.INSTALLED) {
+            String serviceName = sch.getServiceName();
+            String masterComponentName = null;
+            for (Entry<String, String> entrySet : masterToSlaveMappingForDecom.entrySet()) {
+              if (entrySet.getValue().equals(componentName)) {
+                masterComponentName = entrySet.getKey();
+              }
+            }
+            try {
+              Service s = cluster.getService(serviceName);
+              //Filter services whose masters are not started
+              if (s.getServiceComponent(masterComponentName).getDesiredState() == State.STARTED) {
+                serviceMasterForDecommissionMap.put(serviceName, masterComponentName);
+              } else {
+                LOG.info(String.format("Not adding %s service from include/exclude files refresh map because it's master is not started", serviceName));
+              }
+            } catch (AmbariException e) {
+              LOG.error("Exception during INIT masters cleanup : ", e);
+            }
+          }
+
+          //actually set the new state
           sch.setDesiredState(newState);
         }
       }
     }
 
+    try {
+      createAndExecuteRefreshIncludeExcludeFilesActionForMasters(serviceMasterForDecommissionMap, cluster.getClusterName());
+    } catch (AmbariException e) {
+      LOG.error("Exception during refresh include exclude files action : ", e);
+    }
+
     if (ignoredScHosts != null) {
       for (ServiceComponentHost scHost : ignoredScHosts) {
         scHost.setDesiredState(scHost.getState());
@@ -3582,18 +3615,39 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
       }
     }
 
+    Map<String, Map<String, String>> clusterServiceMasterForDecommissionMap = new HashMap<>();
     for (Entry<ServiceComponent, Set<ServiceComponentHost>> entry : safeToRemoveSCHs.entrySet()) {
       for (ServiceComponentHost componentHost : entry.getValue()) {
         try {
           deleteHostComponent(entry.getKey(), componentHost);
           deleteStatusMetaData.addDeletedKey(componentHost.getHostName() + "/" + componentHost.getServiceComponentName());
-
+          //create cluster-master-service map to update all include/exclude files in one action
+          String componentName = componentHost.getServiceComponentName();
+          if (masterToSlaveMappingForDecom.containsValue(componentName)) {
+            String masterComponentName = null;
+            for (Entry<String, String> entrySet : masterToSlaveMappingForDecom.entrySet()) {
+              if (entrySet.getValue().equals(componentName)) {
+                masterComponentName = entrySet.getKey();
+              }
+            }
+            if (clusterServiceMasterForDecommissionMap.containsKey(componentHost.getClusterName())) {
+              clusterServiceMasterForDecommissionMap.get(componentHost.getClusterName()).put(componentHost.getServiceName(), masterComponentName);
+            } else {
+              Map<String, String> tempMap = new HashMap<>();
+              tempMap.put(componentHost.getServiceName(), masterComponentName);
+              clusterServiceMasterForDecommissionMap.put(componentHost.getClusterName(), tempMap);
+            }
+          }
         } catch (Exception ex) {
           deleteStatusMetaData.addException(componentHost.getHostName() + "/" + componentHost.getServiceComponentName(), ex);
         }
       }
     }
 
+    for (String cluster : clusterServiceMasterForDecommissionMap.keySet()) {
+      createAndExecuteRefreshIncludeExcludeFilesActionForMasters(clusterServiceMasterForDecommissionMap.get(cluster), cluster);
+    }
+
     //Do not break behavior for existing clients where delete request contains only 1 host component.
     //Response for these requests will have empty body with appropriate error code.
     if (deleteStatusMetaData.getDeletedKeys().size() + deleteStatusMetaData.getExceptionForKeys().size() == 1) {
@@ -3616,7 +3670,6 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
   }
 
   private void deleteHostComponent(ServiceComponent serviceComponent, ServiceComponentHost componentHost) throws AmbariException {
-    String included_hostname = componentHost.getHostName();
     String serviceName = serviceComponent.getServiceName();
     String master_component_name = null;
     String slave_component_name = componentHost.getServiceComponentName();
@@ -3624,37 +3677,15 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     State slaveState = componentHost.getState();
     //Delete hostcomponents
     serviceComponent.deleteServiceComponentHosts(componentHost.getHostName());
-    // If deleted hostcomponents support decomission and were decommited and stopped
-    if (AmbariCustomCommandExecutionHelper.masterToSlaveMappingForDecom.containsValue(slave_component_name)
+    // If deleted hostcomponents support decommission and were decommissioned and stopped or in an unknown state
+    if (masterToSlaveMappingForDecom.containsValue(slave_component_name)
             && desiredAdminState.equals(HostComponentAdminState.DECOMMISSIONED)
-            && slaveState.equals(State.INSTALLED)) {
-
-      for (Entry<String, String> entrySet : AmbariCustomCommandExecutionHelper.masterToSlaveMappingForDecom.entrySet()) {
+            && (slaveState.equals(State.INSTALLED) || slaveState.equals(State.UNKNOWN))) {
+      for (Entry<String, String> entrySet : masterToSlaveMappingForDecom.entrySet()) {
         if (entrySet.getValue().equals(slave_component_name)) {
           master_component_name = entrySet.getKey();
         }
       }
-      //Clear exclud file or draining list except HBASE
-      if (!serviceName.equals(Service.Type.HBASE.toString())) {
-        HashMap<String, String> requestProperties = new HashMap<>();
-        requestProperties.put("context", "Remove host " +
-                included_hostname + " from exclude file");
-        requestProperties.put("exclusive", "true");
-        HashMap<String, String> params = new HashMap<>();
-        params.put("included_hosts", included_hostname);
-        params.put("slave_type", slave_component_name);
-        params.put(AmbariCustomCommandExecutionHelper.UPDATE_EXCLUDE_FILE_ONLY, "true");
-
-        //Create filter for RECOMISSION command
-        RequestResourceFilter resourceFilter
-                = new RequestResourceFilter(serviceName, master_component_name, null);
-        //Create request for RECOMISSION command
-        ExecuteActionRequest actionRequest = new ExecuteActionRequest(
-                serviceComponent.getClusterName(), AmbariCustomCommandExecutionHelper.DECOMMISSION_COMMAND_NAME, null,
-                Collections.singletonList(resourceFilter), null, params, true);
-        //Send request
-        createAction(actionRequest, requestProperties);
-      }
 
       //Mark master component as needed to restart for remove host info from components UI
       Cluster cluster = clusters.getCluster(serviceComponent.getClusterName());
@@ -3695,6 +3726,40 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     }
   }
 
+  /**
+   * Creates and triggers an action to update include and exclude files for the master components depending on current cluster topology and components state
+   * @param serviceMasterMap
+   * @param clusterName
+   * @throws AmbariException
+   */
+  private void createAndExecuteRefreshIncludeExcludeFilesActionForMasters(Map<String, String> serviceMasterMap, String clusterName) throws AmbariException {
+    //Clear include/exclude files or draining list except HBASE
+    serviceMasterMap.remove(Service.Type.HBASE.toString());
+    //exit if empty
+    if (serviceMasterMap.isEmpty()) {
+      return;
+    }
+    LOG.debug("Refresh include/exclude files action will be executed for " + serviceMasterMap);
+    HashMap<String, String> requestProperties = new HashMap<>();
+    requestProperties.put("context", "Update Include and Exclude Files for " + serviceMasterMap.keySet().toString());
+    requestProperties.put("exclusive", "true");
+    HashMap<String, String> params = new HashMap<>();
+    params.put(AmbariCustomCommandExecutionHelper.UPDATE_FILES_ONLY, "false");
+
+    //Create filter for command
+    List<RequestResourceFilter> resourceFilters = new ArrayList<>(serviceMasterMap.size());
+    for (String serviceName : serviceMasterMap.keySet()) {
+      resourceFilters.add(new RequestResourceFilter(serviceName, serviceMasterMap.get(serviceName), null));
+    }
+
+    //Create request for command
+    ExecuteActionRequest actionRequest = new ExecuteActionRequest(
+      clusterName, AmbariCustomCommandExecutionHelper.DECOMMISSION_COMMAND_NAME, null,
+      resourceFilters, null, params, true);
+    //Send action
+    createAction(actionRequest, requestProperties);
+  }
+
   @Override
   public void deleteMembers(java.util.Set<MemberRequest> requests) throws AmbariException {
     for (MemberRequest request : requests) {
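
The controller code above repeatedly scans masterToSlaveMappingForDecom to find the master that owns a given slave component, then groups the results per cluster and service. A compact Python sketch of the same bookkeeping follows; the mapping contents shown are illustrative assumptions (the real map lives in AmbariCustomCommandExecutionHelper), and only the lookup-and-group logic mirrors the patch.

# Illustrative contents; see AmbariCustomCommandExecutionHelper.masterToSlaveMappingForDecom.
MASTER_TO_SLAVE = {
  "NAMENODE": "DATANODE",
  "RESOURCEMANAGER": "NODEMANAGER",
  "HBASE_MASTER": "HBASE_REGIONSERVER"
}
SLAVE_TO_MASTER = dict((slave, master) for master, slave in MASTER_TO_SLAVE.items())

def collect_masters_for_refresh(deleted_host_components):
  """Group (cluster, service, slave_component) tuples into the
  cluster -> {service: master} map consumed by the refresh action."""
  per_cluster = {}
  for cluster, service, component in deleted_host_components:
    master = SLAVE_TO_MASTER.get(component)
    if master is None:
      continue  # component does not participate in decommission
    per_cluster.setdefault(cluster, {})[service] = master
  return per_cluster

deleted = [("c1", "HDFS", "DATANODE"), ("c1", "YARN", "NODEMANAGER")]
print(collect_masters_for_refresh(deleted))
# {'c1': {'HDFS': 'NAMENODE', 'YARN': 'RESOURCEMANAGER'}}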

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java
index 4e2944f..8ef42ba 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java
@@ -946,6 +946,7 @@ public class HostResourceProvider extends AbstractControllerResourceProvider {
             throw new AmbariException(reason.toString());
           }
         } else {
+//          TODO: why can't a host with all components stopped be deleted? The functionality is implemented and only this validation blocks the request.
           if (!componentsToRemove.isEmpty()) {
             StringBuilder reason = new StringBuilder("Cannot remove host ")
                 .append(hostName)

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
index aad2db0..4eab367 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
@@ -99,6 +99,12 @@
       excluded.</description>
     <on-ambari-upgrade add="true"/>
   </property>
+  <property>
+    <name>manage.include.files</name>
+    <value>false</value>
+    <description>If true Ambari will manage include file if dfs.hosts is configured.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
   <!--
     <property>
       <name>dfs.hosts</name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
index 15fda67..e054209 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
@@ -170,6 +170,14 @@ def hdfs(component=None):
          owner=params.hdfs_user,
          mode="f",
          )
+
+    if params.hdfs_include_file:
+      File(params.include_file_path,
+         content=Template("include_hosts_list.j2"),
+         owner=params.hdfs_user,
+         mode="f",
+         )
+      pass
   if params.service_map.has_key(component):
     service_name = params.service_map[component]
     ServiceConfig(service_name,

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
index 7226d22..cac6e9c 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
@@ -108,6 +108,14 @@ def namenode(action=None, hdfs_binary=None, do_format=True, upgrade_type=None,
          group=params.user_group
     )
 
+    if params.hdfs_include_file:
+      File(params.include_file_path,
+         content=Template("include_hosts_list.j2"),
+         owner=params.hdfs_user,
+         group=params.user_group
+      )
+      pass
+
     if do_format and not params.hdfs_namenode_format_disabled:
       format_namenode()
       pass
@@ -437,7 +445,15 @@ def decommission():
        group=user_group
   )
 
-  if not params.update_exclude_file_only:
+  if params.hdfs_include_file:
+    File(params.include_file_path,
+         content=Template("include_hosts_list.j2"),
+         owner=params.hdfs_user,
+         group=params.user_group
+         )
+    pass
+
+  if not params.update_files_only:
     Execute(nn_kinit_cmd,
             user=hdfs_user
     )
@@ -464,6 +480,13 @@ def decommission():
        owner=hdfs_user
   )
 
+  if params.hdfs_include_file:
+    File(params.include_file_path,
+         content=Template("include_hosts_list.j2"),
+         owner=params.hdfs_user
+         )
+    pass
+
   if params.dfs_ha_enabled:
     # due to a bug in hdfs, refreshNodes will not run on both namenodes so we
     # need to execute each command scoped to a particular namenode
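
Condensed, the decommission path now always regenerates the exclude file, additionally writes the include file when it is managed, and only kinits and triggers refreshNodes when the command is not a files-only update. A schematic Python sketch of that control flow (write_file and run are placeholder callables, not Ambari APIs):

def decommission(params, write_file, run):
  # Always regenerate the exclude file from the current topology.
  write_file(params["exclude_file_path"], params["hdfs_exclude_file"])
  # The include file is managed only when dfs.hosts is set and
  # manage.include.files is enabled (hdfs_include_file is then non-empty).
  if params.get("hdfs_include_file"):
    write_file(params["include_file_path"], params["hdfs_include_file"])
  # update_files_only=true means: refresh the files, skip refreshNodes.
  if not params.get("update_files_only"):
    run("hdfs dfsadmin -refreshNodes")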

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
index a9fc179..2854a00 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
@@ -163,7 +163,13 @@ falcon_user = config['configurations']['falcon-env']['falcon_user']
 #exclude file
 hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
 exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
-update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+include_file_path = default("/configurations/hdfs-site/dfs.hosts", None)
+hdfs_include_file = None
+manage_include_files = default("/configurations/hdfs-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+  hdfs_include_file = list(set(slave_hosts) - set(hdfs_exclude_file))
+update_files_only = default("/commandParams/update_files_only",False)
 command_phase = default("/commandParams/phase","")
 
 klist_path_local = get_klist_path(default('/configurations/kerberos-env/executable_search_paths', None))
@@ -172,7 +178,6 @@ kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executab
 hostname = config["hostname"]
 public_hostname = config["public_hostname"]
 rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
 oozie_servers = default("/clusterHostInfo/oozie_server", [])
 hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
 hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
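
The new parameters reduce to one set difference: when both dfs.hosts and manage.include.files are set, the include file lists every slave host that is not currently excluded. A standalone restatement (sorted here for deterministic output; the patch itself builds an unordered list):

def compute_hdfs_include_hosts(slave_hosts, exclude_hosts,
                               include_file_path, manage_include_files):
  """Return the host list for dfs.hosts, or None when the file is unmanaged."""
  if include_file_path and manage_include_files:
    return sorted(set(slave_hosts) - set(exclude_hosts))
  return None

print(compute_hdfs_include_hosts(
  slave_hosts=["dn1", "dn2", "dn3"],
  exclude_hosts=["dn2"],
  include_file_path="/etc/hadoop/conf/dfs.include",
  manage_include_files=True))
# ['dn1', 'dn3']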

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_windows.py
index 1e47c29..b3ac578 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_windows.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_windows.py
@@ -43,6 +43,13 @@ dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']
 #decomission
 hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
 exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
+include_file_path = default("/configurations/hdfs-site/dfs.hosts", None)
+hdfs_include_file = None
+manage_include_files = default("/configurations/hdfs-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+  slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+  hdfs_include_file = list(set(slave_hosts) - set(hdfs_exclude_file))
+update_files_only = default("/commandParams/update_files_only",False)
 # HDFS High Availability properties
 dfs_ha_enabled = False
 dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.internal.nameservices", None)

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/templates/include_hosts_list.j2
new file mode 100644
index 0000000..115a8a4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in hdfs_include_file %}
+{{host}}
+{% endfor %}
\ No newline at end of file
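
The template body is a plain loop over hdfs_include_file, one host per line. Outside of Ambari's resource_management Template resource it can be exercised with the jinja2 package directly (using jinja2 here is an illustration; trim_blocks=True drops the newline after each block tag so the output stays one host per line):

from jinja2 import Template

INCLUDE_HOSTS_TEMPLATE = "{% for host in hdfs_include_file %}\n{{host}}\n{% endfor %}"

rendered = Template(INCLUDE_HOSTS_TEMPLATE, trim_blocks=True).render(
  hdfs_include_file=["dn1.example.com", "dn3.example.com"])
print(rendered)
# dn1.example.com
# dn3.example.com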

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
index 15fda67..e054209 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
@@ -170,6 +170,14 @@ def hdfs(component=None):
          owner=params.hdfs_user,
          mode="f",
          )
+
+    if params.hdfs_include_file:
+      File(params.include_file_path,
+         content=Template("include_hosts_list.j2"),
+         owner=params.hdfs_user,
+         mode="f",
+         )
+      pass
   if params.service_map.has_key(component):
     service_name = params.service_map[component]
     ServiceConfig(service_name,

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
index 181b3c8..5a1f368 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
@@ -436,7 +436,15 @@ def decommission():
        group=user_group
   )
 
-  if not params.update_exclude_file_only:
+  if params.hdfs_include_file:
+    File(params.include_file_path,
+         content=Template("include_hosts_list.j2"),
+         owner=params.hdfs_user,
+         group=params.user_group
+         )
+    pass
+
+  if not params.update_files_only:
     Execute(nn_kinit_cmd,
             user=hdfs_user
     )
@@ -463,6 +471,13 @@ def decommission():
        owner=hdfs_user
   )
 
+  if params.hdfs_include_file:
+    File(params.include_file_path,
+         content=Template("include_hosts_list.j2"),
+         owner=params.hdfs_user
+         )
+    pass
+
   if params.dfs_ha_enabled:
     # due to a bug in hdfs, refreshNodes will not run on both namenodes so we
     # need to execute each command scoped to a particular namenode

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
index ad49d81..1581c2a 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
@@ -158,7 +158,13 @@ falcon_user = config['configurations']['falcon-env']['falcon_user']
 #exclude file
 hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
 exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
-update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+include_file_path = default("/configurations/hdfs-site/dfs.hosts", None)
+hdfs_include_file = None
+manage_include_files = default("/configurations/hdfs-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+  hdfs_include_file = list(set(slave_hosts) - set(hdfs_exclude_file))
+update_files_only = default("/commandParams/update_files_only", False)
 command_phase = default("/commandParams/phase","")
 
 klist_path_local = get_klist_path(default('/configurations/kerberos-env/executable_search_paths', None))

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_windows.py
index 70d95a6..6c492d8 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_windows.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_windows.py
@@ -43,6 +43,13 @@ dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']
 #decomission
 hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
 exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
+include_file_path = default("/configurations/hdfs-site/dfs.hosts", None)
+hdfs_include_file = None
+manage_include_files = default("/configurations/hdfs-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+  slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+  hdfs_include_file = list(set(slave_hosts) - set(hdfs_exclude_file))
+update_files_only = default("/commandParams/update_files_only",False)
 # HDFS High Availability properties
 dfs_ha_enabled = False
 dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.internal.nameservices", None)

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/templates/include_hosts_list.j2
new file mode 100644
index 0000000..115a8a4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in hdfs_include_file %}
+{{host}}
+{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-site.xml b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-site.xml
index d0d0ede..a65b801 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-site.xml
@@ -408,6 +408,12 @@
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
+    <name>manage.include.files</name>
+    <value>false</value>
+    <description>If true Ambari will manage include file if yarn.resourcemanager.nodes.include-path is configured.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
     <name>yarn.http.policy</name>
     <value>HTTP_ONLY</value>
     <description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
index f474a89..67931c6 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
@@ -241,11 +241,17 @@ user_group = config['configurations']['cluster-env']['user_group']
 exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
 exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
 
+nm_hosts = default("/clusterHostInfo/nm_hosts", [])
+#include file
+include_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.include-path", None)
+include_hosts = None
+manage_include_files = default("/configurations/yarn-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+  include_hosts = list(set(nm_hosts) - set(exclude_hosts))
+
 ats_host = set(default("/clusterHostInfo/app_timeline_server_hosts", []))
 has_ats = not len(ats_host) == 0
 
-nm_hosts = default("/clusterHostInfo/nm_hosts", [])
-
 # don't use len(nm_hosts) here, because the check can take too much time on large clusters
 number_of_nm = 1
 
@@ -345,7 +351,7 @@ HdfsResource = functools.partial(
   immutable_paths = get_not_managed_resources(),
   dfs_type = dfs_type
  )
-update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
+update_files_only = default("/commandParams/update_files_only",False)
 
 mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
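
The rename from update_exclude_file_only to update_files_only reflects the
widened meaning of the flag: it now suppresses the node refresh after writing
either managed file (the exclude list and, when manage.include.files is
enabled, the include list), not just the exclude file.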
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_windows.py
index 52918d2e..c2a02d7 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_windows.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_windows.py
@@ -59,4 +59,12 @@ hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
 
 exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
 exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
-update_exclude_file_only = config['commandParams']['update_exclude_file_only']
+
+nm_hosts = default("/clusterHostInfo/nm_hosts", [])
+#include file
+include_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.include-path", None)
+include_hosts = None
+manage_include_files = default("/configurations/yarn-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+  include_hosts = list(set(nm_hosts) - set(exclude_hosts))
+update_files_only = default("/commandParams/update_files_only", False)

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
index 7d024b1..b929af0 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
@@ -91,7 +91,14 @@ class ResourcemanagerWindows(Resourcemanager):
          mode="f"
     )
 
-    if params.update_exclude_file_only == False:
+    if params.include_hosts:
+      File(params.include_file_path,
+           content=Template("include_hosts_list.j2"),
+           owner=yarn_user,
+           mode="f"
+           )
+
+    if params.update_files_only == False:
       Execute(yarn_refresh_cmd, user=yarn_user)
 
 
@@ -159,7 +166,14 @@ class ResourcemanagerDefault(Resourcemanager):
          group=user_group
     )
 
-    if params.update_exclude_file_only == False:
+    if params.include_hosts:
+      File(params.include_file_path,
+           content=Template("include_hosts_list.j2"),
+           owner=yarn_user,
+           group=user_group
+           )
+
+    if params.update_files_only == False:
       Execute(yarn_refresh_cmd,
             environment= {'PATH' : params.execute_path },
             user=yarn_user)
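
The File resource with content=Template("include_hosts_list.j2") is the usual
resource_management idiom for materializing a Jinja2 template onto disk. A
minimal sketch of the pattern with simplified, hypothetical values (the import
paths match what these scripts use; in a real agent run the path, owner and
group come from the params module):

  from resource_management.core.resources.system import File
  from resource_management.core.source import Template

  File("/etc/hadoop/conf/yarn.include",            # hypothetical include path
       content=Template("include_hosts_list.j2"),  # looked up under templates/
       owner="yarn",
       group="hadoop")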

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/templates/include_hosts_list.j2
new file mode 100644
index 0000000..42e33c0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in include_hosts %}
+{{host}}
+{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
index 64e0bcb..2a69d35 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
@@ -402,6 +402,12 @@
     <on-ambari-upgrade add="false"/>
   </property>
   <property>
+    <name>manage.include.files</name>
+    <value>false</value>
+    <description>If true, Ambari will manage the include file if yarn.resourcemanager.nodes.include-path is configured.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
     <name>yarn.http.policy</name>
     <value>HTTP_ONLY</value>
     <description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_linux.py
index a05d259..68d17f0 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_linux.py
@@ -239,10 +239,17 @@ user_group = config['configurations']['cluster-env']['user_group']
 exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
 exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
 
+nm_hosts = default("/clusterHostInfo/nm_hosts", [])
+#include file
+include_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.include-path", None)
+include_hosts = None
+manage_include_files = default("/configurations/yarn-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+  include_hosts = list(set(nm_hosts) - set(exclude_hosts))
+
 ats_host = set(default("/clusterHostInfo/app_timeline_server_hosts", []))
 has_ats = not len(ats_host) == 0
 
-nm_hosts = default("/clusterHostInfo/nm_hosts", [])
 
 # don't use len(nm_hosts) here, because the check can take too much time on large clusters
 number_of_nm = 1
@@ -341,7 +348,7 @@ HdfsResource = functools.partial(
   immutable_paths = get_not_managed_resources(),
   dfs_type = dfs_type
  )
-update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
+update_files_only = default("/commandParams/update_files_only", False)
 
 mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_windows.py
index 52918d2e..c2a02d7 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_windows.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_windows.py
@@ -59,4 +59,12 @@ hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
 
 exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
 exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
-update_exclude_file_only = config['commandParams']['update_exclude_file_only']
+
+nm_hosts = default("/clusterHostInfo/nm_hosts", [])
+#include file
+include_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.include-path", None)
+include_hosts = None
+manage_include_files = default("/configurations/yarn-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+  include_hosts = list(set(nm_hosts) - set(exclude_hosts))
+update_files_only = default("/commandParams/update_files_only", False)

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py
index f6d6315..961fe63 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py
@@ -91,7 +91,14 @@ class ResourcemanagerWindows(Resourcemanager):
          mode="f"
     )
 
-    if params.update_exclude_file_only == False:
+    if params.include_hosts:
+      File(params.include_file_path,
+           content=Template("include_hosts_list.j2"),
+           owner=yarn_user,
+           mode="f"
+           )
+
+    if params.update_files_only == False:
       Execute(yarn_refresh_cmd, user=yarn_user)
 
 
@@ -174,7 +181,14 @@ class ResourcemanagerDefault(Resourcemanager):
          group=user_group
     )
 
-    if params.update_exclude_file_only == False:
+    if params.include_hosts:
+      File(params.include_file_path,
+           content=Template("include_hosts_list.j2"),
+           owner=yarn_user,
+           mode="f"
+           )
+
+    if params.update_files_only == False:
       Execute(yarn_refresh_cmd,
             environment= {'PATH' : params.execute_path },
             user=yarn_user)

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/templates/include_hosts_list.j2
new file mode 100644
index 0000000..42e33c0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in include_hosts %}
+{{host}}
+{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hdfs-site.xml
index 87684df..66d25cf 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hdfs-site.xml
@@ -72,6 +72,12 @@
       excluded.</description>
     <on-ambari-upgrade add="true"/>
   </property>
+  <property>
+    <name>manage.include.files</name>
+    <value>false</value>
+    <description>If true, Ambari will manage the include file if dfs.hosts is configured.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
   <!--
     <property>
       <name>dfs.hosts</name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_namenode.py
index 6de7735..19751f6 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_namenode.py
@@ -143,8 +143,16 @@ def decommission():
        owner=hdfs_user,
        group=user_group
   )
-  
-  if not params.update_exclude_file_only:
+
+  if params.hdfs_include_file:
+    File(params.include_file_path,
+         content=Template("include_hosts_list.j2"),
+         owner=params.hdfs_user,
+         group=params.user_group
+         )
+    pass
+
+  if not params.update_files_only:
     Execute(nn_kinit_cmd,
             user=hdfs_user
     )
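
The decommission flow is thus: write the exclude file, write the include file
when Ambari manages it, and then, unless update_files_only is set, kinit as the
HDFS user and refresh the NameNode's view of its hosts (typically via hdfs
dfsadmin -refreshNodes).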

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
index 19e223c..9cf163a 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
@@ -55,13 +55,18 @@ falcon_user = config['configurations']['falcon-env']['falcon_user']
 #exclude file
 hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
 exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
-update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+include_file_path = default("/configurations/hdfs-site/dfs.hosts", None)
+hdfs_include_file = None
+manage_include_files = default("/configurations/hdfs-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+  hdfs_include_file = list(set(slave_hosts) - set(hdfs_exclude_file))
+update_files_only = default("/commandParams/update_files_only",False)
 
 kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
 #hosts
 hostname = config["hostname"]
 rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
 oozie_servers = default("/clusterHostInfo/oozie_server", [])
 hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
 hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
@@ -241,4 +246,4 @@ ttnode_heapsize = "1024m"
 dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
 mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
 mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-script_https_protocol = Script.get_force_https_protocol_name()
\ No newline at end of file
+script_https_protocol = Script.get_force_https_protocol_name()

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/templates/include_hosts_list.j2
new file mode 100644
index 0000000..115a8a4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in hdfs_include_file %}
+{{host}}
+{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml
index 8e9b8b1..3cb5add 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml
@@ -334,6 +334,12 @@
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
+    <name>manage.include.files</name>
+    <value>false</value>
+    <description>If true, Ambari will manage the include file if yarn.resourcemanager.nodes.include-path is configured.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
     <name>yarn.timeline-service.enabled</name>
     <value>true</value>
     <description>Indicate to clients whether timeline service is enabled or not.

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/params.py
index 33496cfe..87b5992 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/params.py
@@ -118,6 +118,14 @@ user_group = config['configurations']['cluster-env']['user_group']
 exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
 exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
 
+nm_hosts = default("/clusterHostInfo/nm_hosts", [])
+#include file
+include_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.include-path", None)
+include_hosts = None
+manage_include_files = default("/configurations/yarn-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+  include_hosts = list(set(nm_hosts) - set(exclude_hosts))
+
 hostname = config['hostname']
 
 ats_host = set(default("/clusterHostInfo/app_timeline_server_hosts", []))
@@ -162,7 +170,7 @@ HdfsDirectory = functools.partial(
   kinit_path_local = kinit_path_local,
   bin_dir = hadoop_bin_dir
 )
-update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
+update_files_only = default("/commandParams/update_files_only",False)
 
 mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/resourcemanager.py
index 4d40d68..8bd76bf 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/resourcemanager.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/resourcemanager.py
@@ -89,7 +89,14 @@ class Resourcemanager(Script):
          group=user_group
     )
 
-    if params.update_exclude_file_only == False:
+    if params.include_hosts:
+      File(params.include_file_path,
+           content=Template("include_hosts_list.j2"),
+           owner=yarn_user,
+           group=user_group
+           )
+
+    if params.update_files_only == False:
       Execute(yarn_refresh_cmd,
             environment= {'PATH' : params.execute_path },
             user=yarn_user)

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/templates/include_hosts_list.j2
new file mode 100644
index 0000000..42e33c0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in include_hosts %}
+{{host}}
+{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/yarn-site.xml
index 93a35cd..f2da835 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/yarn-site.xml
@@ -334,6 +334,12 @@
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
+    <name>manage.include.files</name>
+    <value>false</value>
+    <description>If true, Ambari will manage the include file if yarn.resourcemanager.nodes.include-path is configured.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
     <name>yarn.timeline-service.enabled</name>
     <value>true</value>
     <description>Indicate to clients whether timeline service is enabled or not.

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/params.py
index 5a7e508..16db0e4 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/params.py
@@ -98,6 +98,13 @@ yarn_container_bin = "/usr/lib/hadoop-yarn/bin"
 #exclude file
 exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
 exclude_file_path = config['configurations']['yarn-site']['yarn.resourcemanager.nodes.exclude-path']
+nm_hosts = default("/clusterHostInfo/nm_hosts", [])
+#include file
+include_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.include-path", None)
+include_hosts = None
+manage_include_files = default("/configurations/yarn-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+  include_hosts = list(set(nm_hosts) - set(exclude_hosts))
 
 hostname = config['hostname']
 
@@ -128,7 +135,7 @@ HdfsDirectory = functools.partial(
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local
 )
-update_exclude_file_only = config['commandParams']['update_exclude_file_only']
+update_files_only = default("/commandParams/update_files_only", False)
 
 hadoop_bin = "/usr/lib/hadoop/sbin"
 mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/resourcemanager.py
index a286ae3..f92938b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/resourcemanager.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/resourcemanager.py
@@ -79,7 +79,14 @@ class Resourcemanager(Script):
          group=user_group
     )
 
-    if params.update_exclude_file_only == False:
+    if params.include_hosts:
+      File(params.include_file_path,
+           content=Template("include_hosts_list.j2"),
+           owner=yarn_user,
+           group=user_group
+           )
+
+    if params.update_files_only == False:
       Execute(yarn_refresh_cmd,
             user=yarn_user)
       pass

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/exclude_hosts_list.j2
new file mode 100644
index 0000000..c7ce416
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/exclude_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in exclude_hosts %}
+{{host}}
+{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/include_hosts_list.j2
new file mode 100644
index 0000000..42e33c0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in include_hosts %}
+{{host}}
+{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-site.xml
index 93a35cd..f2da835 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-site.xml
@@ -334,6 +334,12 @@
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
+    <name>manage.include.files</name>
+    <value>false</value>
+    <description>If true, Ambari will manage the include file if yarn.resourcemanager.nodes.include-path is configured.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
     <name>yarn.timeline-service.enabled</name>
     <value>true</value>
     <description>Indicate to clients whether timeline service is enabled or not.

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/params.py
index 5a7e508..bd188e9 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/params.py
@@ -98,7 +98,13 @@ yarn_container_bin = "/usr/lib/hadoop-yarn/bin"
 #exclude file
 exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
 exclude_file_path = config['configurations']['yarn-site']['yarn.resourcemanager.nodes.exclude-path']
-
+nm_hosts = default("/clusterHostInfo/nm_hosts", [])
+#include file
+include_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.include-path", None)
+include_hosts = None
+manage_include_files = default("/configurations/yarn-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+  include_hosts = list(set(nm_hosts) - set(exclude_hosts))
 hostname = config['hostname']
 
 if security_enabled:
@@ -128,7 +134,7 @@ HdfsDirectory = functools.partial(
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local
 )
-update_exclude_file_only = config['commandParams']['update_exclude_file_only']
+update_files_only = default("/commandParams/update_files_only", False)
 
 hadoop_bin = "/usr/lib/hadoop/sbin"
 mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/resourcemanager.py
index a286ae3..f92938b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/resourcemanager.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/resourcemanager.py
@@ -79,7 +79,14 @@ class Resourcemanager(Script):
          group=user_group
     )
 
-    if params.update_exclude_file_only == False:
+    if params.include_hosts:
+      File(params.include_file_path,
+           content=Template("include_hosts_list.j2"),
+           owner=yarn_user,
+           group=user_group
+           )
+
+    if params.update_files_only == False:
       Execute(yarn_refresh_cmd,
             user=yarn_user)
       pass

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/templates/include_hosts_list.j2
new file mode 100644
index 0000000..42e33c0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in include_hosts %}
+{{host}}
+{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index b3a12f2..fdfca0f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -6028,8 +6028,8 @@ public class AmbariManagementControllerTest {
     execCmd = storedTasks.get(0).getExecutionCommandWrapper
         ().getExecutionCommand();
     Map<String, String> cmdParams = execCmd.getCommandParams();
-    Assert.assertTrue(cmdParams.containsKey("update_exclude_file_only"));
-    Assert.assertTrue(cmdParams.get("update_exclude_file_only").equals("false"));
+    Assert.assertTrue(cmdParams.containsKey("update_files_only"));
+    Assert.assertTrue(cmdParams.get("update_files_only").equals("false"));
     Assert.assertNotNull(storedTasks);
     Assert.assertEquals(1, storedTasks.size());
     Assert.assertEquals(HostComponentAdminState.DECOMMISSIONED, scHost.getComponentAdminState());
@@ -6084,8 +6084,8 @@ public class AmbariManagementControllerTest {
       Assert.assertTrue(hrc.getCommandDetail().contains(host1));
       Assert.assertTrue(hrc.getCommandDetail().contains(host2));
       cmdParams = hrc.getExecutionCommandWrapper().getExecutionCommand().getCommandParams();
-      if(!cmdParams.containsKey("update_exclude_file_only")
-          || !cmdParams.get("update_exclude_file_only").equals("true")) {
+      if(!cmdParams.containsKey("update_files_only")
+          || !cmdParams.get("update_files_only").equals("true")) {
         countRefresh++;
       }
     }

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
index a7233c7..f0f2587 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
@@ -1041,7 +1041,7 @@ class TestNamenode(RMFTestCase):
                               bin_dir = '/usr/bin')
     self.assertNoMoreResources()
 
-  def test_decommission_update_exclude_file_only(self):
+  def test_decommission_update_files_only(self):
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
                        classname = "NameNode",
                        command = "decommission",

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json b/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json
index ea00a37..f928073 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json
@@ -33,7 +33,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
index d267bc1..a16ec26 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
@@ -60,7 +60,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
index 71423c8..f3e8dc3 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
@@ -32,7 +32,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json
index 009ff6d..7b0f78d 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json
@@ -35,7 +35,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json
index 2b078c3..01f0efc 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json
@@ -35,7 +35,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json
index 571b737..0cbd322 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json
@@ -33,7 +33,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
index 7fdb449..cfcf5e1 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
@@ -35,7 +35,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json
index 7378b68..7db73ab 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json
@@ -36,7 +36,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],


[36/50] [abbrv] ambari git commit: AMBARI-21516 Log Search docker test environment build front/backend only (mgergely)

Posted by nc...@apache.org.
AMBARI-21516 Log Search docker test environment build front/backend only (mgergely)

Change-Id: I30d9d9a2c38ceeea653f7dda2c51493bd2df7ae0


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/587c42d7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/587c42d7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/587c42d7

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 587c42d79da4b384ab18d7078c6d045a807a7bb5
Parents: 4fdca57
Author: Miklos Gergely <mg...@hortonworks.com>
Authored: Thu Jul 20 12:16:31 2017 +0200
Committer: Miklos Gergely <mg...@hortonworks.com>
Committed: Thu Jul 20 12:16:31 2017 +0200

----------------------------------------------------------------------
 ambari-logsearch/docker/logsearch-docker.sh | 22 +++++++++++++++++++++-
 1 file changed, 21 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/587c42d7/ambari-logsearch/docker/logsearch-docker.sh
----------------------------------------------------------------------
diff --git a/ambari-logsearch/docker/logsearch-docker.sh b/ambari-logsearch/docker/logsearch-docker.sh
index 4d53fa1..fc3524d 100755
--- a/ambari-logsearch/docker/logsearch-docker.sh
+++ b/ambari-logsearch/docker/logsearch-docker.sh
@@ -17,10 +17,30 @@
 sdir="`dirname \"$0\"`"
 : ${1:?"argument is missing: (start|stop|build-and-run|build|build-docker-and-run|build-mvn-and-run|build-docker-only|build-mvn-only)"}
 command="$1"
+shift
+
+while getopts "bf" opt; do
+  case $opt in
+    b) # build backend only
+      maven_build_options="-pl !ambari-logsearch-web"
+      ;;
+    f) # build frontend only
+      maven_build_options="-pl ambari-logsearch-web"
+      ;;
+    \?)
+      echo "Invalid option: -$OPTARG" >&2
+      exit 1
+      ;;
+    :)
+      echo "Option -$OPTARG requires an argument." >&2
+      exit 1
+      ;;
+  esac
+done
 
 function build_logsearch_project() {
   pushd $sdir/../
-  mvn clean package -DskipTests
+  mvn clean package -DskipTests $maven_build_options
   popd
 }
 


[17/50] [abbrv] ambari git commit: AMBARI-21483. Add UID/GID related enhancements (echekanskiy)

Posted by nc...@apache.org.
AMBARI-21483. Add UID/GID related enhancements (echekanskiy)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f92d1219
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f92d1219
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f92d1219

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: f92d12193b30d53dc06ab9642ef4b9d61b5bac1c
Parents: 56462b2
Author: Eugene Chekanskiy <ec...@hortonworks.com>
Authored: Sun Jul 16 20:22:34 2017 +0300
Committer: Eugene Chekanskiy <ec...@hortonworks.com>
Committed: Sun Jul 16 20:22:34 2017 +0300

----------------------------------------------------------------------
 .../ambari/server/state/PropertyInfo.java       |   2 +
 .../hooks/before-ANY/files/changeToSecureUid.sh |  13 +-
 .../before-ANY/scripts/shared_initialization.py |  45 ++-
 .../2.0.6/hooks/before-ANY/test_before_any.py   | 294 +++++++++++--------
 .../app/controllers/wizard/step7_controller.js  |  67 +++++
 .../configs/stack_config_properties_mapper.js   |  14 +-
 ambari-web/app/styles/application.less          |  15 +
 ...ontrols_service_config_usergroup_with_id.hbs |  27 ++
 ambari-web/app/utils/config.js                  |   3 +
 .../configs/service_configs_by_category_view.js |   6 +
 ambari-web/app/views/common/controls_view.js    |  39 +++
 11 files changed, 392 insertions(+), 133 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f92d1219/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
index 62396e3..63c850e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
@@ -281,7 +281,9 @@ public class PropertyInfo {
   public enum PropertyType {
     PASSWORD,
     USER,
+    UID,
     GROUP,
+    GID,
     TEXT,
     ADDITIONAL_USER_PROPERTY,
     NOT_MANAGED_HDFS_PATH,

http://git-wip-us.apache.org/repos/asf/ambari/blob/f92d1219/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/files/changeToSecureUid.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/files/changeToSecureUid.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/files/changeToSecureUid.sh
index 08542c4..4663f10 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/files/changeToSecureUid.sh
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/files/changeToSecureUid.sh
@@ -21,6 +21,7 @@
 
 username=$1
 directories=$2
+newUid=$3
 
 function find_available_uid() {
  for ((i=1001; i<=2000; i++))
@@ -34,7 +35,16 @@ function find_available_uid() {
  done
 }
 
-find_available_uid
+if [ -z $2 ]; then
+  test $(id -u ${username} 2>/dev/null)
+  if [ $? -ne 1 ]; then
+   newUid=`id -u ${username}`
+  else
+   find_available_uid
+  fi
+  echo $newUid
+  exit 0
+fi
 
 if [ $newUid -eq 0 ]
 then
@@ -43,7 +53,6 @@ then
 fi
 
 set -e
-
 dir_array=($(echo $directories | sed 's/,/\n/g'))
 old_uid=$(id -u $username)
 sudo_prefix="/var/lib/ambari-agent/ambari-sudo.sh -H -E"
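
With this change the script doubles as a UID query tool: invoked with only a
username (no directory list), it prints the user's current UID if the account
exists, or the first free UID in the 1001-2000 range otherwise, and exits;
invoked with a username and a comma-separated directory list, it proceeds to
change the UID and fix ownership as before, now taking the target UID as an
optional third argument.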

http://git-wip-us.apache.org/repos/asf/ambari/blob/f92d1219/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
index 39f5a47..bcc1a3a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
@@ -24,6 +24,7 @@ import tempfile
 from copy import copy
 from resource_management.libraries.functions.version import compare_versions
 from resource_management import *
+from resource_management.core import shell
 
 def setup_users():
   """
@@ -43,11 +44,17 @@ def setup_users():
       )
 
     for user in params.user_list:
-      User(user,
-          gid = params.user_to_gid_dict[user],
-          groups = params.user_to_groups_dict[user],
-          fetch_nonlocal_groups = params.fetch_nonlocal_groups
-      )
+      if params.override_uid == "true":
+        User(user,
+             uid = get_uid(user),
+             gid = params.user_to_gid_dict[user],
+             groups = params.user_to_groups_dict[user],
+             )
+      else:
+        User(user,
+             gid = params.user_to_gid_dict[user],
+             groups = params.user_to_groups_dict[user],
+             )
 
     if params.override_uid == "true":
       set_uid(params.smoke_user, params.smoke_user_dirs)
@@ -65,6 +72,7 @@ def setup_users():
                create_parents = True,
                cd_access="a",
     )
+
     if params.override_uid == "true":
       set_uid(params.hbase_user, params.hbase_user_dirs)
     else:
@@ -125,7 +133,7 @@ def create_users_and_groups(user_and_groups):
     Group(copy(groups_list),
     )
   return groups_list
-    
+
 def set_uid(user, user_dirs):
   """
   user_dirs - comma separated directories
@@ -136,9 +144,30 @@ def set_uid(user, user_dirs):
        content=StaticFile("changeToSecureUid.sh"),
        mode=0555)
   ignore_groupsusers_create_str = str(params.ignore_groupsusers_create).lower()
-  Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs}"),
+  uid = get_uid(user)
+  Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs} {uid}"),
           not_if = format("(test $(id -u {user}) -gt 1000) || ({ignore_groupsusers_create_str})"))
-    
+
+def get_uid(user):
+  import params
+  user_str = str(user) + "_uid"
+  service_env = [ serviceEnv for serviceEnv in params.config['configurations'] if user_str in params.config['configurations'][serviceEnv]]
+
+  if service_env and params.config['configurations'][service_env[0]][user_str]:
+    service_env_str = str(service_env[0])
+    uid = params.config['configurations'][service_env_str][user_str]
+    if len(service_env) > 1:
+      Logger.warning("Multiple values found for %s, using %s"  % (user_str, uid))
+    return uid
+  else:
+    if user == params.smoke_user:
+      return 0
+    File(format("{tmp_dir}/changeUid.sh"),
+         content=StaticFile("changeToSecureUid.sh"),
+         mode=0555)
+    code, newUid = shell.call((format("{tmp_dir}/changeUid.sh"), format("{user}")), sudo=True)
+    return newUid
+
 def setup_hadoop_env():
   import params
   stackversion = params.stack_version_unformatted

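To summarize the get_uid() resolution order introduced above: an explicit <user>_uid found in any configuration type wins (with a warning when several define it), the smoke user returns 0 (a sentinel the shell script handles specially), and any other user gets a UID computed by changeToSecureUid.sh. A simplified sketch, where run_script stands in for the shell.call invocation and the config shape only loosely mirrors the real command JSON:

    def get_uid_sketch(config, user, smoke_user, run_script):
        key = user + '_uid'
        # 1) an explicit <user>_uid set in any configuration type wins
        matches = [t for t, props in config['configurations'].items() if props.get(key)]
        if matches:
            return config['configurations'][matches[0]][key]
        # 2) the smoke user returns the 0 sentinel
        if user == smoke_user:
            return 0
        # 3) otherwise ask changeToSecureUid.sh for an existing or free UID
        return run_script(user)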
http://git-wip-us.apache.org/repos/asf/ambari/blob/f92d1219/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py b/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py
index 75c6543..1d2351f 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py
@@ -21,6 +21,7 @@ limitations under the License.
 from stacks.utils.RMFTestCase import *
 from mock.mock import MagicMock, call, patch
 from resource_management import Hook
+import itertools
 import getpass
 import os
 
@@ -45,147 +46,201 @@ class TestHookBeforeInstall(RMFTestCase):
     self.executeScript("2.0.6/hooks/before-ANY/scripts/hook.py",
                        classname="BeforeAnyHook",
                        command="hook",
-                       config_file="default.json"
-    )
-
-    self.assertResourceCalled('Group', 'hadoop',
-    )
-    self.assertResourceCalled('Group', 'nobody',
-    )
-    self.assertResourceCalled('Group', 'users',
+                       config_file="default.json",
+                       call_mocks=itertools.cycle([(0, "1000")])
     )
+    self.assertResourceCalled('Group', 'hadoop',)
+    self.assertResourceCalled('Group', 'nobody',)
+    self.assertResourceCalled('Group', 'users',)
+    self.assertResourceCalled('File', '/tmp/changeUid.sh',
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
     self.assertResourceCalled('User', 'hive',
-        gid = 'hadoop',
-        groups = [u'hadoop'],
-        fetch_nonlocal_groups = True,
-    )
+                              gid = 'hadoop',
+                              uid = '1000',
+                              groups = [u'hadoop'],
+                              )
+    self.assertResourceCalled('File', '/tmp/changeUid.sh',
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
     self.assertResourceCalled('User', 'oozie',
-        gid = 'hadoop',
-        groups = [u'users'],
-        fetch_nonlocal_groups = True,
-    )
+                              gid = 'hadoop',
+                              uid = '1000',
+                              groups = [u'users'],
+                              )
+    self.assertResourceCalled('File', '/tmp/changeUid.sh',
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
     self.assertResourceCalled('User', 'nobody',
-        gid = 'hadoop',
-        groups = [u'nobody'],
-        fetch_nonlocal_groups = True,
-    )
+                              gid = 'hadoop',
+                              uid = '1000',
+                              groups = [u'nobody'],
+                              )
     self.assertResourceCalled('User', 'ambari-qa',
-        gid = 'hadoop',
-        groups = [u'users'],
-        fetch_nonlocal_groups = True,
-    )
+                              gid = 'hadoop',
+                              uid = 0,
+                              groups = [u'users'],
+                              )
+    self.assertResourceCalled('File', '/tmp/changeUid.sh',
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
     self.assertResourceCalled('User', 'flume',
-        gid = 'hadoop',
-        groups = [u'hadoop'],
-        fetch_nonlocal_groups = True,
-    )
+                              gid = 'hadoop',
+                              uid = '1000',
+                              groups = [u'hadoop'],
+                              )
+    self.assertResourceCalled('File', '/tmp/changeUid.sh',
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
     self.assertResourceCalled('User', 'hdfs',
-        gid = 'hadoop',
-        groups = [u'hadoop'],
-        fetch_nonlocal_groups = True,
-    )
+                              gid = 'hadoop',
+                              uid = '1000',
+                              groups = [u'hadoop'],
+                              )
+    self.assertResourceCalled('File', '/tmp/changeUid.sh',
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
     self.assertResourceCalled('User', 'storm',
-        gid = 'hadoop',
-        groups = [u'hadoop'],
-        fetch_nonlocal_groups = True,
-    )
+                              gid = 'hadoop',
+                              uid = '1000',
+                              groups = [u'hadoop'],
+                              )
+    self.assertResourceCalled('File', '/tmp/changeUid.sh',
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
     self.assertResourceCalled('User', 'mapred',
-        gid = 'hadoop',
-        groups = [u'hadoop'],
-        fetch_nonlocal_groups = True,
-    )
+                              gid = 'hadoop',
+                              uid = '1000',
+                              groups = [u'hadoop'],
+                              )
+    self.assertResourceCalled('File', '/tmp/changeUid.sh',
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
     self.assertResourceCalled('User', 'hbase',
-        gid = 'hadoop',
-        groups = [u'hadoop'],
-        fetch_nonlocal_groups = True,
-    )
+                              gid = 'hadoop',
+                              uid = '1000',
+                              groups = [u'hadoop'],
+                              )
+    self.assertResourceCalled('File', '/tmp/changeUid.sh',
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
     self.assertResourceCalled('User', 'tez',
-        gid = 'hadoop',
-        groups = [u'users'],
-        fetch_nonlocal_groups = True,
-    )
+                              gid = 'hadoop',
+                              uid = '1000',
+                              groups = [u'users'],
+                              )
+    self.assertResourceCalled('File', '/tmp/changeUid.sh',
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
     self.assertResourceCalled('User', 'zookeeper',
-        gid = 'hadoop',
-        groups = [u'hadoop'],
-        fetch_nonlocal_groups = True,
-    )
+                              gid = 'hadoop',
+                              uid = '1000',
+                              groups = [u'hadoop'],
+                              )
+    self.assertResourceCalled('File', '/tmp/changeUid.sh',
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
     self.assertResourceCalled('User', 'falcon',
-        gid = 'hadoop',
-        groups = [u'users'],
-        fetch_nonlocal_groups = True,
-    )
+                              gid = 'hadoop',
+                              uid = '1000',
+                              groups = [u'users'],
+                              )
+    self.assertResourceCalled('File', '/tmp/changeUid.sh',
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
     self.assertResourceCalled('User', 'sqoop',
-        gid = 'hadoop',
-        groups = [u'hadoop'],
-        fetch_nonlocal_groups = True,
-    )
+                              gid = 'hadoop',
+                              uid = '1000',
+                              groups = [u'hadoop'],
+                              )
+    self.assertResourceCalled('File', '/tmp/changeUid.sh',
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
     self.assertResourceCalled('User', 'yarn',
-        gid = 'hadoop',
-        groups = [u'hadoop'],
-        fetch_nonlocal_groups = True,
-    )
+                              gid = 'hadoop',
+                              uid = '1000',
+                              groups = [u'hadoop'],
+                              )
+    self.assertResourceCalled('File', '/tmp/changeUid.sh',
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
     self.assertResourceCalled('User', 'hcat',
-        gid = 'hadoop',
-        groups = [u'hadoop'],
-        fetch_nonlocal_groups = True,
-    )
+                              gid = 'hadoop',
+                              uid = '1000',
+                              groups = [u'hadoop'],
+                              )
     self.assertResourceCalled('File', '/tmp/changeUid.sh',
-        content = StaticFile('changeToSecureUid.sh'),
-        mode = 0555,
-    )
-    self.assertResourceCalled('Execute', '/tmp/changeUid.sh ambari-qa /tmp/hadoop-ambari-qa,/tmp/hsperfdata_ambari-qa,/home/ambari-qa,/tmp/ambari-qa,/tmp/sqoop-ambari-qa',
-        not_if = '(test $(id -u ambari-qa) -gt 1000) || (false)',
-    )
-    self.assertResourceCalled('Directory', self.TMP_PATH,
-        owner = 'hbase',
-        mode = 0775,
-        create_parents = True,
-        cd_access='a'
-    )
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
+    self.assertResourceCalled('Execute', '/tmp/changeUid.sh ambari-qa /tmp/hadoop-ambari-qa,/tmp/hsperfdata_ambari-qa,/home/ambari-qa,/tmp/ambari-qa,/tmp/sqoop-ambari-qa 0',
+                              not_if = '(test $(id -u ambari-qa) -gt 1000) || (false)',
+                              )
+    self.assertResourceCalled('Directory', '/tmp/hbase-hbase',
+                              owner = 'hbase',
+                              create_parents = True,
+                              mode = 0775,
+                              cd_access = 'a',
+                              )
     self.assertResourceCalled('File', '/tmp/changeUid.sh',
-        content = StaticFile('changeToSecureUid.sh'),
-        mode = 0555,
-    )
-    self.assertResourceCalled('Execute', '/tmp/changeUid.sh hbase /home/hbase,/tmp/hbase,/usr/bin/hbase,/var/log/hbase,' + self.TMP_PATH,
-        not_if = '(test $(id -u hbase) -gt 1000) || (false)',
-    )
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
+    self.assertResourceCalled('File', '/tmp/changeUid.sh',
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
+    self.assertResourceCalled('Execute', '/tmp/changeUid.sh hbase /home/hbase,/tmp/hbase,/usr/bin/hbase,/var/log/hbase,/tmp/hbase-hbase 1000',
+                              not_if = '(test $(id -u hbase) -gt 1000) || (false)',
+                              )
     self.assertResourceCalled('User', 'test_user1',
-        fetch_nonlocal_groups = True,
-    )
+                              fetch_nonlocal_groups = True,
+                              )
     self.assertResourceCalled('User', 'test_user2',
-        fetch_nonlocal_groups = True,
-    )
-    self.assertResourceCalled('Group', 'hdfs',
-    )
-    self.assertResourceCalled('Group', 'test_group',
-    )
+                              fetch_nonlocal_groups = True,
+                              )
+    self.assertResourceCalled('Group', 'hdfs',)
+    self.assertResourceCalled('Group', 'test_group',)
     self.assertResourceCalled('User', 'hdfs',
-        groups = [u'hadoop', u'hdfs', u'test_group'],
-        fetch_nonlocal_groups = True,
-    )
+                              fetch_nonlocal_groups = True,
+                              groups = [u'hadoop', u'hdfs', u'test_group'],
+                              )
     self.assertResourceCalled('Directory', '/etc/hadoop',
-        mode = 0755
-    )
+                              mode = 0755,
+                              )
     self.assertResourceCalled('Directory', '/etc/hadoop/conf.empty',
-        owner = 'root',
-        group = 'hadoop',
-        create_parents = True,
-    )
+                              owner = 'root',
+                              create_parents = True,
+                              group = 'hadoop',
+                              )
     self.assertResourceCalled('Link', '/etc/hadoop/conf',
-        not_if = 'ls /etc/hadoop/conf',
-        to = '/etc/hadoop/conf.empty',
-    )
+                              not_if = 'ls /etc/hadoop/conf',
+                              to = '/etc/hadoop/conf.empty',
+                              )
     self.assertResourceCalled('File', '/etc/hadoop/conf/hadoop-env.sh',
-        content = InlineTemplate(self.getConfig()['configurations']['hadoop-env']['content']),
-        owner = 'hdfs',
-        group = 'hadoop'
+                              content = InlineTemplate(self.getConfig()['configurations']['hadoop-env']['content']),
+                              owner = 'hdfs',
+                              group = 'hadoop'
     )
     self.assertResourceCalled('Directory', '/tmp/hadoop_java_io_tmpdir',
                               owner = 'hdfs',
                               group = 'hadoop',
-                              mode = 01777
-    )
-
+                              mode = 01777,
+                              )
     self.assertResourceCalled('Directory', '/tmp/AMBARI-artifacts/',
                               create_parents = True,
                               )
@@ -198,20 +253,17 @@ class TestHookBeforeInstall(RMFTestCase):
                               )
     self.assertResourceCalled('Directory', '/usr/jdk64',)
     self.assertResourceCalled('Execute', ('chmod', 'a+x', u'/usr/jdk64'),
-                              sudo = True
-                              )
-    self.assertResourceCalled('Execute', 'cd /tmp/jdk_tmp_dir && tar -xf /tmp/jdk-7u67-linux-x64.tar.gz && ambari-sudo.sh cp -rp /tmp/jdk_tmp_dir/* /usr/jdk64'
+                              sudo = True,
                               )
+    self.assertResourceCalled('Execute', 'cd /tmp/jdk_tmp_dir && tar -xf /tmp/jdk-7u67-linux-x64.tar.gz && ambari-sudo.sh cp -rp /tmp/jdk_tmp_dir/* /usr/jdk64',)
     self.assertResourceCalled('Directory', '/tmp/jdk_tmp_dir',
-                              action = ['delete']
+                              action = ['delete'],
                               )
-
     self.assertResourceCalled('File', '/usr/jdk64/jdk1.7.0_45/bin/java',
                               mode = 0755,
-                              cd_access = "a",
+                              cd_access = 'a',
                               )
     self.assertResourceCalled('Execute', ('chmod', '-R', '755', u'/usr/jdk64/jdk1.7.0_45'),
-      sudo = True,
-    )
-
+                              sudo = True,
+                              )
     self.assertNoMoreResources()

http://git-wip-us.apache.org/repos/asf/ambari/blob/f92d1219/ambari-web/app/controllers/wizard/step7_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard/step7_controller.js b/ambari-web/app/controllers/wizard/step7_controller.js
index 9a897d0..6a90c26 100644
--- a/ambari-web/app/controllers/wizard/step7_controller.js
+++ b/ambari-web/app/controllers/wizard/step7_controller.js
@@ -531,6 +531,7 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, App.E
     }
     var stepConfigs = this.createStepConfigs();
     var serviceConfigs = this.renderConfigs(stepConfigs, configs);
+    this.addUidAndGidRepresentations(serviceConfigs);
     // if HA is enabled -> Make some reconfigurations
     if (this.get('wizardController.name') === 'addServiceController') {
       this.updateComponentActionConfigs(configs, serviceConfigs);
@@ -802,6 +803,38 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, App.E
   },
 
   /**
+   * Set the uid property for user properties. The uid is later used to help map the user and uid values in adjacent columns
+   * @param {object} miscSvc
+   * @param {string} svcName
+   * @private
+   */
+  _setUID: function (miscSvc, svcName) {
+    var user = miscSvc.configs.findProperty('name', svcName + '_user');
+    if (user) {
+      var uid = miscSvc.configs.findProperty('name', user.value + '_uid');
+      if (uid) {
+        user.set('ugid', uid);
+      }
+    }
+  },
+
+  /**
+   * Set the gid property for group properties. The gid is later used to help map the group and gid values in adjacent columns
+   * @param {object} miscSvc
+   * @param {string} svcName
+   * @private
+   */
+  _setGID: function (miscSvc, svcName) {
+    var group = miscSvc.configs.findProperty('name', svcName + '_group');
+    if (group) {
+      var gid = miscSvc.configs.findProperty('name', group.value + '_gid');
+      if (gid) {
+        group.set('ugid', gid);
+      }
+    }
+  },
+
+  /**
    * render configs, distribute them by service
    * and wrap each in ServiceConfigProperty object
    * @param stepConfigs
@@ -841,6 +874,11 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, App.E
       this.updateHostOverrides(serviceConfigProperty, _config);
       if (this.get('wizardController.name') === 'addServiceController') {
         this._updateIsEditableFlagForConfig(serviceConfigProperty, true);
+        //since the override_uid and ignore_groupsusers_create changes are not saved to the database post-install, they should be editable only
+        //during initial cluster installation
+        if (['override_uid', 'ignore_groupsusers_create'].contains(serviceConfigProperty.get('name'))) {
+          serviceConfigProperty.set('isEditable', false);
+        }
       }
       if (!this.get('content.serviceConfigProperties.length') && !serviceConfigProperty.get('hasInitialValue')) {
         App.ConfigInitializer.initialValue(serviceConfigProperty, localDB, dependencies);
@@ -860,6 +898,35 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, App.E
     return stepConfigs;
   },
 
+  addUidAndGidRepresentations: function(serviceConfigs) {
+    //map the uids to the corresponding users
+    var miscSvc = serviceConfigs.findProperty('serviceName', 'MISC');
+    if (miscSvc) {
+      //iterate through the list of users and groups and assign the uid/gid accordingly
+      //user properties are servicename_user
+      //uid properties are value of servicename_user + _uid
+      //group properties are servicename_group
+      //gid properties are value of servicename_group + _gid
+      //we will map the users/uids and groups/gids based on this assumption
+      this.get('selectedServiceNames').forEach(function (serviceName) {
+        this._setUID(miscSvc, serviceName.toLowerCase());
+        this._setGID(miscSvc, serviceName.toLowerCase());
+      }, this);
+
+      //ZooKeeper does not follow the servicename_user convention:
+      //its user property is zk_user, not zookeeper_user, so set its uid separately
+      this._setUID(miscSvc, 'zk');
+      //MAPREDUCE2 uses mapred_user rather than mapreduce2_user, so set its uid separately
+      this._setUID(miscSvc, 'mapred');
+      //Hadoop does not follow the servicename_group convention:
+      //its group property is user_group, not hadoop_group, so set its gid separately
+      this._setGID(miscSvc, 'user');
+
+      // uid/gid properties are rendered in a column next to their user/group property, so hide them as standalone rows
+      miscSvc.configs.filterProperty('displayType', 'uid_gid').setEach('isVisible', false);
+    }
+  },
+
   /**
    * Add host name properties to appropriate categories (for installer and add service)
    *

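The comments above encode a naming convention: a service's user property is <service>_user, and the matching UID property is named after that property's *value* plus "_uid" (groups and "_gid" work the same way, with user_group as the Hadoop group). A toy illustration with hypothetical values:

    configs = {'hive_user': 'hive', 'hive_uid': '1003',
               'user_group': 'hadoop', 'hadoop_gid': '1001'}

    user = configs['hive_user']         # 'hive'
    uid = configs.get(user + '_uid')    # '1003', shown next to the user field
    group = configs['user_group']       # 'hadoop'
    gid = configs.get(group + '_gid')   # '1001', shown next to the group field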
http://git-wip-us.apache.org/repos/asf/ambari/blob/f92d1219/ambari-web/app/mappers/configs/stack_config_properties_mapper.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mappers/configs/stack_config_properties_mapper.js b/ambari-web/app/mappers/configs/stack_config_properties_mapper.js
index 9b4b920..75a5564 100644
--- a/ambari-web/app/mappers/configs/stack_config_properties_mapper.js
+++ b/ambari-web/app/mappers/configs/stack_config_properties_mapper.js
@@ -197,9 +197,14 @@ App.stackConfigPropertiesMapper = App.QuickDataMapper.create({
    * @param config
    */
   handleSpecialProperties: function(config) {
-    if (!config.StackConfigurations.property_type.contains('ADDITIONAL_USER_PROPERTY')) {
+    var types = config.StackConfigurations.property_type;
+    if (!types.contains('ADDITIONAL_USER_PROPERTY')) {
       config.index = App.StackService.displayOrder.indexOf(config.StackConfigurations.service_name) + 1 || 30;
     }
+    // displayType from the stack is ignored because UID and GID should be shown alongside the service's user config
+    if (types.contains('UID') || types.contains('GID')) {
+      config.StackConfigurations.property_value_attributes.type = 'uid_gid';
+    }
     config.StackConfigurations.service_name = 'MISC';
     config.category = 'Users and Groups';
   },
@@ -210,7 +215,12 @@ App.stackConfigPropertiesMapper = App.QuickDataMapper.create({
    * @returns {Boolean}
    */
   isMiscService: function(type) {
-    return type.length && (type.contains('USER') || type.contains('GROUP') || type.contains('ADDITIONAL_USER_PROPERTY'));
+    return type.length &&
+      (type.contains('USER')
+      || type.contains('GROUP')
+      || type.contains('ADDITIONAL_USER_PROPERTY')
+      || type.contains('UID')
+      || type.contains('GID'));
   },
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/f92d1219/ambari-web/app/styles/application.less
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/application.less b/ambari-web/app/styles/application.less
index 29788bc..a32275f 100644
--- a/ambari-web/app/styles/application.less
+++ b/ambari-web/app/styles/application.less
@@ -1147,6 +1147,21 @@ a:focus {
   }
 }
 
+.serviceConfigUGIDLbl {
+  display: inline-block;
+  text-align: left;
+  margin-left: 92px;
+  width: 100px;
+}
+
+.serviceConfigUGID {
+  width: 150px !important;
+}
+
+.serviceConfigNoUGID {
+  width: 500px !important;
+}
+
 .chart-container {
   cursor: pointer;
   cursor: -moz-zoom-in;

http://git-wip-us.apache.org/repos/asf/ambari/blob/f92d1219/ambari-web/app/templates/wizard/controls_service_config_usergroup_with_id.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/wizard/controls_service_config_usergroup_with_id.hbs b/ambari-web/app/templates/wizard/controls_service_config_usergroup_with_id.hbs
new file mode 100644
index 0000000..24c785c
--- /dev/null
+++ b/ambari-web/app/templates/wizard/controls_service_config_usergroup_with_id.hbs
@@ -0,0 +1,27 @@
+{{!
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+}}
+
+{{#if view.isUIDGIDVisible}}
+    {{view App.ServiceConfigTextField serviceConfigBinding="view.serviceConfig" class="serviceConfigUGID"}}
+    <label class="serviceConfigUGIDLbl control-label" {{bindAttr for="view.serviceConfig.ugid.name"}}>
+        {{view.serviceConfig.ugid.displayName}}
+    </label>
+    {{view Ember.TextField valueBinding="view.serviceConfig.ugid.value" class="serviceConfigUGID"}}
+{{else}}
+    {{view App.ServiceConfigTextField serviceConfigBinding="view.serviceConfig" class="serviceConfigNoUGID"}}
+{{/if}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/f92d1219/ambari-web/app/utils/config.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/config.js b/ambari-web/app/utils/config.js
index 00cc2a3..7cfcb13 100644
--- a/ambari-web/app/utils/config.js
+++ b/ambari-web/app/utils/config.js
@@ -545,6 +545,9 @@ App.config = Em.Object.create({
    */
   getViewClass: function (displayType, dependentConfigPattern, unit) {
     switch (displayType) {
+      case 'user':
+      case 'group':
+        return App.ServiceConfigTextFieldUserGroupWithID;
       case 'checkbox':
       case 'boolean':
         return dependentConfigPattern ? App.ServiceConfigCheckboxWithDependencies : App.ServiceConfigCheckbox;

http://git-wip-us.apache.org/repos/asf/ambari/blob/f92d1219/ambari-web/app/views/common/configs/service_configs_by_category_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/configs/service_configs_by_category_view.js b/ambari-web/app/views/common/configs/service_configs_by_category_view.js
index 4058020..6cf9b99 100644
--- a/ambari-web/app/views/common/configs/service_configs_by_category_view.js
+++ b/ambari-web/app/views/common/configs/service_configs_by_category_view.js
@@ -50,6 +50,7 @@ App.ServiceConfigsByCategoryView = Em.View.extend(App.Persist, App.ConfigOverrid
    * @type {App.ServiceConfigProperty[]}
    */
   serviceConfigs: null,
+  isUIDGIDVisible: true,
 
   /**
    * This is array of all the properties which apply
@@ -744,6 +745,11 @@ App.ServiceConfigsByCategoryView = Em.View.extend(App.Persist, App.ConfigOverrid
   setRecommendedValue: function (event) {
     var serviceConfigProperty = event.contexts[0];
     serviceConfigProperty.set('value', serviceConfigProperty.get('recommendedValue'));
+
+    //in case of USER/GROUP fields, if they have uid/gid set, then these need to be reset to the recommended value as well
+    if (serviceConfigProperty.get('ugid')) {
+      serviceConfigProperty.set('ugid.value', serviceConfigProperty.get('ugid.recommendedValue'));
+    }
     serviceConfigProperty = null;
   },
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f92d1219/ambari-web/app/views/common/controls_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/controls_view.js b/ambari-web/app/views/common/controls_view.js
index edeaf0a..4e926ba 100644
--- a/ambari-web/app/views/common/controls_view.js
+++ b/ambari-web/app/views/common/controls_view.js
@@ -247,6 +247,40 @@ App.ServiceConfigTextField = Ember.TextField.extend(App.ServiceConfigPopoverSupp
 });
 
 /**
+ * Customized input control for user/group configs with corresponding uid/gid specified
+ * @type {Em.View}
+ */
+App.ServiceConfigTextFieldUserGroupWithID = Ember.View.extend(App.ServiceConfigPopoverSupport, {
+  valueBinding: 'serviceConfig.value',
+  placeholderBinding: 'serviceConfig.savedValue',
+  classNames: 'display-inline-block',
+
+  templateName: require('templates/wizard/controls_service_config_usergroup_with_id'),
+
+  isUIDGIDVisible: function () {
+    var overrideUidDisabled = this.get('parentView').serviceConfigs.findProperty('name', 'override_uid').value === 'false';
+    //don't display the ugid field if there is no uid/gid for this property or override_uid is unchecked
+    if (Em.isNone(this.get('serviceConfig.ugid')) || overrideUidDisabled) {
+      return false;
+    }
+
+    var serviceName = this.get('serviceConfig').name.substr(0, this.get('serviceConfig').name.indexOf('_')).toUpperCase();
+    if (serviceName === 'ZK') {
+      serviceName = 'ZOOKEEPER';
+    }
+    if (serviceName === 'MAPRED') {
+      serviceName = 'YARN';
+    }
+    //hide when the service is already installed (add service wizard) or for the Hadoop user group
+    if (App.Service.find(serviceName).get('isLoaded') || serviceName === 'USER') {
+      return false;
+    }
+
+    return this.get('parentView.isUIDGIDVisible');
+  }.property('parentView.isUIDGIDVisible')
+});
+
+/**
  * Customized input control with Units type specified
  * @type {Em.View}
  */
@@ -415,6 +449,11 @@ var checkboxConfigView = Ember.Checkbox.extend(App.ServiceConfigPopoverSupport,
       this.set('serviceConfig.value', this.get(this.get('checked') + 'Value'));
       this.get('serviceConfig').set("editDone", true);
       this.sendRequestRorDependentConfigs(this.get('serviceConfig'));
+
+      //if the checkbox being toggled is the 'Have Ambari manage UIDs' in Misc Tab, show/hide uid/gid column accordingly
+      if (this.get('serviceConfig.name') === 'override_uid') {
+         this.set('parentView.isUIDGIDVisible', this.get('checked'));
+      }
     }
   }.observes('checked'),
 

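isUIDGIDVisible above derives the owning service from the property-name prefix, special-casing the two irregular prefixes (zk -> ZOOKEEPER, mapred -> YARN). A hypothetical Python sketch of that mapping, not Ambari code:

    def service_for_property(name):
        prefix = name.split('_', 1)[0].upper()  # e.g. 'hdfs_user' -> 'HDFS'
        return {'ZK': 'ZOOKEEPER', 'MAPRED': 'YARN'}.get(prefix, prefix)

    assert service_for_property('zk_user') == 'ZOOKEEPER'
    assert service_for_property('mapred_user') == 'YARN'
    assert service_for_property('hdfs_user') == 'HDFS'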

[50/50] [abbrv] ambari git commit: Merge branch 'trunk' into branch-feature-AMBARI-12556

Posted by nc...@apache.org.
Merge branch 'trunk' into branch-feature-AMBARI-12556


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6283ae4f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6283ae4f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6283ae4f

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 6283ae4f688fbbe04cbb904d4469d3b0f9203335
Parents: c0201e2 f500c9e
Author: Nate Cole <nc...@hortonworks.com>
Authored: Mon Jul 24 10:57:08 2017 -0400
Committer: Nate Cole <nc...@hortonworks.com>
Committed: Mon Jul 24 11:23:33 2017 -0400

----------------------------------------------------------------------
 .../authentication/AuthenticationMainCtrl.js    |    4 +-
 .../controllers/groups/GroupsEditCtrl.js        |    3 +
 .../stackVersions/StackVersionsCreateCtrl.js    |    3 +-
 .../ui/admin-web/app/scripts/i18n.config.js     |    7 -
 .../resource_management/TestUserResource.py     |    2 +-
 .../python/ambari_commons/get_ambari_version.py |   44 +
 .../ambari_commons/resources/os_family.json     |    3 +-
 .../python/resource_management/core/base.py     |   11 +
 .../core/providers/accounts.py                  |    6 +-
 .../core/resources/accounts.py                  |    4 +-
 .../libraries/functions/check_process_status.py |   20 +
 .../libraries/functions/conf_select.py          |   56 +-
 .../libraries/functions/get_stack_version.py    |    4 +-
 .../libraries/functions/solr_cloud_util.py      |   10 +
 .../libraries/functions/stack_features.py       |   41 +-
 .../libraries/functions/stack_tools.py          |   39 +
 .../libraries/providers/hdfs_resource.py        |   48 +-
 .../libraries/script/script.py                  |   21 +-
 .../src/main/resources/solr                     |  826 ++++++++---
 ambari-infra/ambari-infra-solr-client/build.xml |    1 +
 ambari-infra/ambari-infra-solr-client/pom.xml   |   10 +
 .../ambari/infra/solr/AmbariSolrCloudCLI.java   |   14 +
 .../infra/solr/AmbariSolrCloudClient.java       |    8 +
 .../commands/RemoveAdminHandlersCommand.java    |   46 +
 .../commands/SetClusterPropertyZkCommand.java   |    6 +-
 .../src/main/resources/solrIndexHelper.sh       |  156 +++
 .../InfraRuleBasedAuthorizationPluginTest.java  |    5 +
 ambari-infra/pom.xml                            |    2 +-
 .../api/ShipperConfigElementDescription.java    |   59 +
 .../api/ShipperConfigTypeDescription.java       |   44 +
 .../config/zookeeper/LogSearchConfigZK.java     |   39 +-
 .../model/inputconfig/impl/ConditionsImpl.java  |   13 +
 .../model/inputconfig/impl/FieldsImpl.java      |   14 +
 .../inputconfig/impl/FilterDescriptorImpl.java  |   51 +
 .../impl/FilterGrokDescriptorImpl.java          |   24 +
 .../impl/FilterKeyValueDescriptorImpl.java      |   28 +
 .../model/inputconfig/impl/InputConfigImpl.java |   18 +
 .../inputconfig/impl/InputDescriptorImpl.java   |  101 ++
 .../impl/InputFileBaseDescriptorImpl.java       |   27 +
 .../impl/InputS3FileDescriptorImpl.java         |   16 +
 .../impl/MapAnonymizeDescriptorImpl.java        |   21 +-
 .../inputconfig/impl/MapDateDescriptorImpl.java |   20 +-
 .../impl/MapFieldCopyDescriptorImpl.java        |   14 +-
 .../impl/MapFieldDescriptorImpl.java            |   33 +
 .../impl/MapFieldNameDescriptorImpl.java        |   14 +-
 .../impl/MapFieldValueDescriptorImpl.java       |   20 +-
 .../inputconfig/impl/PostMapValuesAdapter.java  |    2 +-
 .../ambari-logsearch-logfeeder/docs/filter.md   |    4 +-
 .../ambari-logsearch-logfeeder/docs/input.md    |   10 +-
 .../docs/postMapValues.md                       |    2 +-
 .../logfeeder/common/LogEntryParseTester.java   |    2 +-
 .../ambari/logfeeder/mapper/MapperDate.java     |   42 +-
 .../ambari/logfeeder/filter/FilterJSONTest.java |   12 +-
 .../configsets/audit_logs/conf/solrconfig.xml   |    3 +-
 .../configsets/hadoop_logs/conf/solrconfig.xml  |    3 +-
 .../main/configsets/history/conf/solrconfig.xml |    3 +-
 .../common/ShipperConfigDescriptionStorage.java |   67 +
 .../logsearch/dao/SolrSchemaFieldDao.java       |    2 +-
 .../ambari/logsearch/doc/DocConstants.java      |    1 +
 .../ambari/logsearch/manager/InfoManager.java   |    9 +
 .../response/ShipperConfigDescriptionData.java  |   52 +
 .../ambari/logsearch/rest/InfoResource.java     |   10 +
 ambari-logsearch/docker/Dockerfile              |    2 +-
 ambari-logsearch/docker/bin/start.sh            |    4 +-
 ambari-logsearch/docker/logsearch-docker.sh     |   22 +-
 ambari-logsearch/pom.xml                        |    2 +-
 .../kafka/KafkaTimelineMetricsReporter.java     |    6 +-
 ambari-server/pom.xml                           |    2 +-
 ambari-server/sbin/ambari-server                |    6 +-
 .../actionmanager/ExecutionCommandWrapper.java  |   34 +-
 .../server/api/query/JpaPredicateVisitor.java   |    8 +-
 .../server/checks/AbstractCheckDescriptor.java  |    8 +-
 .../server/checks/ConfigurationMergeCheck.java  |    2 +-
 .../checks/HostsMasterMaintenanceCheck.java     |    4 +-
 .../checks/HostsRepositoryVersionCheck.java     |    8 +-
 .../server/checks/InstallPackagesCheck.java     |    6 +-
 .../server/configuration/Configuration.java     |    6 +-
 .../controller/ActionExecutionContext.java      |   28 +
 .../controller/AmbariActionExecutionHelper.java |   27 +-
 .../AmbariCustomCommandExecutionHelper.java     |   28 +-
 .../AmbariManagementControllerImpl.java         |  145 +-
 .../server/controller/AuthToLocalBuilder.java   |  328 +++--
 .../controller/DeleteIdentityHandler.java       |   77 +-
 .../server/controller/KerberosHelper.java       |    2 +-
 .../server/controller/KerberosHelperImpl.java   |    5 +-
 .../server/controller/PrereqCheckRequest.java   |   39 +-
 .../internal/AbstractProviderModule.java        |    1 +
 .../BlueprintConfigurationProcessor.java        |  123 +-
 .../internal/ClientConfigResourceProvider.java  |    5 +-
 .../ClusterStackVersionResourceProvider.java    |  163 ++-
 .../internal/HostResourceProvider.java          |    1 +
 .../internal/HttpPropertyProvider.java          |   27 +-
 .../PreUpgradeCheckResourceProvider.java        |   80 +-
 .../internal/ReadOnlyResourceProvider.java      |    2 +-
 .../server/controller/internal/Stack.java       |    2 +-
 .../server/controller/internal/UnitUpdater.java |  150 ++
 .../internal/UpgradeResourceProvider.java       |   91 +-
 .../utilities/KerberosIdentityCleaner.java      |   88 +-
 .../utilities/RemovableIdentities.java          |  145 ++
 .../controller/utilities/UsedIdentities.java    |  101 ++
 .../ServiceComponentUninstalledEvent.java       |    6 +
 .../server/events/ServiceRemovedEvent.java      |   29 +-
 .../listeners/upgrade/StackVersionListener.java |   33 +-
 .../ambari/server/orm/dao/ClusterDAO.java       |   15 +
 .../orm/entities/ClusterConfigEntity.java       |    3 +
 .../LdapToPamMigrationHelper.java               |   73 +
 .../server/security/authorization/Users.java    |    4 +
 .../upgrades/UpgradeUserKerberosDescriptor.java |  142 +-
 .../org/apache/ambari/server/state/Cluster.java |    7 +
 .../ambari/server/state/ConfigHelper.java       |   78 +-
 .../ambari/server/state/PropertyInfo.java       |    2 +
 .../apache/ambari/server/state/ServiceImpl.java |   14 +-
 .../ambari/server/state/UpgradeContext.java     |   29 +-
 .../ambari/server/state/UpgradeHelper.java      |   59 +-
 .../server/state/cluster/ClusterImpl.java       |    9 +
 .../AbstractKerberosDescriptorContainer.java    |   12 +
 .../kerberos/KerberosComponentDescriptor.java   |   15 -
 .../KerberosDescriptorUpdateHelper.java         |    9 +-
 .../kerberos/KerberosIdentityDescriptor.java    |   14 +-
 .../ambari/server/topology/AmbariContext.java   |   46 +-
 .../validators/TopologyValidatorFactory.java    |    2 +-
 .../validators/UnitValidatedProperty.java       |   95 ++
 .../topology/validators/UnitValidator.java      |   79 ++
 .../server/upgrade/UpgradeCatalog252.java       |   62 +
 .../server/upgrade/UpgradeCatalog300.java       |   18 +
 ambari-server/src/main/python/ambari-server.py  |   14 +-
 .../main/python/ambari_server/setupActions.py   |    1 +
 .../main/python/ambari_server/setupSecurity.py  |  119 +-
 .../0.1.0/package/scripts/params.py             |    3 +
 .../0.1.0/package/scripts/setup_infra_solr.py   |   17 +-
 .../0.1.0.2.3/package/scripts/atlas_client.py   |    2 +-
 .../ATLAS/0.1.0.2.3/package/scripts/params.py   |    2 +
 .../HBASE/0.96.0.2.0/package/scripts/hbase.py   |   12 +-
 .../0.96.0.2.0/package/scripts/params_linux.py  |    3 +
 .../HDFS/2.1.0.2.0/configuration/hdfs-site.xml  |    6 +
 .../HDFS/2.1.0.2.0/package/scripts/hdfs.py      |    8 +
 .../2.1.0.2.0/package/scripts/hdfs_namenode.py  |   25 +-
 .../HDFS/2.1.0.2.0/package/scripts/namenode.py  |    4 +
 .../2.1.0.2.0/package/scripts/params_linux.py   |    9 +-
 .../2.1.0.2.0/package/scripts/params_windows.py |    7 +
 .../HDFS/2.1.0.2.0/package/scripts/utils.py     |    4 +
 .../2.1.0.2.0/package/scripts/zkfc_slave.py     |    8 +-
 .../package/templates/include_hosts_list.j2     |   21 +
 .../HDFS/3.0.0.3.0/package/scripts/hdfs.py      |    8 +
 .../3.0.0.3.0/package/scripts/hdfs_namenode.py  |   17 +-
 .../3.0.0.3.0/package/scripts/params_linux.py   |    8 +-
 .../3.0.0.3.0/package/scripts/params_windows.py |    7 +
 .../HDFS/3.0.0.3.0/package/scripts/utils.py     |    2 +
 .../package/templates/include_hosts_list.j2     |   21 +
 .../package/alerts/alert_hive_metastore.py      |   11 +-
 .../package/alerts/alert_llap_app_status.py     |   12 +-
 .../HIVE/0.12.0.2.0/package/scripts/hive.py     |    3 +-
 .../0.12.0.2.0/package/scripts/params_linux.py  |    6 +
 .../0.12.0.2.0/package/scripts/service_check.py |    3 +-
 .../KAFKA/0.10.0.3.0/metainfo.xml               |    1 +
 .../KAFKA/0.10.0.3.0/package/scripts/kafka.py   |   10 +
 .../KAFKA/0.10.0.3.0/package/scripts/params.py  |    3 +
 .../common-services/KAFKA/0.10.0/metainfo.xml   |    1 +
 .../KAFKA/0.8.1/package/scripts/kafka.py        |   12 +
 .../KAFKA/0.8.1/package/scripts/params.py       |    3 +
 .../KAFKA/0.9.0/configuration/kafka-broker.xml  |    2 +-
 .../properties/audit_logs-solrconfig.xml.j2     |    3 +-
 .../properties/service_logs-solrconfig.xml.j2   |    3 +-
 .../package/alerts/alert_check_oozie_server.py  |    8 +-
 .../OOZIE/4.0.0.2.0/package/scripts/oozie.py    |   44 +-
 .../package/scripts/oozie_server_upgrade.py     |   34 +-
 .../SPARK/1.2.1/package/scripts/params.py       |   11 +-
 .../SPARK/1.2.1/package/scripts/setup_spark.py  |    6 +-
 .../1.2.1/package/scripts/spark_service.py      |    6 +-
 .../STORM/0.9.1/package/scripts/nimbus.py       |    8 +-
 .../YARN/2.1.0.2.0/configuration/yarn-site.xml  |    6 +
 .../2.1.0.2.0/package/scripts/historyserver.py  |    2 +-
 .../2.1.0.2.0/package/scripts/params_linux.py   |   23 +-
 .../2.1.0.2.0/package/scripts/params_windows.py |   10 +-
 .../package/scripts/resourcemanager.py          |   18 +-
 .../YARN/2.1.0.2.0/package/scripts/service.py   |    4 +
 .../2.1.0.2.0/package/scripts/service_check.py  |    6 +-
 .../package/templates/include_hosts_list.j2     |   21 +
 .../YARN/3.0.0.3.0/configuration/yarn-site.xml  |    6 +
 .../3.0.0.3.0/package/scripts/params_linux.py   |   11 +-
 .../3.0.0.3.0/package/scripts/params_windows.py |   10 +-
 .../package/scripts/resourcemanager.py          |   18 +-
 .../package/templates/include_hosts_list.j2     |   21 +
 .../YARN/3.0.0.3.0/service_advisor.py           |    7 +-
 .../0.6.0.2.5/configuration/zeppelin-env.xml    |    2 +-
 .../0.6.0.2.5/package/scripts/master.py         |    1 +
 .../3.4.5/package/scripts/params_linux.py       |    5 +-
 .../resources/host_scripts/alert_disk_space.py  |   10 +-
 .../host_scripts/alert_version_select.py        |   16 +-
 .../services/HDFS/configuration/hdfs-site.xml   |    6 +
 .../HDFS/package/scripts/hdfs_namenode.py       |   12 +-
 .../0.8/services/HDFS/package/scripts/params.py |   11 +-
 .../0.8/services/HDFS/package/scripts/utils.py  |    3 +
 .../package/templates/include_hosts_list.j2     |   21 +
 .../services/YARN/configuration/yarn-site.xml   |    6 +
 .../0.8/services/YARN/package/scripts/params.py |   10 +-
 .../YARN/package/scripts/resourcemanager.py     |    9 +-
 .../package/templates/include_hosts_list.j2     |   21 +
 .../services/YARN/configuration/yarn-site.xml   |    6 +
 .../services/YARN/package/scripts/params.py     |    9 +-
 .../YARN/package/scripts/resourcemanager.py     |    9 +-
 .../package/templates/exclude_hosts_list.j2     |   21 +
 .../package/templates/include_hosts_list.j2     |   21 +
 .../HDP/2.0.6/configuration/cluster-env.xml     |   16 +-
 .../hooks/before-ANY/files/changeToSecureUid.sh |   15 +-
 .../before-ANY/scripts/shared_initialization.py |   40 +-
 .../HDP/2.0.6/properties/stack_features.json    |  852 +++++------
 .../HDP/2.0.6/properties/stack_tools.json       |   16 +-
 .../services/YARN/configuration/yarn-site.xml   |    6 +
 .../services/YARN/package/scripts/params.py     |   10 +-
 .../YARN/package/scripts/resourcemanager.py     |    9 +-
 .../package/templates/include_hosts_list.j2     |   21 +
 .../stacks/HDP/2.5/services/stack_advisor.py    |   18 +-
 .../stacks/HDP/2.5/upgrades/config-upgrade.xml  |    6 +-
 .../HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml |    6 +-
 .../stacks/HDP/2.5/upgrades/upgrade-2.6.xml     |    1 +
 .../resources/stacks/HDP/2.6/repos/repoinfo.xml |    6 +-
 .../configuration/application-properties.xml    |   17 +
 .../services/HIVE/configuration/hive-env.xml    |   78 +-
 .../HIVE/configuration/hive-interactive-env.xml |   62 +-
 .../services/HIVE/configuration/hive-site.xml   |   35 +
 .../stacks/HDP/2.6/services/stack_advisor.py    |   33 +-
 .../stacks/HDP/2.6/upgrades/config-upgrade.xml  |    4 +
 .../HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml |    4 +
 .../stacks/HDP/2.6/upgrades/upgrade-2.6.xml     |    1 +
 .../HDP/3.0/configuration/cluster-env.xml       |    4 +-
 .../HDP/3.0/properties/stack_features.json      |  752 +++++-----
 .../stacks/HDP/3.0/properties/stack_tools.json  |   14 +-
 .../PERF/1.0/configuration/cluster-env.xml      |   16 +-
 .../PERF/1.0/properties/stack_features.json     |   38 +-
 .../stacks/PERF/1.0/properties/stack_tools.json |   16 +-
 .../src/main/resources/stacks/stack_advisor.py  |   20 +-
 .../checks/AbstractCheckDescriptorTest.java     |   11 +-
 .../server/checks/AtlasPresenceCheckTest.java   |    4 +-
 .../checks/ClientRetryPropertyCheckTest.java    |   22 +-
 .../checks/ComponentsInstallationCheckTest.java |   16 +-
 .../checks/ConfigurationMergeCheckTest.java     |   13 +-
 .../HiveDynamicServiceDiscoveryCheckTest.java   |   23 +-
 .../checks/HiveMultipleMetastoreCheckTest.java  |   26 +-
 .../server/checks/HostsHeartbeatCheckTest.java  |   16 +-
 .../checks/HostsMasterMaintenanceCheckTest.java |   27 +-
 .../checks/HostsRepositoryVersionCheckTest.java |   40 +-
 .../server/checks/InstallPackagesCheckTest.java |   18 +-
 ...duce2JobHistoryStatePreservingCheckTest.java |    7 +-
 .../checks/PreviousUpgradeCompletedTest.java    |   11 +-
 .../server/checks/ServicePresenceCheckTest.java |   13 +-
 .../ServicesMaintenanceModeCheckTest.java       |   16 +-
 .../ServicesNamenodeTruncateCheckTest.java      |   19 +-
 .../server/checks/ServicesUpCheckTest.java      |   17 +-
 ...nTimelineServerStatePreservingCheckTest.java |    7 +-
 .../AmbariManagementControllerImplTest.java     |   67 +-
 .../AmbariManagementControllerTest.java         |    8 +-
 .../controller/AuthToLocalBuilderTest.java      |   45 +
 .../BlueprintConfigurationProcessorTest.java    |   41 +-
 ...ClusterStackVersionResourceProviderTest.java |    4 +-
 .../internal/HttpPropertyProviderTest.java      |   11 +
 .../PreUpgradeCheckResourceProviderTest.java    |    6 +-
 .../controller/internal/UnitUpdaterTest.java    |  114 ++
 .../utilities/KerberosIdentityCleanerTest.java  |  102 +-
 .../upgrade/StackVersionListenerTest.java       |   58 +-
 .../server/orm/dao/ServiceConfigDAOTest.java    |   12 +
 .../UpgradeUserKerberosDescriptorTest.java      |   59 +-
 .../ambari/server/state/ConfigHelperTest.java   |   22 +
 .../ambari/server/state/UpgradeHelperTest.java  |   14 +-
 .../KerberosDescriptorUpdateHelperTest.java     |   70 +
 .../ClusterConfigurationRequestTest.java        |   60 +-
 .../topology/validators/UnitValidatorTest.java  |  114 ++
 .../server/upgrade/UpgradeCatalog300Test.java   |   33 +
 .../src/test/python/TestStackFeature.py         |  105 +-
 .../common-services/configs/hawq_default.json   |    6 +-
 .../python/host_scripts/TestAlertDiskSpace.py   |   16 +-
 .../2.0.6/HBASE/test_phoenix_queryserver.py     |   23 -
 .../python/stacks/2.0.6/HDFS/test_namenode.py   |    2 +-
 .../stacks/2.0.6/HIVE/test_hive_server.py       |    2 +
 .../stacks/2.0.6/OOZIE/test_oozie_server.py     |   32 +-
 .../stacks/2.0.6/YARN/test_historyserver.py     |   21 +-
 .../stacks/2.0.6/configs/altfs_plus_hdfs.json   |    2 +-
 .../python/stacks/2.0.6/configs/default.json    |    2 +-
 .../2.0.6/configs/default_ams_embedded.json     |    2 +-
 .../2.0.6/configs/default_hive_nn_ha.json       |    2 +-
 .../2.0.6/configs/default_hive_nn_ha_2.json     |    2 +-
 .../2.0.6/configs/default_hive_non_hdfs.json    |    2 +-
 .../2.0.6/configs/default_no_install.json       |    2 +-
 .../2.0.6/configs/default_oozie_mysql.json      |    2 +-
 .../default_update_exclude_file_only.json       |    2 +-
 .../2.0.6/configs/default_with_bucket.json      |    2 +-
 .../python/stacks/2.0.6/configs/flume_22.json   |    2 +-
 .../python/stacks/2.0.6/configs/flume_only.json |    2 +-
 .../stacks/2.0.6/configs/hbase_no_phx.json      |    2 +-
 .../stacks/2.0.6/configs/hbase_with_phx.json    |    2 +-
 .../test/python/stacks/2.0.6/configs/nn_eu.json |    2 +-
 .../stacks/2.0.6/configs/nn_eu_standby.json     |    2 +-
 .../2.0.6/configs/oozie_existing_sqla.json      |    2 +-
 .../stacks/2.0.6/configs/repository_file.json   |    2 +-
 .../python/stacks/2.0.6/configs/secured.json    |    2 +-
 .../2.0.6/hooks/before-ANY/test_before_any.py   |  309 ++--
 .../stacks/2.1/STORM/test_storm_nimbus.py       |   60 +-
 .../stacks/2.1/configs/default-storm-start.json |   14 +
 .../test/python/stacks/2.1/configs/default.json |   13 +
 .../2.1/configs/hive-metastore-upgrade.json     |    2 +-
 .../stacks/2.1/configs/secured-storm-start.json |   13 +
 .../test/python/stacks/2.1/configs/secured.json |   15 +-
 .../python/stacks/2.2/configs/knox_upgrade.json |    2 +-
 .../test/python/stacks/2.3/configs/ats_1_5.json |    2 +-
 .../stacks/2.4/AMBARI_INFRA/test_infra_solr.py  |    3 +
 .../stacks/2.5/common/test_stack_advisor.py     |  154 +-
 .../python/stacks/2.5/configs/hsi_default.json  |    2 +-
 .../2.5/configs/hsi_default_for_restart.json    |    2 +-
 .../test/python/stacks/2.5/configs/hsi_ha.json  |    2 +-
 .../2.5/configs/ranger-admin-default.json       |  990 ++++++-------
 .../2.5/configs/ranger-admin-secured.json       | 1108 +++++++--------
 .../stacks/2.5/configs/ranger-kms-default.json  | 1158 +++++++--------
 .../stacks/2.5/configs/ranger-kms-secured.json  | 1320 +++++++++---------
 .../2.6/configs/ranger-admin-default.json       |  953 +++++++------
 .../2.6/configs/ranger-admin-secured.json       | 1066 +++++++-------
 .../src/test/python/stacks/utils/RMFTestCase.py |    8 +-
 .../main/admin/stack_and_upgrade_controller.js  |   16 +-
 .../main/host/bulk_operations_controller.js     |   32 +-
 ambari-web/app/controllers/main/host/details.js |    2 +-
 ambari-web/app/controllers/main/service.js      |   13 +-
 .../app/controllers/wizard/step7_controller.js  |   67 +
 .../configs/stack_config_properties_mapper.js   |   14 +-
 ambari-web/app/mappers/hosts_mapper.js          |    2 +-
 ambari-web/app/messages.js                      |    6 +-
 ambari-web/app/styles/application.less          |   15 +
 .../main/host/delete_hosts_result_popup.hbs     |    8 +-
 ...ontrols_service_config_usergroup_with_id.hbs |   27 +
 ambari-web/app/utils/ajax/ajax.js               |   22 +
 ambari-web/app/utils/config.js                  |    3 +
 ambari-web/app/utils/db.js                      |   14 +-
 .../configs/service_configs_by_category_view.js |    6 +
 ambari-web/app/views/common/controls_view.js    |   39 +
 .../stack_upgrade/upgrade_version_box_view.js   |    2 +-
 ambari-web/app/views/main/host.js               |    8 +-
 .../admin/stack_and_upgrade_controller_test.js  |   40 +-
 .../test/controllers/main/service_test.js       |    4 +-
 .../upgrade_version_box_view_test.js            |   11 +-
 ambari-web/test/views/main/host_test.js         |    4 +-
 .../services/YARN/configuration/yarn-site.xml   |    6 +
 .../YARN/package/scripts/params_linux.py        |    9 +-
 .../YARN/package/scripts/params_windows.py      |   10 +-
 .../YARN/package/scripts/resourcemanager.py     |   18 +-
 .../package/templates/include_hosts_list.j2     |   21 +
 .../hive20/src/main/resources/ui/package.json   |    1 +
 .../src/main/resources/ui/package.json          |    1 +
 345 files changed, 10994 insertions(+), 5823 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/6283ae4f/ambari-server/src/main/java/org/apache/ambari/server/checks/HostsRepositoryVersionCheck.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/checks/HostsRepositoryVersionCheck.java
index 79f3598,37a9d2f..f83c39b
--- a/ambari-server/src/main/java/org/apache/ambari/server/checks/HostsRepositoryVersionCheck.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/HostsRepositoryVersionCheck.java
@@@ -78,19 -78,12 +78,19 @@@ public class HostsRepositoryVersionChec
          continue;
        }
  
-       if (null != request.getRepositoryVersion()) {
+       if (null != request.getTargetVersion()) {
          boolean found = false;
 +
 +        Set<RepositoryVersionState> allowed = EnumSet.of(RepositoryVersionState.INSTALLED,
 +            RepositoryVersionState.NOT_REQUIRED);
 +        if (request.isRevert()) {
 +          allowed.add(RepositoryVersionState.CURRENT);
 +        }
 +
          for (HostVersionEntity hve : hostVersionDaoProvider.get().findByHost(host.getHostName())) {
  
-           if (hve.getRepositoryVersion().getVersion().equals(request.getRepositoryVersion())
+           if (hve.getRepositoryVersion().getVersion().equals(request.getTargetVersion())
 -              && (hve.getState() == RepositoryVersionState.INSTALLED || hve.getState() == RepositoryVersionState.NOT_REQUIRED)) {
 +              && allowed.contains(hve.getState())) {
              found = true;
              break;
            }

http://git-wip-us.apache.org/repos/asf/ambari/blob/6283ae4f/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/6283ae4f/ambari-server/src/main/java/org/apache/ambari/server/controller/PrereqCheckRequest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/6283ae4f/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProvider.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProvider.java
index 7b03912,acf8bc1..7f87d7d
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProvider.java
@@@ -52,7 -53,7 +53,8 @@@ import org.apache.ambari.server.state.s
  import org.apache.ambari.server.state.stack.UpgradePack;
  import org.apache.ambari.server.state.stack.upgrade.Direction;
  import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
 +import org.apache.commons.lang.BooleanUtils;
+ import org.apache.commons.lang.StringUtils;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
@@@ -115,9 -117,8 +120,9 @@@ public class PreUpgradeCheckResourcePro
        UPGRADE_CHECK_CHECK_TYPE_PROPERTY_ID,
        UPGRADE_CHECK_CLUSTER_NAME_PROPERTY_ID,
        UPGRADE_CHECK_UPGRADE_TYPE_PROPERTY_ID,
-       UPGRADE_CHECK_UPGRADE_PACK_PROPERTY_ID,
-       UPGRADE_CHECK_REPOSITORY_VERSION_PROPERTY_ID,
-       UPGRADE_CHECK_FOR_REVERT_PROPERTY_ID);
++      UPGRADE_CHECK_FOR_REVERT_PROPERTY_ID,
+       UPGRADE_CHECK_TARGET_REPOSITORY_VERSION_ID_ID,
+       UPGRADE_CHECK_UPGRADE_PACK_PROPERTY_ID);
  
  
    @SuppressWarnings("serial")
@@@ -165,25 -166,26 +170,31 @@@
          throw new NoSuchResourceException(ambariException.getMessage());
        }
  
-       String stackName = cluster.getCurrentStackVersion().getStackName();
-       String sourceStackVersion = cluster.getCurrentStackVersion().getStackVersion();
+       String repositoryVersionId = (String) propertyMap.get(
+           UPGRADE_CHECK_TARGET_REPOSITORY_VERSION_ID_ID);
+ 
+       if (StringUtils.isBlank(repositoryVersionId)) {
+         throw new SystemException(
+             String.format("%s is a required property when executing upgrade checks",
+                 UPGRADE_CHECK_TARGET_REPOSITORY_VERSION_ID_ID));
+       }
  
-       final PrereqCheckRequest upgradeCheckRequest = new PrereqCheckRequest(clusterName, upgradeType);
+       final PrereqCheckRequest upgradeCheckRequest = new PrereqCheckRequest(clusterName,
+           upgradeType);
+ 
+       StackId sourceStackId = cluster.getCurrentStackVersion();
        upgradeCheckRequest.setSourceStackId(cluster.getCurrentStackVersion());
  
-       if (propertyMap.containsKey(UPGRADE_CHECK_REPOSITORY_VERSION_PROPERTY_ID)) {
-         String repositoryVersionId = propertyMap.get(UPGRADE_CHECK_REPOSITORY_VERSION_PROPERTY_ID).toString();
-         RepositoryVersionEntity repositoryVersionEntity = repositoryVersionDAO.findByStackNameAndVersion(stackName, repositoryVersionId);
-         // set some required properties on the check request
-         upgradeCheckRequest.setRepositoryVersion(repositoryVersionId);
-         upgradeCheckRequest.setTargetStackId(repositoryVersionEntity.getStackId());
-       }
+       RepositoryVersionEntity repositoryVersion = repositoryVersionDAO.findByPK(
+           Long.valueOf(repositoryVersionId));
+ 
+       upgradeCheckRequest.setTargetRepositoryVersion(repositoryVersion);
  
 +      if (propertyMap.containsKey(UPGRADE_CHECK_FOR_REVERT_PROPERTY_ID)) {
 +        Boolean forRevert = BooleanUtils.toBooleanObject(propertyMap.get(UPGRADE_CHECK_FOR_REVERT_PROPERTY_ID).toString());
 +        upgradeCheckRequest.setRevert(forRevert);
 +      }
 +
        //ambariMetaInfo.getStack(stackName, cluster.getCurrentStackVersion().getStackVersion()).getUpgradePacks()
        // TODO AMBARI-12698, filter the upgrade checks to run based on the stack and upgrade type, or the upgrade pack.
        UpgradePack upgradePack = null;

http://git-wip-us.apache.org/repos/asf/ambari/blob/6283ae4f/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/6283ae4f/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/6283ae4f/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
index d2c0ea2,0e02c77..8e7215c
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
@@@ -1040,8 -961,7 +1049,7 @@@ public class UpgradeContext 
  
        Predicate preUpgradeCheckPredicate = new PredicateBuilder().property(
            PreUpgradeCheckResourceProvider.UPGRADE_CHECK_CLUSTER_NAME_PROPERTY_ID).equals(cluster.getClusterName()).and().property(
-           PreUpgradeCheckResourceProvider.UPGRADE_CHECK_REPOSITORY_VERSION_PROPERTY_ID).equals(repositoryVersion.getVersion()).and().property(
 -          PreUpgradeCheckResourceProvider.UPGRADE_CHECK_TARGET_REPOSITORY_VERSION_ID_ID).equals(repositoryVersionId).and().property(
 +          PreUpgradeCheckResourceProvider.UPGRADE_CHECK_FOR_REVERT_PROPERTY_ID).equals(m_isRevert).and().property(
            PreUpgradeCheckResourceProvider.UPGRADE_CHECK_UPGRADE_TYPE_PROPERTY_ID).equals(type).and().property(
            PreUpgradeCheckResourceProvider.UPGRADE_CHECK_UPGRADE_PACK_PROPERTY_ID).equals(preferredUpgradePack).toPredicate();
  
@@@ -1199,5 -1119,4 +1207,4 @@@
        return hostOrderItems;
      }
    }
- 
 -}
 +}

http://git-wip-us.apache.org/repos/asf/ambari/blob/6283ae4f/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
index 578bd38,dee0e6c..fe56760
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
@@@ -102,9 -104,12 +106,15 @@@ public class AmbariContext 
    @Inject
    ConfigFactory configFactory;
  
 +  @Inject
 +  RepositoryVersionDAO repositoryVersionDAO;
 +
+   /**
+    * Used for getting configuration property values from stack and services.
+    */
+   @Inject
+   private Provider<ConfigHelper> configHelper;
+ 
    private static AmbariManagementController controller;
    private static ClusterController clusterController;
    //todo: task id's.  Use existing mechanism for getting next task id sequence

http://git-wip-us.apache.org/repos/asf/ambari/blob/6283ae4f/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
----------------------------------------------------------------------
diff --cc ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
index f35e92e,4024f05..9b92a5f
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
@@@ -2336,4 -2349,60 +2346,59 @@@ public class AmbariManagementController
      verify(injector, cluster, clusters, ambariMetaInfo, service, serviceComponent, serviceComponentHost, stackId);
    }
  
+   @Test
+   public void testCreateClusterWithRepository() throws Exception {
+     Injector injector = createNiceMock(Injector.class);
+ 
+     RepositoryVersionEntity repoVersion = createNiceMock(RepositoryVersionEntity.class);
+     RepositoryVersionDAO repoVersionDAO = createNiceMock(RepositoryVersionDAO.class);
+     expect(repoVersionDAO.findByStackAndVersion(anyObject(StackId.class),
+         anyObject(String.class))).andReturn(repoVersion).anyTimes();
+ 
+     expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null).atLeastOnce();
+     expect(injector.getInstance(Gson.class)).andReturn(null);
+     expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class));
+ 
+     StackId stackId = new StackId("HDP-2.1");
+ 
+     Cluster cluster = createNiceMock(Cluster.class);
+     Service service = createNiceMock(Service.class);
+     expect(service.getDesiredStackId()).andReturn(stackId).atLeastOnce();
+     expect(cluster.getServices()).andReturn(ImmutableMap.<String, Service>builder()
+         .put("HDFS", service)
+         .build());
+ 
+     expect(clusters.getCluster("c1")).andReturn(cluster).atLeastOnce();
+ 
+ 
+     StackInfo stackInfo = createNiceMock(StackInfo.class);
+     expect(stackInfo.getWidgetsDescriptorFileLocation()).andReturn(null).once();
+ 
+     expect(ambariMetaInfo.getStack("HDP", "2.1")).andReturn(stackInfo).atLeastOnce();
+     expect(ambariMetaInfo.getStack(stackId)).andReturn(stackInfo).atLeastOnce();
+ 
+     replay(injector, clusters, ambariMetaInfo, stackInfo, cluster, service, repoVersionDAO, repoVersion);
+ 
+     AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
+     setAmbariMetaInfo(ambariMetaInfo, controller);
+     Class<?> c = controller.getClass();
+ 
+     Field f = c.getDeclaredField("repositoryVersionDAO");
+     f.setAccessible(true);
+     f.set(controller, repoVersionDAO);
+ 
+     Properties p = new Properties();
+     p.setProperty("", "");
+     Configuration configuration = new Configuration(p);
+     f = c.getDeclaredField("configs");
+     f.setAccessible(true);
+     f.set(controller, configuration);
+ 
+     ClusterRequest cr = new ClusterRequest(null, "c1", "HDP-2.1", null);
+     cr.setRepositoryVersion("2.1.1.0-1234");
+     controller.createCluster(cr);
+ 
+     // verification
+     verify(injector, clusters, ambariMetaInfo, stackInfo, cluster, repoVersionDAO, repoVersion);
+   }
 -
  }

http://git-wip-us.apache.org/repos/asf/ambari/blob/6283ae4f/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/6283ae4f/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
----------------------------------------------------------------------


[12/50] [abbrv] ambari git commit: AMBARI-21480. NPE during "Update Kerberos Descriptor" (rlevas)

Posted by nc...@apache.org.
AMBARI-21480. NPE during "Update Kerberos Descriptor" (rlevas)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0a8c397b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0a8c397b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0a8c397b

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 0a8c397bd1944b8787befdff08bf6b95b9afb225
Parents: 4e1da58
Author: Robert Levas <rl...@hortonworks.com>
Authored: Fri Jul 14 16:42:16 2017 -0400
Committer: Robert Levas <rl...@hortonworks.com>
Committed: Fri Jul 14 16:42:16 2017 -0400

----------------------------------------------------------------------
 .../KerberosDescriptorUpdateHelper.java         |  9 ++-
 .../KerberosDescriptorUpdateHelperTest.java     | 70 ++++++++++++++++++++
 2 files changed, 77 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/0a8c397b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelper.java
index f05b62b..dd865be 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelper.java
@@ -340,6 +340,11 @@ public class KerberosDescriptorUpdateHelper {
   /**
    * Processes a {@link KerberosIdentityDescriptor} to change the user-supplied data based on the changes
    * observed between the previous stack version's data and the new stack version's data.
+   * <p>
+   * It is expected that <code>newStackIdentity</code> and <code>userIdentity</code> are not null.
+   * However, <code>previousStackIdentity</code> may be null in the event the user added a Kerberos
+   * identity that was later added to the new Kerberos descriptor.  In this case, the user's values
+   * for the principal name and keytab file are kept while adding any other changes from the new stack.
    *
    * @param previousStackIdentity a {@link KerberosIdentityDescriptor} from the previous stack version's Kerberos descriptor
    * @param newStackIdentity      a {@link KerberosIdentityDescriptor} from the new stack version's Kerberos descriptor
@@ -357,7 +362,7 @@ public class KerberosDescriptorUpdateHelper {
     // If the new identity definition is a reference and no longer has a principal definition,
     // Ignore any user changes to the old principal definition.
     if (updatedValuePrincipal != null) {
-      KerberosPrincipalDescriptor oldValuePrincipal = previousStackIdentity.getPrincipalDescriptor();
+      KerberosPrincipalDescriptor oldValuePrincipal = (previousStackIdentity == null) ? null : previousStackIdentity.getPrincipalDescriptor();
       String previousValuePrincipalValue = null;
       KerberosPrincipalDescriptor userValuePrincipal = userIdentity.getPrincipalDescriptor();
       String userValuePrincipalValue = null;
@@ -380,7 +385,7 @@ public class KerberosDescriptorUpdateHelper {
     // If the new identity definition is a reference and no longer has a keytab definition,
     // Ignore any user changes to the old keytab definition.
     if (updatedValueKeytab != null) {
-      KerberosKeytabDescriptor oldValueKeytab = previousStackIdentity.getKeytabDescriptor();
+      KerberosKeytabDescriptor oldValueKeytab = (previousStackIdentity == null) ? null : previousStackIdentity.getKeytabDescriptor();
       String previousValueKeytabFile = null;
       KerberosKeytabDescriptor userValueKeytab = userIdentity.getKeytabDescriptor();
       String userValueKeytabFile = null;

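A minimal sketch of the merge policy the updated javadoc describes (this is
not the Ambari implementation; names are illustrative): when the previous
stack has no definition to compare against, the user's customization wins,
otherwise the usual keep-user-edits-or-take-new-default choice applies.

  def merge_value(previous, user, new):
      # previous may be None: the identity was user-added and only later
      # appeared in the new stack descriptor, so keep the user's value.
      if previous is None:
          return user
      # Keep a deliberate user customization; otherwise take the new default.
      return user if user != previous else new
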
http://git-wip-us.apache.org/repos/asf/ambari/blob/0a8c397b/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelperTest.java
index 247d17e..e717190 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelperTest.java
@@ -272,6 +272,16 @@ public class KerberosDescriptorUpdateHelperTest extends EasyMockSupport {
             "      \"keytab\": {" +
             "        \"file\": \"${keytab_dir}/ambari.server.keytab\"" +
             "      }" +
+            "    }," +
+            "    {" +
+            "      \"name\": \"future_identity\"," +
+            "      \"principal\": {" +
+            "        \"value\": \"CHANGED_future${principal_suffix}@${realm}\"," +
+            "        \"type\": \"user\"" +
+            "      }," +
+            "      \"keytab\": {" +
+            "        \"file\": \"${keytab_dir}/future.user.keytab\"" +
+            "      }" +
             "    }" +
             "  ]" +
             "}");
@@ -328,6 +338,26 @@ public class KerberosDescriptorUpdateHelperTest extends EasyMockSupport {
             "      \"keytab\": {" +
             "        \"file\": \"${keytab_dir}/ambari.server.keytab\"" +
             "      }" +
+            "    }," +
+            "    {" +
+            "      \"name\": \"custom_identity\"," +
+            "      \"principal\": {" +
+            "        \"value\": \"custom${principal_suffix}@${realm}\"," +
+            "        \"type\": \"user\"" +
+            "      }," +
+            "      \"keytab\": {" +
+            "        \"file\": \"${keytab_dir}/custom.user.keytab\"" +
+            "      }" +
+            "    }," +
+            "    {" +
+            "      \"name\": \"future_identity\"," +
+            "      \"principal\": {" +
+            "        \"value\": \"future${principal_suffix}@${realm}\"," +
+            "        \"type\": \"user\"" +
+            "      }," +
+            "      \"keytab\": {" +
+            "        \"file\": \"${keytab_dir}/future.user.keytab\"" +
+            "      }" +
             "    }" +
             "  ]" +
             "}");
@@ -343,6 +373,26 @@ public class KerberosDescriptorUpdateHelperTest extends EasyMockSupport {
             "{\n" +
                 "  \"identities\": [\n" +
                 "    {\n" +
+                "      \"name\": \"future_identity\",\n" +
+                "      \"principal\": {\n" +
+                "        \"value\": \"future${principal_suffix}@${realm}\",\n" +
+                "        \"type\": \"user\"\n" +
+                "      },\n" +
+                "      \"keytab\": {\n" +
+                "        \"file\": \"${keytab_dir}/future.user.keytab\"\n" +
+                "      }\n" +
+                "    },\n" +
+                "    {\n" +
+                "      \"name\": \"custom_identity\",\n" +
+                "      \"principal\": {\n" +
+                "        \"value\": \"custom${principal_suffix}@${realm}\",\n" +
+                "        \"type\": \"user\"\n" +
+                "      },\n" +
+                "      \"keytab\": {\n" +
+                "        \"file\": \"${keytab_dir}/custom.user.keytab\"\n" +
+                "      }\n" +
+                "    },\n" +
+                "    {\n" +
                 "      \"name\": \"spnego\",\n" +
                 "      \"principal\": {\n" +
                 "        \"value\": \"CHANGED_HTTP/_HOST@${realm}\",\n" +
@@ -405,6 +455,26 @@ public class KerberosDescriptorUpdateHelperTest extends EasyMockSupport {
                 "      }\n" +
                 "    },\n" +
                 "    {\n" +
+                "      \"name\": \"custom_identity\",\n" +
+                "      \"principal\": {\n" +
+                "        \"value\": \"custom${principal_suffix}@${realm}\",\n" +
+                "        \"type\": \"user\"\n" +
+                "      },\n" +
+                "      \"keytab\": {\n" +
+                "        \"file\": \"${keytab_dir}/custom.user.keytab\"\n" +
+                "      }\n" +
+                "    },\n" +
+                "    {\n" +
+                "      \"name\": \"future_identity\",\n" +
+                "      \"principal\": {\n" +
+                "        \"value\": \"future${principal_suffix}@${realm}\",\n" +
+                "        \"type\": \"user\"\n" +
+                "      },\n" +
+                "      \"keytab\": {\n" +
+                "        \"file\": \"${keytab_dir}/future.user.keytab\"\n" +
+                "      }\n" +
+                "    },\n" +
+                "    {\n" +
                 "      \"name\": \"spnego\",\n" +
                 "      \"principal\": {\n" +
                 "        \"value\": \"CHANGED_HTTP/_HOST@${realm}\",\n" +


[44/50] [abbrv] ambari git commit: Cross-stack upgrade, Oozie restart fails with ext-2.2.zip missing error, stack_tools.py is missing get_stack_name in __all__, disable BigInsights in UI (Alejandro Fernandez via smohanty)

Posted by nc...@apache.org.
Cross-stack upgrade, Oozie restart fails with ext-2.2.zip missing error, stack_tools.py is missing get_stack_name in __all__, disable BigInsights in UI (Alejandro Fernandez via smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/eca55998
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/eca55998
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/eca55998

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: eca55998d38f6a8edbb156d84c6cc963c68e85a9
Parents: cfd7bb4
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Fri Jul 21 12:07:34 2017 -0700
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Fri Jul 21 12:10:28 2017 -0700

----------------------------------------------------------------------
 .../OOZIE/4.0.0.2.0/package/scripts/oozie.py    | 44 ++++++++++++++++----
 .../package/scripts/oozie_server_upgrade.py     | 34 +++++++++------
 .../stacks/2.0.6/OOZIE/test_oozie_server.py     | 32 ++++++++++----
 3 files changed, 84 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/eca55998/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
index d916d3b..3467ed2 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
@@ -215,7 +215,27 @@ def oozie_ownership():
     group = params.user_group
   )
 
-def oozie_server_specific():
+def get_oozie_ext_zip_source_paths(upgrade_type, params):
+  """
+  Get an ordered list of Oozie ext zip file paths from the source stack.
+  :param upgrade_type:  Upgrade type will be None if not in the middle of a stack upgrade.
+  :param params: Expected to contain fields for ext_js_path, upgrade_direction, source_stack_name, and ext_js_file
+  :return: Source paths to use for Oozie extension zip file
+  """
+  # Default to /usr/share/$TARGETSTACK-oozie/ext-2.2.zip
+  paths = []
+  source_ext_js_path = params.ext_js_path
+  # Preferred location used by HDP and BigInsights 4.2.5
+  if upgrade_type is not None and params.upgrade_direction == Direction.UPGRADE:
+    source_ext_js_path = "/usr/share/" + params.source_stack_name.upper() + "-oozie/" + params.ext_js_file
+  paths.append(source_ext_js_path)
+
+  # Alternate location used by BigInsights 4.2.0 when migrating to another stack.
+  paths.append("/var/lib/oozie/" + params.ext_js_file)
+
+  return paths
+
+def oozie_server_specific(upgrade_type):
   import params
   
   no_op_test = as_user(format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"), user=params.oozie_user)
@@ -249,13 +269,23 @@ def oozie_server_specific():
   )
 
   configure_cmds = []
-  configure_cmds.append(('cp', params.ext_js_path, params.oozie_libext_dir))
-  configure_cmds.append(('chown', format('{oozie_user}:{user_group}'), format('{oozie_libext_dir}/{ext_js_file}')))
+  # Default to /usr/share/$TARGETSTACK-oozie/ext-2.2.zip as the first path
+  source_ext_zip_paths = get_oozie_ext_zip_source_paths(upgrade_type, params)
+  
+  # Copy the first oozie ext-2.2.zip file that is found.
+  # This uses a list to handle the cases when migrating from some versions of BigInsights to HDP.
+  if source_ext_zip_paths is not None:
+    for source_ext_zip_path in source_ext_zip_paths:
+      if os.path.isfile(source_ext_zip_path):
+        configure_cmds.append(('cp', source_ext_zip_path, params.oozie_libext_dir))
+        configure_cmds.append(('chown', format('{oozie_user}:{user_group}'), format('{oozie_libext_dir}/{ext_js_file}')))
+
+        Execute(configure_cmds,
+                not_if=no_op_test,
+                sudo=True,
+                )
+        break
   
-  Execute( configure_cmds,
-    not_if  = no_op_test,
-    sudo = True,
-  )
   
   Directory(params.oozie_webapps_conf_dir,
             owner = params.oozie_user,

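The copy logic above reduces to "use the first candidate path that exists
on disk". A small sketch of that selection, assuming the two candidate
locations built by get_oozie_ext_zip_source_paths() for an HDP target:

  import os

  def first_existing(paths):
      # Return the first candidate that is a regular file, or None.
      for path in paths or []:
          if os.path.isfile(path):
              return path
      return None

  # Stack share directory first, then the BigInsights 4.2.0 fallback:
  src = first_existing(["/usr/share/HDP-oozie/ext-2.2.zip",
                        "/var/lib/oozie/ext-2.2.zip"])
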
http://git-wip-us.apache.org/repos/asf/ambari/blob/eca55998/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server_upgrade.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server_upgrade.py
index 402c7cb..3edb042 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server_upgrade.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server_upgrade.py
@@ -86,21 +86,31 @@ class OozieUpgrade(Script):
         raise Fail("There are no files at {0} matching {1}".format(
           hadoop_client_new_lib_dir, hadoop_lzo_pattern))
 
-    # copy ext ZIP to libext dir
-    oozie_ext_zip_file = params.ext_js_path
-
     # something like <stack-root>/current/oozie-server/libext/ext-2.2.zip
     oozie_ext_zip_target_path = os.path.join(params.oozie_libext_dir, params.ext_js_file)
 
-    if not os.path.isfile(oozie_ext_zip_file):
-      raise Fail("Unable to copy {0} because it does not exist".format(oozie_ext_zip_file))
-
-    Logger.info("Copying {0} to {1}".format(oozie_ext_zip_file, params.oozie_libext_dir))
-    Execute(("cp", oozie_ext_zip_file, params.oozie_libext_dir), sudo=True)
-    Execute(("chown", format("{oozie_user}:{user_group}"), oozie_ext_zip_target_path), sudo=True)
-    File(oozie_ext_zip_target_path,
-         mode=0644
-    )
+    # Copy ext ZIP to libext dir
+    # Default to /usr/share/$TARGETSTACK-oozie/ext-2.2.zip as the first path
+    source_ext_zip_paths = oozie.get_oozie_ext_zip_source_paths(upgrade_type, params)
+
+    found_at_least_one_oozie_ext_file = False
+
+    # Copy the first oozie ext-2.2.zip file that is found.
+    # This uses a list to handle the cases when migrating from some versions of BigInsights to HDP.
+    if source_ext_zip_paths is not None:
+      for source_ext_zip_path in source_ext_zip_paths:
+        if os.path.isfile(source_ext_zip_path):
+          found_at_least_one_oozie_ext_file = True
+          Logger.info("Copying {0} to {1}".format(source_ext_zip_path, params.oozie_libext_dir))
+          Execute(("cp", source_ext_zip_path, params.oozie_libext_dir), sudo=True)
+          Execute(("chown", format("{oozie_user}:{user_group}"), oozie_ext_zip_target_path), sudo=True)
+          File(oozie_ext_zip_target_path,
+               mode=0644
+               )
+          break
+
+    if not found_at_least_one_oozie_ext_file:
+      raise Fail("Unable to find any Oozie source extension files from the following paths {0}".format(source_ext_zip_paths))
 
     # Redownload jdbc driver to a new current location
     oozie.download_database_library_if_needed()

http://git-wip-us.apache.org/repos/asf/ambari/blob/eca55998/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
index 426c36a..5ef6ad9 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
@@ -44,7 +44,9 @@ class TestOozieServer(RMFTestCase):
 
   @patch.object(shell, "call")
   @patch('os.path.exists', new=MagicMock(side_effect = [False, True, False, True]))
-  def test_configure_default(self, call_mocks):
+  @patch("os.path.isfile")
+  def test_configure_default(self, isfile_mock, call_mocks):
+    isfile_mock.return_value = True
     call_mocks = MagicMock(return_value=(0, "New Oozie WAR file with added"))
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server.py",
                        classname = "OozieServer",
@@ -59,7 +61,10 @@ class TestOozieServer(RMFTestCase):
 
   @patch.object(shell, "call")
   @patch('os.path.exists', new=MagicMock(side_effect = [False, True, False, False, True]))
-  def test_configure_default_mysql(self, call_mocks):
+  @patch("os.path.isfile")
+  def test_configure_default_mysql(self, isfile_mock, call_mocks):
+    # Mock the isfile calls that check whether the oozie ext-2.2.zip file needs to be copied
+    isfile_mock.side_effect = [True, False]
     call_mocks = MagicMock(return_value=(0, "New Oozie WAR file with added"))
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server.py",
                        classname = "OozieServer",
@@ -281,7 +286,10 @@ class TestOozieServer(RMFTestCase):
 
   @patch.object(shell, "call")
   @patch('os.path.exists', new=MagicMock(side_effect = [False, True, False, False, True]))
-  def test_configure_existing_sqla(self, call_mocks):
+  @patch("os.path.isfile")
+  def test_configure_existing_sqla(self, isfile_mock, call_mocks):
+    # Mock the isfile calls that check whether the oozie ext-2.2.zip file needs to be copied
+    isfile_mock.side_effect = [True, False]
     call_mocks = MagicMock(return_value=(0, "New Oozie WAR file with added"))
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server.py",
                        classname = "OozieServer",
@@ -602,7 +610,10 @@ class TestOozieServer(RMFTestCase):
 
   @patch.object(shell, "call")
   @patch('os.path.exists', new=MagicMock(side_effect = [False, True, False, True, True, True]))
-  def test_configure_secured(self, call_mocks):
+  @patch("os.path.isfile")
+  def test_configure_secured(self, isfile_mock, call_mocks):
+    # Mock the isfile calls that check whether the oozie ext-2.2.zip file needs to be copied
+    isfile_mock.side_effect = [True, False]
     call_mocks = MagicMock(return_value=(0, "New Oozie WAR file with added"))
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server.py",
                        classname = "OozieServer",
@@ -616,8 +627,11 @@ class TestOozieServer(RMFTestCase):
     self.assertNoMoreResources()
 
   @patch.object(shell, "call")
+  @patch("os.path.isfile")
   @patch('os.path.exists', new=MagicMock(side_effect = [False, True, False, True, True, True]))
-  def test_configure_secured_ha(self, call_mocks):
+  def test_configure_secured_ha(self, isfile_mock, call_mocks):
+    # Mock the isfile calls that check whether the oozie ext-2.2.zip file needs to be copied
+    isfile_mock.side_effect = [True, False]
     call_mocks = MagicMock(return_value=(0, "New Oozie WAR file with added"))
 
     config_file = "stacks/2.0.6/configs/secured.json"
@@ -650,7 +664,8 @@ class TestOozieServer(RMFTestCase):
   @patch("os.path.isfile")
   @patch('os.path.exists', new=MagicMock(side_effect = [False, True, False, True, True, True]))
   def test_start_secured(self, isfile_mock, call_mocks):
-    isfile_mock.return_value = True
    # Mock the isfile calls that check whether the oozie ext-2.2.zip file needs to be copied
    isfile_mock.side_effect = [True, False]
     call_mocks = MagicMock(return_value=(0, "New Oozie WAR file with added"))
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server.py",
                          classname = "OozieServer",
@@ -1133,7 +1148,10 @@ class TestOozieServer(RMFTestCase):
 
   @patch.object(shell, "call")
   @patch('os.path.exists', new=MagicMock(side_effect = [False, True, False, True, True]))
-  def test_configure_default_hdp22(self, call_mocks):
+  @patch("os.path.isfile")
+  def test_configure_default_hdp22(self, isfile_mock, call_mocks):
+    # Mock the isfile calls that check whether the oozie ext-2.2.zip file needs to be copied
+    isfile_mock.side_effect = [True, False]
     call_mocks = MagicMock(return_value=(0, "New Oozie WAR file with added"))
     config_file = "stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:

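The tests above script the two os.path.isfile() probes with a list-valued
side_effect, so the first candidate path "exists" and the second does not.
A quick sketch of the mechanism, and of why the attribute name matters:

  from mock import MagicMock  # unittest.mock on Python 3

  isfile_mock = MagicMock()
  isfile_mock.side_effect = [True, False]  # one value consumed per call
  assert isfile_mock("/usr/share/HDP-oozie/ext-2.2.zip") is True
  assert isfile_mock("/var/lib/oozie/ext-2.2.zip") is False

  # Assigning to a misspelled attribute such as side_effects merely creates
  # a new attribute on the mock; every call would then return a fresh
  # MagicMock instead of the scripted values.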

[13/50] [abbrv] ambari git commit: AMBARI-21460. Add new kafka client properties to the ambari managed atlas config (smohanty)

Posted by nc...@apache.org.
AMBARI-21460. Add new kafka client properties to the ambari managed atlas config (smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c7f42285
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c7f42285
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c7f42285

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: c7f42285a2bd36a215b6c8988cfd7fd025461285
Parents: 0a8c397
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Fri Jul 14 15:42:52 2017 -0700
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Fri Jul 14 15:44:25 2017 -0700

----------------------------------------------------------------------
 .../stacks/HDP/2.5/upgrades/config-upgrade.xml     |  4 ++++
 .../HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml    |  6 +++++-
 .../stacks/HDP/2.5/upgrades/upgrade-2.6.xml        |  1 +
 .../ATLAS/configuration/application-properties.xml | 17 +++++++++++++++++
 .../stacks/HDP/2.6/upgrades/config-upgrade.xml     |  4 ++++
 .../HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml    |  4 ++++
 .../stacks/HDP/2.6/upgrades/upgrade-2.6.xml        |  1 +
 7 files changed, 36 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c7f42285/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
index 30796cc..db3ef59 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
@@ -153,6 +153,10 @@
             <type>atlas-env</type>
             <replace key="content" find="-Xloggc:$ATLAS_LOG_DIRgc-worker.log" replace-with="-Xloggc:$ATLAS_LOG_DIR/gc-worker.log"/>
           </definition>
+          <definition xsi:type="configure" id="hdp_2_6_atlas_kafka_auto_commit_enable_property_delete" summary="Updating Atlas Kafka configurations.">
+            <type>application-properties</type>
+            <transfer operation="delete" delete-key="atlas.kafka.auto.commit.enable"/>
+          </definition>
         </changes>
       </component>
     </service>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c7f42285/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
index 1f37389..cfd429f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
@@ -456,7 +456,7 @@
           <summary>Updating the Atlas Log4J properties to include parameterizations</summary>
         </task>
       </execute-stage>
-      
+
       <execute-stage service="ATLAS" component="ATLAS_SERVER" title="Updating Atlas TLS Exclude Protocols">
         <task xsi:type="configure" id="hdp_2_5_4_0_atlas_exclude_tls_protocol">
           <summary>Updating Atlas TLS Exclude Protocols to exclude TLS v1.2</summary>
@@ -475,6 +475,10 @@
         </task>
       </execute-stage>
 
+      <execute-stage service="ATLAS" component="ATLAS_SERVER" title="Updating Atlas Kafka configurations.">
+        <task xsi:type="configure" id="hdp_2_6_atlas_kafka_auto_commit_enable_property_delete"/>
+      </execute-stage>
+
       <!--KAFKA-->
       <execute-stage service="KAFKA" component="KAFKA_BROKER" title="Parameterizing Kafka Log4J Properties">
         <task xsi:type="configure" id="kafka_log4j_parameterize">

http://git-wip-us.apache.org/repos/asf/ambari/blob/c7f42285/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
index 22c9a8d..840b17d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
@@ -1101,6 +1101,7 @@
           <task xsi:type="configure" id="hdp_2_5_4_0_atlas_exclude_tls_protocol"/>
           <task xsi:type="configure" id="increase_atlas_zookeeper_timeouts"/>
           <task xsi:type="configure" id="atlas_env_gc_worker"/>
+          <task xsi:type="configure" id="hdp_2_6_atlas_kafka_auto_commit_enable_property_delete"/>
         </pre-upgrade>
         <pre-downgrade />
         <upgrade>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c7f42285/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/configuration/application-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/configuration/application-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/configuration/application-properties.xml
index 91de1b0..c271dc3 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/configuration/application-properties.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/configuration/application-properties.xml
@@ -92,4 +92,21 @@
     </value-attributes>
     <on-ambari-upgrade add="true"/>
   </property>
+  <property>
+    <name>atlas.kafka.session.timeout.ms</name>
+    <value>30000</value>
+    <description>New Kafka consumer API</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.kafka.enable.auto.commit</name>
+    <value>false</value>
+    <description>New Kafka consumer API</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.kafka.auto.commit.enable</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c7f42285/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
index 6dd2129..c2c1532 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
@@ -171,6 +171,10 @@
             <type>atlas-env</type>
             <replace key="content" find="-Xloggc:$ATLAS_LOG_DIRgc-worker.log" replace-with="-Xloggc:$ATLAS_LOG_DIR/gc-worker.log"/>
           </definition>
+          <definition xsi:type="configure" id="hdp_2_6_atlas_kafka_auto_commit_enable_property_delete" summary="Updating Atlas Kafka configurations.">
+            <type>application-properties</type>
+            <transfer operation="delete" delete-key="atlas.kafka.auto.commit.enable"/>
+          </definition>
         </changes>
       </component>
     </service>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c7f42285/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
index e262971..df609cd 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
@@ -358,6 +358,10 @@
         <task xsi:type="configure" id="atlas_env_gc_worker"/>
       </execute-stage>
 
+      <execute-stage service="ATLAS" component="ATLAS_SERVER" title="Updating Atlas Kafka configurations.">
+        <task xsi:type="configure" id="hdp_2_6_atlas_kafka_auto_commit_enable_property_delete"/>
+      </execute-stage>
+
       <!-- KMS -->
       <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Apply config changes for Ranger Kms plugin">
         <task xsi:type="configure" id="hdp_2_6_maint_ranger_kms_plugin_cluster_name"/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c7f42285/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
index 6b01ce9..b376fa7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
@@ -1034,6 +1034,7 @@
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_6_maint_ranger_atlas_plugin_cluster_name"/>
           <task xsi:type="configure" id="atlas_env_gc_worker"/>
+          <task xsi:type="configure" id="hdp_2_6_atlas_kafka_auto_commit_enable_property_delete"/>
         </pre-upgrade>
         <pre-downgrade/> <!--  no-op to prevent config changes on downgrade -->
         <upgrade>


[07/50] [abbrv] ambari git commit: AMBARI-21426. Apply ZEPPELIN-2698 related changes in Ambari (prabhjyotsingh via Venkata Sairam)

Posted by nc...@apache.org.
AMBARI-21426. Apply ZEPPELIN-2698 related changes in Ambari (prabhjyotsingh via Venkata Sairam)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/63186bf3
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/63186bf3
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/63186bf3

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 63186bf3eb1bf0501e0c2450f85467a0bc6adf12
Parents: 853a5d4
Author: Venkata Sairam <ve...@gmail.com>
Authored: Fri Jul 14 12:35:26 2017 +0530
Committer: Venkata Sairam <ve...@gmail.com>
Committed: Fri Jul 14 12:35:52 2017 +0530

----------------------------------------------------------------------
 .../ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml              | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/63186bf3/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml
index 4032b2c..80ac2bb 100644
--- a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml
@@ -95,7 +95,7 @@ export ZEPPELIN_INTP_CLASSPATH_OVERRIDES="{{external_dependency_conf}}"
 ## Kerberos ticket refresh setting
 ##
 export KINIT_FAIL_THRESHOLD=5
-export LAUNCH_KERBEROS_REFRESH_INTERVAL=1d
+export KERBEROS_REFRESH_INTERVAL=1d
 
 ## Use provided spark installation ##
 ## defining SPARK_HOME makes Zeppelin run spark interpreter process using spark-submit


[31/50] [abbrv] ambari git commit: AMBARI-21521 Include/exclude files update is triggered when master is not started if cluster is deployed via BP (dsen)

Posted by nc...@apache.org.
AMBARI-21521 Include/exclude files update is triggered when master is not started if cluster is deployed via BP (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0a42f535
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0a42f535
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0a42f535

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 0a42f535447b8a09f57bac3fc354feebeb1750c8
Parents: 30cd715
Author: Dmytro Sen <ds...@apache.org>
Authored: Wed Jul 19 18:38:00 2017 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Wed Jul 19 18:38:00 2017 +0300

----------------------------------------------------------------------
 .../controller/AmbariManagementControllerImpl.java   | 15 +++++++++++++--
 .../AmbariManagementControllerImplTest.java          |  1 -
 2 files changed, 13 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/0a42f535/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index fac7b94..4229d34 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -3358,9 +3358,8 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
               }
             }
             try {
-              Service s = cluster.getService(serviceName);
               //Filter services whose masters are not started
-              if (s.getServiceComponent(masterComponentName).getDesiredState() == State.STARTED) {
+              if (isServiceComponentStartedOnAnyHost(cluster, serviceName, masterComponentName)) {
                 serviceMasterForDecommissionMap.put(serviceName, masterComponentName);
               } else {
                 LOG.info(String.format("Not adding %s service from include/exclude files refresh map because it's master is not started", serviceName));
@@ -3389,6 +3388,18 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     }
   }
 
+  private boolean isServiceComponentStartedOnAnyHost(Cluster cluster, String serviceName, String masterComponentName) throws AmbariException {
+    Service service = cluster.getService(serviceName);
+    ServiceComponent serviceComponent = service.getServiceComponent(masterComponentName);
+    Map<String, ServiceComponentHost> schMap = serviceComponent.getServiceComponentHosts();
+    for (ServiceComponentHost sch : schMap.values()) {
+       if (sch.getState() == State.STARTED) {
+         return true;
+       }
+    }
+    return false;
+  }
+
   @Override
   public RequestStatusResponse createAndPersistStages(Cluster cluster, Map<String, String> requestProperties,
                                                       Map<String, String> requestParameters,

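The new helper changes the filter from "the component's desired state is
STARTED" to "the component is actually started on at least one host", which
is what matters when a blueprint deployment has registered masters that are
not running yet. In sketch form (illustrative names):

  def started_on_any_host(host_component_states):
      # The service joins the include/exclude refresh map only if at least
      # one host component of its master is really STARTED.
      return any(state == "STARTED" for state in host_component_states)
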
http://git-wip-us.apache.org/repos/asf/ambari/blob/0a42f535/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
index 1f2c332..4024f05 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
@@ -2405,5 +2405,4 @@ public class AmbariManagementControllerImplTest {
     verify(injector, clusters, ambariMetaInfo, stackInfo, cluster, repoVersionDAO, repoVersion);
   }
 
-
 }


[27/50] [abbrv] ambari git commit: AMBARI-21473. Zeppelin does not start and returns: params.zookeeper_znode_parent not in interpreter['properties']['phoenix.url']: KeyError: 'phoenix.url' (Prabhjyot Singh via Venkata Sairam)

Posted by nc...@apache.org.
AMBARI-21473. Zeppelin does not start and returns: params.zookeeper_znode_parent not in interpreter['properties']['phoenix.url']: KeyError: 'phoenix.url' (Prabhjyot Singh via Venkata Sairam)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/56f05f09
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/56f05f09
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/56f05f09

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 56f05f099aae18d11f849266d14bccf36ae79ad0
Parents: 8de6517
Author: Venkata Sairam <ve...@gmail.com>
Authored: Wed Jul 19 14:27:31 2017 +0530
Committer: Venkata Sairam <ve...@gmail.com>
Committed: Wed Jul 19 14:27:31 2017 +0530

----------------------------------------------------------------------
 .../common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py    | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/56f05f09/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py
index 0013ab0..ba46dc8 100644
--- a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py
@@ -295,6 +295,7 @@ class Master(Script):
           interpreter['properties']['zeppelin.jdbc.keytab.location'] = params.zeppelin_kerberos_keytab
           if params.zookeeper_znode_parent \
               and params.hbase_zookeeper_quorum \
+              and 'phoenix.url' in interpreter['properties'] \
               and params.zookeeper_znode_parent not in interpreter['properties']['phoenix.url']:
             interpreter['properties']['phoenix.url'] = "jdbc:phoenix:" + \
                                                        params.hbase_zookeeper_quorum + ':' + \

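The one-line fix is a dictionary-membership guard: the and-chain now
short-circuits before indexing a property that was never configured. A
minimal illustration (the znode and URL values are placeholders):

  props = {}  # an interpreter with no phoenix.url configured

  # Without the guard, props['phoenix.url'] raises KeyError. With it, the
  # chain short-circuits to False and the rewrite is skipped:
  if 'phoenix.url' in props and '/hbase-unsecure' not in props['phoenix.url']:
      props['phoenix.url'] = 'jdbc:phoenix:host1,host2:/hbase-unsecure'
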

[22/50] [abbrv] ambari git commit: AMBARI-21386. After install packages, upgrade button does not work (alexantonenko)

Posted by nc...@apache.org.
AMBARI-21386. After install packages, upgrade button does not work (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/01d60f4f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/01d60f4f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/01d60f4f

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 01d60f4f8b8c32238ba840e109e7b7e8b9de1774
Parents: ba2a29f
Author: Alex Antonenko <hi...@gmail.com>
Authored: Mon Jul 10 15:21:14 2017 +0300
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Mon Jul 17 18:24:07 2017 +0300

----------------------------------------------------------------------
 ambari-web/app/utils/ajax/ajax.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/01d60f4f/ambari-web/app/utils/ajax/ajax.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/ajax/ajax.js b/ambari-web/app/utils/ajax/ajax.js
index d6e6dfa..f77b386 100644
--- a/ambari-web/app/utils/ajax/ajax.js
+++ b/ambari-web/app/utils/ajax/ajax.js
@@ -2287,7 +2287,7 @@ var urls = {
     mock: '/data/users/privileges_{userName}.json'
   },
   'router.login.clusters': {
-    'real': '/clusters?fields=Clusters/provisioning_state,Clusters/security_type',
+    'real': '/clusters?fields=Clusters/provisioning_state,Clusters/security_type,Clusters/version',
     'mock': '/data/clusters/info.json'
   },
   'router.login.message': {


[48/50] [abbrv] ambari git commit: AMBARI-21418. Ambari rebuilds custom auth_to_local rules, changing their case-sensitivity option (/L) depending on the case_insensitive_username_rules setting. (amagyar)

Posted by nc...@apache.org.
AMBARI-21418. Ambari rebuilds custom auth_to_local rules, changing their case-sensitivity option (/L) depending on the case_insensitive_username_rules setting. (amagyar)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/bfe772be
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/bfe772be
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/bfe772be

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: bfe772be2858dbf488aa940b0e513141fb018dc1
Parents: 95e7719
Author: Attila Magyar <am...@hortonworks.com>
Authored: Mon Jul 24 14:55:50 2017 +0200
Committer: Attila Magyar <am...@hortonworks.com>
Committed: Mon Jul 24 14:55:50 2017 +0200

----------------------------------------------------------------------
 .../server/controller/AuthToLocalBuilder.java   | 328 +++++++++++--------
 .../controller/AuthToLocalBuilderTest.java      |  45 +++
 2 files changed, 241 insertions(+), 132 deletions(-)
----------------------------------------------------------------------

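For context, an auth_to_local rule has the shape
RULE:[n:pattern](regexp)s/pattern/replacement/[modifier][/L], where the
optional trailing /L lowercases the mapped username. The patch below
extends the parser's regular expression to capture that flag, so re-parsed
custom rules keep their case handling. A quick check of the new pattern
against the rule shape the builder emits for a case-insensitive default
realm (EXAMPLE.COM is a placeholder):

  import re

  # Pattern copied from the diff below; group 8 is the new /L capture.
  RULE = re.compile(
      r"RULE:\s*\[\s*(\d)\s*:\s*(.+?)(?:@(.+?))??\s*\]\s*"
      r"\((.+?)\)\s*s/(.*?)/(.*?)/([a-zA-Z]*)((/L)?)(?:.|\n)*")

  m = RULE.match("RULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*///L")
  assert m.group(1) == "1"               # expected component count
  assert m.group(4) == ".*@EXAMPLE.COM"  # principal match expression
  assert m.group(8) == "/L"              # case-insensitive mapping retained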

http://git-wip-us.apache.org/repos/asf/ambari/blob/bfe772be/ambari-server/src/main/java/org/apache/ambari/server/controller/AuthToLocalBuilder.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AuthToLocalBuilder.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AuthToLocalBuilder.java
index 1d4abdd..b301ed2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AuthToLocalBuilder.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AuthToLocalBuilder.java
@@ -28,7 +28,11 @@ import java.util.TreeSet;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import javax.annotation.Nullable;
+
 import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang.builder.EqualsBuilder;
+import org.apache.commons.lang.builder.HashCodeBuilder;
 
 /**
  * AuthToLocalBuilder helps to create auth_to_local rules for use in configuration files like
@@ -104,7 +108,7 @@ public class AuthToLocalBuilder implements Cloneable {
     this.defaultRealm = defaultRealm;
 
     this.additionalRealms = (additionalRealms == null)
-        ? Collections.<String>emptySet()
+        ? Collections.emptySet()
         : Collections.unmodifiableSet(new HashSet<>(additionalRealms));
 
     this.caseInsensitiveUser = caseInsensitiveUserSupport;
@@ -126,20 +130,22 @@ public class AuthToLocalBuilder implements Cloneable {
    *
    * @param authToLocalRules config property value containing the existing rules
    */
-  public void addRules(String authToLocalRules) {
+  public AuthToLocalBuilder addRules(String authToLocalRules) {
     if (!StringUtils.isEmpty(authToLocalRules)) {
       String[] rules = authToLocalRules.split("RULE:|DEFAULT");
       for (String r : rules) {
         r = r.trim();
         if (!r.isEmpty()) {
           Rule rule = createRule(r);
-          setRules.add(rule);
+          if (!setRules.contains(rule.caseSensitivityInverted())) {
+            setRules.add(rule);
+          }
         }
       }
     }
+    return this;
   }
 
-
   /**
    * Adds a rule for the given principal and local user.
    * The principal must contain a realm component.
@@ -157,7 +163,7 @@ public class AuthToLocalBuilder implements Cloneable {
    * @param localUsername a string declaring that local username to map the principal to
    * @throws IllegalArgumentException if the provided principal doesn't contain a realm element
    */
-  public void addRule(String principal, String localUsername) {
+  public AuthToLocalBuilder addRule(String principal, String localUsername) {
     if (!StringUtils.isEmpty(principal) && !StringUtils.isEmpty(localUsername)) {
       Principal p = new Principal(principal);
       if (p.getRealm() == null) {
@@ -168,6 +174,7 @@ public class AuthToLocalBuilder implements Cloneable {
       Rule rule = createHostAgnosticRule(p, localUsername);
       setRules.add(rule);
     }
+    return this;
   }
 
   /**
@@ -264,10 +271,10 @@ public class AuthToLocalBuilder implements Cloneable {
   private Rule createHostAgnosticRule(Principal principal, String localUser) {
     List<String> principalComponents = principal.getComponents();
     int componentCount = principalComponents.size();
-
-    return new Rule(principal, componentCount, 1, String.format(
-        "RULE:[%d:$1@$0](%s@%s)s/.*/%s/", componentCount,
-        principal.getComponent(1), principal.getRealm(), localUser));
+    return new Rule(
+      MatchingRule.ignoreHostWhenComponentCountIs(componentCount),
+      new Principal(principal.getComponent(1) + "@" + principal.getRealm()),
+      new Substitution(".*", localUser, "", false));
   }
 
   /**
@@ -278,10 +285,10 @@ public class AuthToLocalBuilder implements Cloneable {
    * @return a new default realm rule
    */
   private Rule createDefaultRealmRule(String realm, boolean caseInsensitive) {
-    String caseSensitivityRule = caseInsensitive ? "/L" : "";
-
-    return new Rule(new Principal(String.format(".*@%s", realm)),
-        1, 1, String.format("RULE:[1:$1@$0](.*@%s)s/@.*//" + caseSensitivityRule, realm));
+    return new Rule(
+      MatchingRule.ignoreHostWhenComponentCountIs(1),
+      new Principal(".*@" + realm),
+      new Substitution("@.*", "", "", caseInsensitive));
   }
 
   /**
@@ -291,7 +298,7 @@ public class AuthToLocalBuilder implements Cloneable {
    * @return a new rule which matches the provided string representation
    */
   private Rule createRule(String rule) {
-    return new Rule(rule.startsWith("RULE:") ? rule : String.format("RULE:%s", rule));
+    return Rule.parse(rule.startsWith("RULE:") ? rule : String.format("RULE:%s", rule));
   }
 
   /**
@@ -317,124 +324,68 @@ public class AuthToLocalBuilder implements Cloneable {
     return collection;
   }
 
-
   /**
-   * Rule implementation.
+   * I represent an auth-to-local rule that maps a principal of the form username/hostname@REALM to username.
    */
   private static class Rule implements Comparable<Rule> {
-    /**
-     * pattern used to parse existing rules
-     */
     private static final Pattern PATTERN_RULE_PARSE =
-        Pattern.compile("RULE:\\s*\\[\\s*(\\d)\\s*:\\s*(.+?)(?:@(.+?))??\\s*\\]\\s*\\((.+?)\\)\\s*s/(.*?)/(.*?)/([a-zA-Z]*)(?:.|\n)*");
+      Pattern.compile("RULE:\\s*\\[\\s*(\\d)\\s*:\\s*(.+?)(?:@(.+?))??\\s*\\]\\s*\\((.+?)\\)\\s*s/(.*?)/(.*?)/([a-zA-Z]*)((/L)?)(?:.|\n)*");
+    private final MatchingRule matchingRule;
+    private final Principal principal;
+    private final Substitution substitution;
 
     /**
-     * associated principal
+     * @param rule in the following format RULE:[n:string](regexp)s/pattern/replacement/[modifier]/[L]
      */
-    private Principal principal;
-
-    /**
-     * string representation of the rule
-     */
-    private String rule;
-
-    /**
-     * expected component count
-     */
-    private int expectedComponentCount;
-
-    /**
-     * number of components being matched in the rule
-     */
-    private int matchComponentCount;
-
-    /**
-     * Constructor.
-     *
-     * @param principal              principal
-     * @param expectedComponentCount number of components needed by a principal to match
-     * @param matchComponentCount    number of components which are included in the rule evaluation
-     * @param rule                   string representation of the rule
-     */
-    public Rule(Principal principal, int expectedComponentCount, int matchComponentCount, String rule) {
-      this.principal = principal;
-      this.expectedComponentCount = expectedComponentCount;
-      this.matchComponentCount = matchComponentCount;
-      this.rule = rule;
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param rule string representation of the rule
-     */
-    public Rule(String rule) {
-      //this.rule = rule;
+    public static Rule parse(String rule) {
       Matcher m = PATTERN_RULE_PARSE.matcher(rule);
       if (!m.matches()) {
         throw new IllegalArgumentException("Invalid rule: " + rule);
       }
-      expectedComponentCount = Integer.valueOf(m.group(1));
-
+      int expectedComponentCount = Integer.valueOf(m.group(1));
       String matchPattern = m.group(2);
-      matchComponentCount = (matchPattern.startsWith("$") ?
-          matchPattern.substring(1) :
-          matchPattern).
-          split("\\$").length;
-      String patternRealm = m.group(3);
-      principal = new Principal(m.group(4));
+      String optionalPatternRealm = m.group(3);
+      String matchingRegexp = m.group(4);
       String replacementPattern = m.group(5);
       String replacementReplacement = m.group(6);
       String replacementModifier = m.group(7);
-      if (patternRealm != null) {
-        this.rule = String.format("RULE:[%d:%s@%s](%s)s/%s/%s/%s",
-            expectedComponentCount, matchPattern, patternRealm,
-            principal.toString(), replacementPattern, replacementReplacement, replacementModifier);
-      } else {
-        this.rule = String.format("RULE:[%d:%s](%s)s/%s/%s/%s",
-            expectedComponentCount, matchPattern,
-            principal.toString(), replacementPattern, replacementReplacement, replacementModifier);
-      }
+      String caseSensitivity = m.group(8);
+      return new Rule(
+        new MatchingRule(expectedComponentCount, matchPattern, optionalPatternRealm),
+        new Principal(matchingRegexp),
+        new Substitution(replacementPattern, replacementReplacement, replacementModifier, !caseSensitivity.isEmpty()));
     }
 
-    /**
-     * Get the associated principal.
-     *
-     * @return associated principal
-     */
-    public Principal getPrincipal() {
-      return principal;
+    public Rule(MatchingRule matchingRule, Principal principal, Substitution substitution) {
+      this.matchingRule = matchingRule;
+      this.principal = principal;
+      this.substitution = substitution;
     }
 
-    /**
-     * Get the expected component count.  This specified the number of components
-     * that a principal must contain to match this rule.
-     *
-     * @return the expected component count
-     */
-    public int getExpectedComponentCount() {
-      return expectedComponentCount;
+    @Override
+    public String toString() {
+      return String.format("RULE:%s(%s)%s", matchingRule, principal, substitution);
     }
 
-    /**
-     * Get the match component count.  This is the number of components that are evaluated
-     * when attempting to match a principal to the rule.
-     *
-     * @return the match component count
-     */
-    public int getMatchComponentCount() {
-      return matchComponentCount;
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) return true;
+      if (o == null || getClass() != o.getClass()) return false;
+      Rule rule = (Rule) o;
+      return new EqualsBuilder()
+        .append(matchingRule, rule.matchingRule)
+        .append(principal, rule.principal)
+        .append(substitution, rule.substitution)
+        .isEquals();
     }
 
-    /**
-     * String representation of the rule in the form
-     * RULE:[componentCount:matchString](me@foo.com)s/pattern/localUser/
-     *
-     * @return string representation of the rule
-     */
     @Override
-    public String toString() {
-      return rule;
+    public int hashCode() {
+      return new HashCodeBuilder(17, 37)
+        .append(matchingRule)
+        .append(principal)
+        .append(substitution)
+        .toHashCode();
     }
 
     /**
@@ -453,26 +404,22 @@ public class AuthToLocalBuilder implements Cloneable {
      */
     @Override
     public int compareTo(Rule other) {
-      int retVal = expectedComponentCount - other.getExpectedComponentCount();
-
+      int retVal = matchingRule.expectedComponentCount - other.matchingRule.expectedComponentCount;
       if (retVal == 0) {
-        retVal = other.getMatchComponentCount() - matchComponentCount;
-
+        retVal = other.matchingRule.matchComponentCount() - matchingRule.matchComponentCount();
         if (retVal == 0) {
-          Principal otherPrincipal = other.getPrincipal();
-          if (principal.equals(otherPrincipal)) {
-            retVal = rule.compareTo(other.rule);
+          if (this.principal.equals(other.principal)) {
+            retVal = toString().compareTo(other.toString());
           } else {
             // check for wildcard realms '.*'
-            String realm = principal.getRealm();
-            String otherRealm = otherPrincipal.getRealm();
+            String realm = this.principal.getRealm();
+            String otherRealm = other.principal.getRealm();
             retVal = compareValueWithWildcards(realm, otherRealm);
-
             if (retVal == 0) {
-              for (int i = 1; i <= matchComponentCount; i++) {
+              for (int i = 1; i <= matchingRule.matchComponentCount(); i++) {
                 // check for wildcard component
-                String component1 = principal.getComponent(1);
-                String otherComponent1 = otherPrincipal.getComponent(1);
+                String component1 = this.principal.getComponent(1);
+                String otherComponent1 = other.principal.getComponent(1);
                 retVal = compareValueWithWildcards(component1, otherComponent1);
 
                 if (retVal != 0) {
@@ -483,20 +430,9 @@ public class AuthToLocalBuilder implements Cloneable {
           }
         }
       }
-
       return retVal;
     }
 
-    @Override
-    public boolean equals(Object o) {
-      return this == o || o instanceof Rule && rule.equals(((Rule) o).rule);
-    }
-
-    @Override
-    public int hashCode() {
-      return rule.hashCode();
-    }
-
     /**
      * Compares 2 strings for use in compareTo methods but orders <code>null</code>s first and wildcards last.
      * <p/>
@@ -532,6 +468,134 @@ public class AuthToLocalBuilder implements Cloneable {
         return s1.compareTo(s2);
       }
     }
+
+    public Rule caseSensitivityInverted() {
+      return new Rule(matchingRule, principal, substitution.caseSensitivityInverted());
+    }
+  }
+
+  /**
+   * The matching rule part of an auth-to-local rule: [n:string]
+   * Indicates a matching rule where n declares the number of expected components in the principal.
+   * Components are separated by a /, where a user account has one component (ambari-qa) and a service account has two components (nn/fqdn).
+   * The string value declares how to reformat the value to be used in the rest of the expression.
+   * The placeholders are as follows:
+   *  $0 - realm
+   *  $1 - 1st component
+   *  $2 - 2nd component
+   *  For example: [2:$1@$0] matches on nn/c6501.ambari.apache.org@EXAMPLE.COM and translates to nn@EXAMPLE.COM
+   */
+  private static class MatchingRule {
+    private final int expectedComponentCount;
+    private final String matchPattern;
+    private final String realmPattern;
+
+    public static MatchingRule ignoreHostWhenComponentCountIs(int expectedComponentCount) {
+      return new MatchingRule(expectedComponentCount, "$1", "$0");
+    }
+
+    public MatchingRule(int expectedComponentCount, String matchPattern, @Nullable String realmPattern) {
+      this.expectedComponentCount = expectedComponentCount;
+      this.matchPattern = matchPattern;
+      this.realmPattern = realmPattern;
+    }
+
+    /**
+     * Get the match component count. This is the number of components that are evaluated
+     * when attempting to match a principal to the rule.
+     */
+    public int matchComponentCount() {
+      return (matchPattern.startsWith("$")
+        ? matchPattern.substring(1)
+        : matchPattern).split("\\$").length;
+    }
+
+    @Override
+    public String toString() {
+      return realmPattern != null
+        ? String.format("[%d:%s@%s]", expectedComponentCount, matchPattern, realmPattern)
+        : String.format("[%d:%s]", expectedComponentCount, matchPattern);
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) return true;
+      if (o == null || getClass() != o.getClass()) return false;
+      MatchingRule that = (MatchingRule) o;
+      return new EqualsBuilder()
+        .append(expectedComponentCount, that.expectedComponentCount)
+        .append(matchPattern, that.matchPattern)
+        .append(realmPattern, that.realmPattern)
+        .isEquals();
+    }
+
+    @Override
+    public int hashCode() {
+      return new HashCodeBuilder(17, 37)
+        .append(expectedComponentCount)
+        .append(matchPattern)
+        .append(realmPattern)
+        .toHashCode();
+    }
+  }
+
+  /**
+   * I'm the substitution part of an auth-to-local rule.
+   * I have 4 parts:
+   *  s/pattern/replacement/g/L where the last 2 parts are optional.
+   * The pattern part of this expression is a regular expression used to find the portion of the string to replace.
+   * The replacement part of this expression is the value to use for replacing the matched section.
+   * If g is specified after the last /, the replacements will occur for every match in the value, else only the first match is processed.
+   */
+  private static class Substitution {
+    private final String pattern;
+    private final String replacement;
+    private final String modifier;
+    private final boolean caseInsensitiveUser;
+
+    public Substitution(String pattern, String replacement, String modifier, boolean caseInsensitiveUser) {
+      this.pattern = pattern;
+      this.replacement = replacement;
+      this.modifier = modifier;
+      this.caseInsensitiveUser = caseInsensitiveUser;
+    }
+
+    @Override
+    public String toString() {
+      return String.format(
+        "s/%s/%s/%s%s",
+        pattern,
+        replacement,
+        modifier,
+        caseInsensitiveUser ? "/L" : "");
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) return true;
+      if (o == null || getClass() != o.getClass()) return false;
+      Substitution that = (Substitution) o;
+      return new EqualsBuilder()
+        .append(caseInsensitiveUser, that.caseInsensitiveUser)
+        .append(pattern, that.pattern)
+        .append(replacement, that.replacement)
+        .append(modifier, that.modifier)
+        .isEquals();
+    }
+
+    @Override
+    public int hashCode() {
+      return new HashCodeBuilder(17, 37)
+        .append(pattern)
+        .append(replacement)
+        .append(modifier)
+        .append(caseInsensitiveUser)
+        .toHashCode();
+    }
+
+    public Substitution caseSensitivityInverted() {
+      return new Substitution(pattern, replacement, modifier, !caseInsensitiveUser);
+    }
   }
 
   /**
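
As context for Rule.parse() above: the grammar it accepts is RULE:[n:string](regexp)s/pattern/replacement/[modifier][/L], where group 8 of the new PATTERN_RULE_PARSE captures the optional /L case-insensitivity flag. A minimal standalone sketch of the same parse, ported to Python for brevity (the regex is copied from the diff; parse_rule and the dict keys are illustrative names, not Ambari API):

    import re

    # Regex ported from PATTERN_RULE_PARSE above; group 8 is the new /L flag.
    RULE_RE = re.compile(
        r"RULE:\s*\[\s*(\d)\s*:\s*(.+?)(?:@(.+?))??\s*\]"
        r"\s*\((.+?)\)\s*s/(.*?)/(.*?)/([a-zA-Z]*)((/L)?)")

    def parse_rule(rule):
        m = RULE_RE.match(rule)
        if not m:
            raise ValueError("Invalid rule: " + rule)
        n, match, realm, principal, pat, repl, mod, case_flag = m.groups()[:8]
        return {
            "expected_component_count": int(n),   # the n in [n:string]
            "match_pattern": match,               # e.g. $1@$0
            "realm_pattern": realm,               # may be None
            "principal": principal,               # e.g. .*@EXAMPLE.COM
            "substitution": (pat, repl, mod),
            "case_insensitive": case_flag == "/L",
        }

    print(parse_rule("RULE:[1:$1@$0](.*@HDP01.LOCAL)s/.*/ambari-qa//L"))
    # {'expected_component_count': 1, 'match_pattern': '$1', 'realm_pattern': '$0', ...}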

http://git-wip-us.apache.org/repos/asf/ambari/blob/bfe772be/ambari-server/src/test/java/org/apache/ambari/server/controller/AuthToLocalBuilderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AuthToLocalBuilderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AuthToLocalBuilderTest.java
index c08247d..5953231 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AuthToLocalBuilderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AuthToLocalBuilderTest.java
@@ -563,4 +563,49 @@ public class AuthToLocalBuilderTest {
 
     assertEquals(existingRules, builder.generate());
   }
+
+  @Test
+  public void testCustomRuleCanBeAddedWithCaseSensitivity() {
+    AuthToLocalBuilder builder = new AuthToLocalBuilder("EXAMPLE.COM", Collections.emptyList(), false)
+      .addRule("yarn/_HOST@EXAMPLE.COM", "yarn")
+      .addRules(
+      "RULE:[1:$1@$0](.*@HDP01.LOCAL)s/.*/ambari-qa//L\n" +
+    "RULE:[2:$1@$0](yarn@EXAMPLE.COM)s/.*/yarn/\n" +
+    "DEFAULT");
+    assertEquals(
+      "RULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\n" +
+        "RULE:[1:$1@$0](.*@HDP01.LOCAL)s/.*/ambari-qa//L\n" +
+        "RULE:[2:$1@$0](yarn@EXAMPLE.COM)s/.*/yarn/\n" +
+        "DEFAULT"
+      , builder.generate());
+  }
+
+  @Test
+  public void testCaseSensitivityFlagIsRemovedAfterItWasAddedToAmbariRule() {
+    AuthToLocalBuilder builder = new AuthToLocalBuilder("EXAMPLE.COM", Collections.emptyList(), false)
+      .addRule("yarn/_HOST@EXAMPLE.COM", "yarn")
+      .addRules(
+          "RULE:[2:$1@$0](yarn@EXAMPLE.COM)s/.*/yarn//L\n" +
+          "DEFAULT");
+    assertEquals(
+      "RULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\n" +
+        "RULE:[2:$1@$0](yarn@EXAMPLE.COM)s/.*/yarn/\n" +
+        "DEFAULT"
+      , builder.generate());
+  }
+
+  @Test
+  public void testCaseSensitivityFlagIsAddedAfterItWasMissingFromAmbariRule() {
+    AuthToLocalBuilder builder = new AuthToLocalBuilder("EXAMPLE.COM", Collections.emptyList(), true)
+      .addRule("yarn/_HOST@EXAMPLE.COM", "yarn")
+      .addRules(
+          "RULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\n" +
+          "RULE:[2:$1@$0](yarn@EXAMPLE.COM)s/.*/yarn/\n" +
+          "DEFAULT");
+    assertEquals(
+      "RULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*///L\n" +
+        "RULE:[2:$1@$0](yarn@EXAMPLE.COM)s/.*/yarn/\n" +
+        "DEFAULT"
+      , builder.generate());
+  }
 }
\ No newline at end of file
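
One behavioral note worth calling out: addRules() now skips an incoming rule when the builder already holds the same rule with the opposite /L flag (see caseSensitivityInverted() and the tests above), so the flag implied by Ambari's caseInsensitiveUserSupport setting wins over stale copies carried in existing configuration. A condensed sketch of that check, with a Rule reduced to a hypothetical (principal, case_insensitive) pair for illustration:

    rules = {("yarn@EXAMPLE.COM", False)}

    def add_rule(rules, principal, case_insensitive):
        # Mirrors setRules.contains(rule.caseSensitivityInverted()) above.
        inverted_twin = (principal, not case_insensitive)
        if inverted_twin not in rules:
            rules.add((principal, case_insensitive))

    add_rule(rules, "yarn@EXAMPLE.COM", True)  # skipped: twin without /L exists
    print(rules)                               # {('yarn@EXAMPLE.COM', False)}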


[37/50] [abbrv] ambari git commit: AMBARI-21531. Client component restart fails after Ambari upgrade while running custom hook script on Suse 11 (aonishuk)

Posted by nc...@apache.org.
AMBARI-21531. Client component restart fails after Ambari upgrade while running custom hook script on Suse 11 (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8c15965e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8c15965e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8c15965e

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 8c15965e090c1666702d08b860a796015c79f679
Parents: 587c42d
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Thu Jul 20 13:27:40 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Thu Jul 20 13:27:40 2017 +0300

----------------------------------------------------------------------
 .../resource_management/TestUserResource.py     |  2 +-
 .../python/resource_management/core/base.py     | 11 +++++
 .../core/providers/accounts.py                  |  6 ++-
 .../core/resources/accounts.py                  |  4 +-
 .../before-ANY/scripts/shared_initialization.py | 12 +++---
 .../2.0.6/hooks/before-ANY/test_before_any.py   | 45 +++++++++++++-------
 6 files changed, 55 insertions(+), 25 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/8c15965e/ambari-agent/src/test/python/resource_management/TestUserResource.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/resource_management/TestUserResource.py b/ambari-agent/src/test/python/resource_management/TestUserResource.py
index 97d992e..8f1df83 100644
--- a/ambari-agent/src/test/python/resource_management/TestUserResource.py
+++ b/ambari-agent/src/test/python/resource_management/TestUserResource.py
@@ -163,7 +163,7 @@ class TestUserResource(TestCase):
     getpwnam_mock.return_value = _get_user_entity()
 
     with Environment('/') as env:
-      user = User("mapred", action = "create", uid = "1", shell = "/bin/bash")
+      user = User("mapred", action = "create", uid = 1, shell = "/bin/bash")
 
     popen_mock.assert_called_with(['/bin/bash', '--login', '--noprofile', '-c', "ambari-sudo.sh  PATH=/bin -H -E usermod -s /bin/bash -u 1 mapred"], shell=False, preexec_fn=preexec_fn, stderr=-2, stdout=-1, env={'PATH': '/bin'}, cwd=None, close_fds=True)
     self.assertEqual(popen_mock.call_count, 1)

http://git-wip-us.apache.org/repos/asf/ambari/blob/8c15965e/ambari-common/src/main/python/resource_management/core/base.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/core/base.py b/ambari-common/src/main/python/resource_management/core/base.py
index 1500e1f..b862853 100644
--- a/ambari-common/src/main/python/resource_management/core/base.py
+++ b/ambari-common/src/main/python/resource_management/core/base.py
@@ -58,6 +58,17 @@ class BooleanArgument(ResourceArgument):
         "Expected a boolean for %s received %r" % (self.name, value))
     return value
 
+class IntegerArgument(ResourceArgument):
+  def validate(self, value):
+    if value is None:
+      return value
+
+    value = super(IntegerArgument, self).validate(value)
+    if not isinstance( value, int ):
+      raise InvalidArgument(
+        "Expected an integer for %s received %r" % (self.name, value))
+    return value
+
 
 class PasswordArgument(ResourceArgument):
   def log_str(self, key, value):
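
The new IntegerArgument mirrors BooleanArgument: None passes through (the argument is simply unset), and anything that is not an int is rejected when the resource is defined. A minimal standalone sketch of that contract (InvalidArgument here stands in for the real exception class):

    class InvalidArgument(Exception):
        pass

    def validate_integer(name, value):
        # Sketch of IntegerArgument.validate() above.
        if value is None:
            return value
        if not isinstance(value, int):
            raise InvalidArgument(
                "Expected an integer for %s received %r" % (name, value))
        return value

    validate_integer("uid", 1000)      # ok
    validate_integer("uid", None)      # ok, uid left unmanaged
    # validate_integer("uid", "1000")  # would raise InvalidArgument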

http://git-wip-us.apache.org/repos/asf/ambari/blob/8c15965e/ambari-common/src/main/python/resource_management/core/providers/accounts.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/core/providers/accounts.py b/ambari-common/src/main/python/resource_management/core/providers/accounts.py
index c4f2496..fa70989 100644
--- a/ambari-common/src/main/python/resource_management/core/providers/accounts.py
+++ b/ambari-common/src/main/python/resource_management/core/providers/accounts.py
@@ -66,7 +66,9 @@ class UserProvider(Provider):
         groups = self.resource.groups
         if self.user and self.user_groups:
           groups += self.user_groups
-        option_value = ",".join(groups)
+        option_value = ",".join(groups) 
+      elif attributes[1] == "-u" and self.user and self.user.pw_uid == getattr(self.resource, option_name):
+        option_value = None
       else:
         option_value = getattr(self.resource, option_name)
         
@@ -78,7 +80,7 @@ class UserProvider(Provider):
       return
 
     command.append(self.resource.username)
-
+    
     shell.checked_call(command, sudo=True)
 
   def action_remove(self):
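
The second change in this provider short-circuits usermod: when the requested uid already equals the account's current pw_uid, the -u option is dropped, so an unchanged uid no longer forces a modification command. A condensed sketch of that decision (the function name is illustrative):

    def uid_option(requested_uid, current_uid):
        # Mirrors the new "-u" branch above: None or an unchanged uid adds nothing.
        if requested_uid is None or requested_uid == current_uid:
            return []
        return ["-u", str(requested_uid)]

    print(uid_option(1000, 1000))   # [] -> no usermod needed for the uid
    print(uid_option(1001, 1000))   # ['-u', '1001']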

http://git-wip-us.apache.org/repos/asf/ambari/blob/8c15965e/ambari-common/src/main/python/resource_management/core/resources/accounts.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/core/resources/accounts.py b/ambari-common/src/main/python/resource_management/core/resources/accounts.py
index 4ee2c57..fb3f35a 100644
--- a/ambari-common/src/main/python/resource_management/core/resources/accounts.py
+++ b/ambari-common/src/main/python/resource_management/core/resources/accounts.py
@@ -21,7 +21,7 @@ Ambari Agent
 """
 __all__ = ["Group", "User"]
 
-from resource_management.core.base import Resource, ForcedListArgument, ResourceArgument, BooleanArgument
+from resource_management.core.base import Resource, ForcedListArgument, ResourceArgument, BooleanArgument, IntegerArgument
 
 
 class Group(Resource):
@@ -37,7 +37,7 @@ class User(Resource):
   action = ForcedListArgument(default="create")
   username = ResourceArgument(default=lambda obj: obj.name)
   comment = ResourceArgument()
-  uid = ResourceArgument()
+  uid = IntegerArgument()
   gid = ResourceArgument()
   """
   If the user exists, and there are some groups, appends to existant

http://git-wip-us.apache.org/repos/asf/ambari/blob/8c15965e/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
index bcc1a3a..b687229 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
@@ -49,11 +49,13 @@ def setup_users():
              uid = get_uid(user),
              gid = params.user_to_gid_dict[user],
              groups = params.user_to_groups_dict[user],
+             fetch_nonlocal_groups = params.fetch_nonlocal_groups,
              )
       else:
         User(user,
              gid = params.user_to_gid_dict[user],
              groups = params.user_to_groups_dict[user],
+             fetch_nonlocal_groups = params.fetch_nonlocal_groups,
              )
 
     if params.override_uid == "true":
@@ -96,7 +98,7 @@ def create_dfs_cluster_admins():
 
   User(params.hdfs_user,
     groups = params.user_to_groups_dict[params.hdfs_user] + groups_list,
-          fetch_nonlocal_groups = params.fetch_nonlocal_groups
+    fetch_nonlocal_groups = params.fetch_nonlocal_groups
   )
 
 def create_tez_am_view_acls():
@@ -145,7 +147,7 @@ def set_uid(user, user_dirs):
        mode=0555)
   ignore_groupsusers_create_str = str(params.ignore_groupsusers_create).lower()
   uid = get_uid(user)
-  Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs} {uid}"),
+  Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs} {new_uid}", new_uid=0 if uid is None else uid),
           not_if = format("(test $(id -u {user}) -gt 1000) || ({ignore_groupsusers_create_str})"))
 
 def get_uid(user):
@@ -161,12 +163,12 @@ def get_uid(user):
     return uid
   else:
     if user == params.smoke_user:
-      return 0
+      return None
     File(format("{tmp_dir}/changeUid.sh"),
          content=StaticFile("changeToSecureUid.sh"),
          mode=0555)
-    conde, newUid = shell.call((format("{tmp_dir}/changeUid.sh"), format("{user}")), sudo=True)
-    return newUid
+    code, newUid = shell.call(format("{tmp_dir}/changeUid.sh {user}"))
+    return int(newUid)
 
 def setup_hadoop_env():
   import params
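
Condensed, the new get_uid() contract is: the smoke user returns None (so the User resource leaves its uid unmanaged), everyone else gets an int parsed from the changeUid.sh output, and set_uid() substitutes 0 only as a command-line placeholder when the uid is None. A sketch under those assumptions (shell_call stands in for resource_management's shell.call):

    def get_uid_sketch(user, smoke_user, shell_call):
        if user == smoke_user:
            return None                  # leave uid unmanaged for the smoke user
        code, new_uid = shell_call("/tmp/changeUid.sh %s" % user)
        return int(new_uid)              # was returned as a raw string before this fix

    def uid_for_command(uid):
        return 0 if uid is None else uid # placeholder passed to changeUid.sh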

http://git-wip-us.apache.org/repos/asf/ambari/blob/8c15965e/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py b/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py
index 1d2351f..a13ac24 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py
@@ -58,8 +58,9 @@ class TestHookBeforeInstall(RMFTestCase):
                               )
     self.assertResourceCalled('User', 'hive',
                               gid = 'hadoop',
-                              uid = '1000',
+                              uid = 1000,
                               groups = [u'hadoop'],
+                              fetch_nonlocal_groups = True,
                               )
     self.assertResourceCalled('File', '/tmp/changeUid.sh',
                               content = StaticFile('changeToSecureUid.sh'),
@@ -67,8 +68,9 @@ class TestHookBeforeInstall(RMFTestCase):
                               )
     self.assertResourceCalled('User', 'oozie',
                               gid = 'hadoop',
-                              uid = '1000',
+                              uid = 1000,
                               groups = [u'users'],
+                              fetch_nonlocal_groups = True,
                               )
     self.assertResourceCalled('File', '/tmp/changeUid.sh',
                               content = StaticFile('changeToSecureUid.sh'),
@@ -76,13 +78,15 @@ class TestHookBeforeInstall(RMFTestCase):
                               )
     self.assertResourceCalled('User', 'nobody',
                               gid = 'hadoop',
-                              uid = '1000',
+                              uid = 1000,
                               groups = [u'nobody'],
+                              fetch_nonlocal_groups = True,
                               )
     self.assertResourceCalled('User', 'ambari-qa',
                               gid = 'hadoop',
-                              uid = 0,
+                              uid = None,
                               groups = [u'users'],
+                              fetch_nonlocal_groups = True,
                               )
     self.assertResourceCalled('File', '/tmp/changeUid.sh',
                               content = StaticFile('changeToSecureUid.sh'),
@@ -90,8 +94,9 @@ class TestHookBeforeInstall(RMFTestCase):
                               )
     self.assertResourceCalled('User', 'flume',
                               gid = 'hadoop',
-                              uid = '1000',
+                              uid = 1000,
                               groups = [u'hadoop'],
+                              fetch_nonlocal_groups = True,
                               )
     self.assertResourceCalled('File', '/tmp/changeUid.sh',
                               content = StaticFile('changeToSecureUid.sh'),
@@ -99,8 +104,9 @@ class TestHookBeforeInstall(RMFTestCase):
                               )
     self.assertResourceCalled('User', 'hdfs',
                               gid = 'hadoop',
-                              uid = '1000',
+                              uid = 1000,
                               groups = [u'hadoop'],
+                              fetch_nonlocal_groups = True,
                               )
     self.assertResourceCalled('File', '/tmp/changeUid.sh',
                               content = StaticFile('changeToSecureUid.sh'),
@@ -108,8 +114,9 @@ class TestHookBeforeInstall(RMFTestCase):
                               )
     self.assertResourceCalled('User', 'storm',
                               gid = 'hadoop',
-                              uid = '1000',
+                              uid = 1000,
                               groups = [u'hadoop'],
+                              fetch_nonlocal_groups = True,
                               )
     self.assertResourceCalled('File', '/tmp/changeUid.sh',
                               content = StaticFile('changeToSecureUid.sh'),
@@ -117,8 +124,9 @@ class TestHookBeforeInstall(RMFTestCase):
                               )
     self.assertResourceCalled('User', 'mapred',
                               gid = 'hadoop',
-                              uid = '1000',
+                              uid = 1000,
                               groups = [u'hadoop'],
+                              fetch_nonlocal_groups = True,
                               )
     self.assertResourceCalled('File', '/tmp/changeUid.sh',
                               content = StaticFile('changeToSecureUid.sh'),
@@ -126,8 +134,9 @@ class TestHookBeforeInstall(RMFTestCase):
                               )
     self.assertResourceCalled('User', 'hbase',
                               gid = 'hadoop',
-                              uid = '1000',
+                              uid = 1000,
                               groups = [u'hadoop'],
+                              fetch_nonlocal_groups = True,
                               )
     self.assertResourceCalled('File', '/tmp/changeUid.sh',
                               content = StaticFile('changeToSecureUid.sh'),
@@ -135,8 +144,9 @@ class TestHookBeforeInstall(RMFTestCase):
                               )
     self.assertResourceCalled('User', 'tez',
                               gid = 'hadoop',
-                              uid = '1000',
+                              uid = 1000,
                               groups = [u'users'],
+                              fetch_nonlocal_groups = True,
                               )
     self.assertResourceCalled('File', '/tmp/changeUid.sh',
                               content = StaticFile('changeToSecureUid.sh'),
@@ -144,8 +154,9 @@ class TestHookBeforeInstall(RMFTestCase):
                               )
     self.assertResourceCalled('User', 'zookeeper',
                               gid = 'hadoop',
-                              uid = '1000',
+                              uid = 1000,
                               groups = [u'hadoop'],
+                              fetch_nonlocal_groups = True,
                               )
     self.assertResourceCalled('File', '/tmp/changeUid.sh',
                               content = StaticFile('changeToSecureUid.sh'),
@@ -153,8 +164,9 @@ class TestHookBeforeInstall(RMFTestCase):
                               )
     self.assertResourceCalled('User', 'falcon',
                               gid = 'hadoop',
-                              uid = '1000',
+                              uid = 1000,
                               groups = [u'users'],
+                              fetch_nonlocal_groups = True,
                               )
     self.assertResourceCalled('File', '/tmp/changeUid.sh',
                               content = StaticFile('changeToSecureUid.sh'),
@@ -162,8 +174,9 @@ class TestHookBeforeInstall(RMFTestCase):
                               )
     self.assertResourceCalled('User', 'sqoop',
                               gid = 'hadoop',
-                              uid = '1000',
+                              uid = 1000,
                               groups = [u'hadoop'],
+                              fetch_nonlocal_groups = True,
                               )
     self.assertResourceCalled('File', '/tmp/changeUid.sh',
                               content = StaticFile('changeToSecureUid.sh'),
@@ -171,8 +184,9 @@ class TestHookBeforeInstall(RMFTestCase):
                               )
     self.assertResourceCalled('User', 'yarn',
                               gid = 'hadoop',
-                              uid = '1000',
+                              uid = 1000,
                               groups = [u'hadoop'],
+                              fetch_nonlocal_groups = True,
                               )
     self.assertResourceCalled('File', '/tmp/changeUid.sh',
                               content = StaticFile('changeToSecureUid.sh'),
@@ -180,8 +194,9 @@ class TestHookBeforeInstall(RMFTestCase):
                               )
     self.assertResourceCalled('User', 'hcat',
                               gid = 'hadoop',
-                              uid = '1000',
+                              uid = 1000,
                               groups = [u'hadoop'],
+                              fetch_nonlocal_groups = True,
                               )
     self.assertResourceCalled('File', '/tmp/changeUid.sh',
                               content = StaticFile('changeToSecureUid.sh'),
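
Why the expected uid flips from '1000' to 1000 throughout this test: the provider change in accounts.py compares the requested uid against pw_uid, which is an int, so the old string value could never compare equal and usermod was always re-invoked. A two-line illustration:

    current_pw_uid = 1000
    assert "1000" != current_pw_uid   # string uid never matches -> spurious usermod
    assert 1000 == current_pw_uid     # IntegerArgument makes the no-op detectable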


[06/50] [abbrv] ambari git commit: AMBARI-21471. ATS going down due to missing org.apache.spark.deploy.history.yarn.plugin.SparkATSPlugin (smohanty)

Posted by nc...@apache.org.
AMBARI-21471. ATS going down due to missing org.apache.spark.deploy.history.yarn.plugin.SparkATSPlugin (smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/853a5d4a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/853a5d4a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/853a5d4a

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 853a5d4a2eda1afb5ee4578cf99d0757abc5f95d
Parents: eb1adcb
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Thu Jul 13 22:35:28 2017 -0700
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Thu Jul 13 22:38:40 2017 -0700

----------------------------------------------------------------------
 .../stacks/HDP/2.5/services/stack_advisor.py    |   5 +-
 .../src/main/resources/stacks/stack_advisor.py  |  19 ++-
 .../stacks/2.5/common/test_stack_advisor.py     | 150 +++++++++++--------
 3 files changed, 105 insertions(+), 69 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/853a5d4a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
index 3337e8e..4ca74ee 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
@@ -19,6 +19,7 @@ limitations under the License.
 
 import math
 
+
 from ambari_commons.str_utils import string_set_equals
 from resource_management.core.exceptions import Fail
 from resource_management.libraries.functions.get_bare_principal import get_bare_principal
@@ -774,9 +775,7 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
         self.checkAndStopLlapQueue(services, configurations, LLAP_QUEUE_NAME)
 
     putYarnSiteProperty = self.putProperty(configurations, "yarn-site", services)
-    stack_root = "/usr/hdp"
-    if cluster_env and "stack_root" in cluster_env:
-      stack_root = cluster_env["stack_root"]
+    stack_root = self.getStackRoot(services)
 
     timeline_plugin_classes_values = []
     timeline_plugin_classpath_values = []

http://git-wip-us.apache.org/repos/asf/ambari/blob/853a5d4a/ambari-server/src/main/resources/stacks/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/stack_advisor.py b/ambari-server/src/main/resources/stacks/stack_advisor.py
index 8e08d82..67f7fe0 100644
--- a/ambari-server/src/main/resources/stacks/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/stack_advisor.py
@@ -25,6 +25,7 @@ import re
 import socket
 import string
 import traceback
+import json
 import sys
 import logging
 from math import ceil, floor
@@ -34,7 +35,6 @@ from urlparse import urlparse
 from resource_management.libraries.functions.data_structure_utils import get_from_dict
 from resource_management.core.exceptions import Fail
 
-
 class StackAdvisor(object):
   """
   Abstract class implemented by all stack advisors. Stack advisors advise on stack specific questions. 
@@ -2006,6 +2006,23 @@ class DefaultStackAdvisor(StackAdvisor):
 
     return mount_points
 
+  def getStackRoot(self, services):
+    """
+    Gets the stack root associated with the stack
+    :param services: the services structure containing the current configurations
+    :return: the stack root as specified in the config or /usr/hdp
+    """
+    cluster_env = self.getServicesSiteProperties(services, "cluster-env")
+    stack_root = "/usr/hdp"
+    if cluster_env and "stack_root" in cluster_env:
+      stack_root_as_str = cluster_env["stack_root"]
+      stack_roots = json.loads(stack_root_as_str)
+      stack_name = cluster_env["stack_name"]
+      if stack_name in stack_roots:
+        stack_root = stack_roots[stack_name]
+
+    return stack_root
+
   def isSecurityEnabled(self, services):
     """
     Determines if security is enabled by testing the value of cluster-env/security enabled.
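
The key point of getStackRoot() is that cluster-env/stack_root is no longer a bare path but a JSON map keyed by stack name (see the test fixture below). A minimal standalone sketch of the lookup, with the /usr/hdp fallback preserved:

    import json

    def stack_root_of(cluster_env, default="/usr/hdp"):
        # cluster_env holds string properties, e.g. stack_root='{"HDP": "/usr/hdp"}'
        roots = json.loads(cluster_env.get("stack_root", "{}"))
        return roots.get(cluster_env.get("stack_name"), default)

    print(stack_root_of({"stack_root": '{"HDP": "/usr/hdp"}', "stack_name": "HDP"}))
    # /usr/hdp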

http://git-wip-us.apache.org/repos/asf/ambari/blob/853a5d4a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
index 50f527d..bf0cbec 100644
--- a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
@@ -825,70 +825,80 @@ class TestHDP25StackAdvisor(TestCase):
 
     services = {
       "services": [{
-        "StackServices": {
-          "service_name": "YARN",
-        },
-        "Versions": {
-          "stack_version": "2.5"
-        },
-        "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "NODEMANAGER",
-              "hostnames": ["c6401.ambari.apache.org"]
-            }
-          }
-        ]
-      }, {
-        "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE",
-        "StackServices": {
-          "service_name": "HIVE",
-          "service_version": "1.2.1.2.5",
-          "stack_name": "HDP",
-          "stack_version": "2.5"
-        },
-        "components": [
-          {
-            "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE/components/HIVE_SERVER_INTERACTIVE",
-            "StackServiceComponents": {
-              "advertise_version": "true",
-              "bulk_commands_display_name": "",
-              "bulk_commands_master_component_name": "",
-              "cardinality": "0-1",
-              "component_category": "MASTER",
-              "component_name": "HIVE_SERVER_INTERACTIVE",
-              "custom_commands": ["RESTART_LLAP"],
-              "decommission_allowed": "false",
-              "display_name": "HiveServer2 Interactive",
-              "has_bulk_commands_definition": "false",
-              "is_client": "false",
-              "is_master": "true",
-              "reassign_allowed": "false",
-              "recovery_enabled": "false",
-              "service_name": "HIVE",
-              "stack_name": "HDP",
-              "stack_version": "2.5",
-              "hostnames": ["c6401.ambari.apache.org"]
-            },
-            "dependencies": []
-          },
-          {
-            "StackServiceComponents": {
-              "advertise_version": "true",
-              "cardinality": "1+",
-              "component_category": "SLAVE",
-              "component_name": "NODEMANAGER",
-              "display_name": "NodeManager",
-              "is_client": "false",
-              "is_master": "false",
-              "hostnames": [
-                "c6403.ambari.apache.org"
-              ]
-            },
-            "dependencies": []
-          },
-        ]
-      }
+                     "StackServices": {
+                       "service_name": "TEZ"
+                     }
+                   },
+                   {
+                     "StackServices": {
+                       "service_name": "SPARK"
+                     }
+                   },
+                   {
+                     "StackServices": {
+                       "service_name": "YARN",
+                     },
+                     "Versions": {
+                       "stack_version": "2.5"
+                     },
+                     "components": [
+                       {
+                         "StackServiceComponents": {
+                           "component_name": "NODEMANAGER",
+                           "hostnames": ["c6401.ambari.apache.org"]
+                         }
+                       }
+                     ]
+                   }, {
+                     "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE",
+                     "StackServices": {
+                       "service_name": "HIVE",
+                       "service_version": "1.2.1.2.5",
+                       "stack_name": "HDP",
+                       "stack_version": "2.5"
+                     },
+                     "components": [
+                       {
+                         "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE/components/HIVE_SERVER_INTERACTIVE",
+                         "StackServiceComponents": {
+                           "advertise_version": "true",
+                           "bulk_commands_display_name": "",
+                           "bulk_commands_master_component_name": "",
+                           "cardinality": "0-1",
+                           "component_category": "MASTER",
+                           "component_name": "HIVE_SERVER_INTERACTIVE",
+                           "custom_commands": ["RESTART_LLAP"],
+                           "decommission_allowed": "false",
+                           "display_name": "HiveServer2 Interactive",
+                           "has_bulk_commands_definition": "false",
+                           "is_client": "false",
+                           "is_master": "true",
+                           "reassign_allowed": "false",
+                           "recovery_enabled": "false",
+                           "service_name": "HIVE",
+                           "stack_name": "HDP",
+                           "stack_version": "2.5",
+                           "hostnames": ["c6401.ambari.apache.org"]
+                         },
+                         "dependencies": []
+                       },
+                       {
+                         "StackServiceComponents": {
+                           "advertise_version": "true",
+                           "cardinality": "1+",
+                           "component_category": "SLAVE",
+                           "component_name": "NODEMANAGER",
+                           "display_name": "NodeManager",
+                           "is_client": "false",
+                           "is_master": "false",
+                           "hostnames": [
+                             "c6403.ambari.apache.org"
+                           ]
+                         },
+                         "dependencies": []
+                       },
+                     ]
+                   }
       ],
       "changed-configurations": [
         {
@@ -898,6 +908,12 @@ class TestHDP25StackAdvisor(TestCase):
         }
       ],
       "configurations": {
+        "cluster-env": {
+          "properties": {
+            "stack_root": "{\"HDP\":\"/usr/hdp\"}",
+            "stack_name": "HDP"
+          },
+        },
         "capacity-scheduler": {
           "properties": {
             "capacity-scheduler": 'yarn.scheduler.capacity.root.default.maximum-capacity=60\n'
@@ -960,7 +976,8 @@ class TestHDP25StackAdvisor(TestCase):
             "tez.am.resource.memory.mb": "341"
           }
         }
-      }
+      },
+      "ambari-server-properties": {"ambari-server.user":"ambari_user"}
     }
 
     clusterData = {
@@ -990,6 +1007,9 @@ class TestHDP25StackAdvisor(TestCase):
 
     self.assertEquals(configurations['hive-interactive-site']['properties']['hive.server2.tez.default.queues'], 'default')
     self.assertEquals(configurations['hive-interactive-site']['properties']['hive.llap.daemon.queue.name'], 'default')
+    self.assertEquals(configurations['yarn-site']['properties']['yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes'],
+                      'org.apache.tez.dag.history.logging.ats.TimelineCachePluginImpl,org.apache.spark.deploy.history.yarn.plugin.SparkATSPlugin')
+    self.assertEquals(configurations['yarn-site']['properties']['yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath'], '/usr/hdp/${hdp.version}/spark/hdpLib/*')
     self.assertTrue('hive-interactive-env' not in configurations)
     self.assertTrue('property_attributes' not in configurations)
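
For reference, the two yarn-site values asserted above decompose as follows. This is an assumed sketch of the composition; the real wiring lives in HDP25StackAdvisor and depends on TEZ and SPARK appearing in the service list of the fixture:

    plugin_classes = ",".join([
        "org.apache.tez.dag.history.logging.ats.TimelineCachePluginImpl",  # TEZ installed
        "org.apache.spark.deploy.history.yarn.plugin.SparkATSPlugin",      # SPARK installed
    ])
    plugin_classpath = "/usr/hdp" + "/${hdp.version}/spark/hdpLib/*"       # stack_root + suffix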
 


[24/50] [abbrv] ambari git commit: AMBARI-21504. Restart of MR2 History Server failed due to null in immutable_paths.(vbrodetskyi)

Posted by nc...@apache.org.
AMBARI-21504. Restart of MR2 History Server failed due to null in immutable_paths.(vbrodetskyi)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/274a9951
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/274a9951
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/274a9951

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 274a9951c34721a867e8e541b4fa73bdf03aa5d0
Parents: f450eba
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Tue Jul 18 17:14:10 2017 +0300
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Tue Jul 18 17:14:10 2017 +0300

----------------------------------------------------------------------
 .../AmbariCustomCommandExecutionHelper.java     |  7 +++++--
 .../AmbariManagementControllerImpl.java         |  3 ++-
 .../internal/ClientConfigResourceProvider.java  |  3 ++-
 .../ambari/server/state/ConfigHelper.java       | 17 +++++++++++++++
 .../HDFS/2.1.0.2.0/package/scripts/namenode.py  |  4 ++++
 .../YARN/2.1.0.2.0/package/scripts/service.py   |  4 ++++
 .../AmbariManagementControllerImplTest.java     | 12 ++++++++++-
 .../ambari/server/state/ConfigHelperTest.java   | 22 ++++++++++++++++++++
 8 files changed, 67 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/274a9951/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
index aeb5a9c..5180870 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
@@ -88,6 +88,7 @@ import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.HostComponentAdminState;
 import org.apache.ambari.server.state.HostState;
 import org.apache.ambari.server.state.MaintenanceState;
+import org.apache.ambari.server.state.PropertyInfo;
 import org.apache.ambari.server.state.PropertyInfo.PropertyType;
 import org.apache.ambari.server.state.RepositoryInfo;
 import org.apache.ambari.server.state.Service;
@@ -431,7 +432,8 @@ public class AmbariCustomCommandExecutionHelper {
       String groupList = gson.toJson(groupSet);
       hostLevelParams.put(GROUP_LIST, groupList);
 
-      Set<String> notManagedHdfsPathSet = configHelper.getPropertyValuesWithPropertyType(stackId, PropertyType.NOT_MANAGED_HDFS_PATH, cluster, desiredConfigs);
+      Map<PropertyInfo, String> notManagedHdfsPathMap = configHelper.getPropertiesWithPropertyType(stackId, PropertyType.NOT_MANAGED_HDFS_PATH, cluster, desiredConfigs);
+      Set<String> notManagedHdfsPathSet = configHelper.filterInvalidPropertyValues(notManagedHdfsPathMap, NOT_MANAGED_HDFS_PATH_LIST);
       String notManagedHdfsPathList = gson.toJson(notManagedHdfsPathSet);
       hostLevelParams.put(NOT_MANAGED_HDFS_PATH_LIST, notManagedHdfsPathList);
 
@@ -1494,7 +1496,8 @@ public class AmbariCustomCommandExecutionHelper {
     hostLevelParams.put(AGENT_STACK_RETRY_COUNT, configs.getAgentStackRetryOnInstallCount());
 
     Map<String, DesiredConfig> desiredConfigs = cluster.getDesiredConfigs();
-    Set<String> notManagedHdfsPathSet = configHelper.getPropertyValuesWithPropertyType(stackId, PropertyType.NOT_MANAGED_HDFS_PATH, cluster, desiredConfigs);
+    Map<PropertyInfo, String> notManagedHdfsPathMap = configHelper.getPropertiesWithPropertyType(stackId, PropertyType.NOT_MANAGED_HDFS_PATH, cluster, desiredConfigs);
+    Set<String> notManagedHdfsPathSet = configHelper.filterInvalidPropertyValues(notManagedHdfsPathMap, NOT_MANAGED_HDFS_PATH_LIST);
     String notManagedHdfsPathList = gson.toJson(notManagedHdfsPathSet);
     hostLevelParams.put(NOT_MANAGED_HDFS_PATH_LIST, notManagedHdfsPathList);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/274a9951/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 433ed56..38842fa 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -2522,7 +2522,8 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     String groupList = gson.toJson(groupSet);
     hostParams.put(GROUP_LIST, groupList);
 
-    Set<String> notManagedHdfsPathSet = configHelper.getPropertyValuesWithPropertyType(PropertyType.NOT_MANAGED_HDFS_PATH, cluster, clusterDesiredConfigs, servicesMap, stackProperties);
+    Map<PropertyInfo, String> notManagedHdfsPathMap = configHelper.getPropertiesWithPropertyType(PropertyType.NOT_MANAGED_HDFS_PATH, cluster, clusterDesiredConfigs, servicesMap, stackProperties);
+    Set<String> notManagedHdfsPathSet = configHelper.filterInvalidPropertyValues(notManagedHdfsPathMap, NOT_MANAGED_HDFS_PATH_LIST);
     String notManagedHdfsPathList = gson.toJson(notManagedHdfsPathSet);
     hostParams.put(NOT_MANAGED_HDFS_PATH_LIST, notManagedHdfsPathList);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/274a9951/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
index 15c2d81..166fc5f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
@@ -407,7 +407,8 @@ public class ClientConfigResourceProvider extends AbstractControllerResourceProv
         String groupList = gson.toJson(groupSet);
         hostLevelParams.put(GROUP_LIST, groupList);
 
-        Set<String> notManagedHdfsPathSet = configHelper.getPropertyValuesWithPropertyType(stackId, PropertyType.NOT_MANAGED_HDFS_PATH, cluster, desiredClusterConfigs);
+        Map<org.apache.ambari.server.state.PropertyInfo, String> notManagedHdfsPathMap = configHelper.getPropertiesWithPropertyType(stackId, PropertyType.NOT_MANAGED_HDFS_PATH, cluster, desiredClusterConfigs);
+        Set<String> notManagedHdfsPathSet = configHelper.filterInvalidPropertyValues(notManagedHdfsPathMap, NOT_MANAGED_HDFS_PATH_LIST);
         String notManagedHdfsPathList = gson.toJson(notManagedHdfsPathSet);
         hostLevelParams.put(NOT_MANAGED_HDFS_PATH_LIST, notManagedHdfsPathList);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/274a9951/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
index a3a676d..2a70ee1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
@@ -220,6 +220,23 @@ public class ConfigHelper {
     return resolved;
   }
 
+
+  public Set<String> filterInvalidPropertyValues(Map<PropertyInfo, String> properties, String filteredListName) {
+    Set<String> resultSet = new HashSet<>();
+    for (Iterator<Entry<PropertyInfo, String>> iterator = properties.entrySet().iterator(); iterator.hasNext();) {
+      Entry<PropertyInfo, String> property = iterator.next();
+      PropertyInfo propertyInfo = property.getKey();
+      String propertyValue = property.getValue();
+      if (propertyValue == null || propertyValue.toLowerCase().equals("null") || propertyValue.isEmpty()) {
+        LOG.error(String.format("Excluding property %s from %s, because of invalid or empty value!", propertyInfo.getName(), filteredListName));
+        iterator.remove();
+      } else {
+        resultSet.add(propertyValue);
+      }
+    }
+    return resultSet;
+  }
+
   /**
    * Get all config properties for a cluster given a set of configType to
    * versionTags map. This helper method merges all the override tags with a
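
The filter's rule is simple: a value of null, the empty string, or the literal string "null" (any case) disqualifies the property, removes it from the input map, and logs which list it was excluded from. A Python rendering of the same predicate for illustration (not the Ambari API; values are assumed to be strings, as in the Java map):

    def filter_invalid_property_values(properties):
        # properties: {property_name: value}; returns the surviving values,
        # mirroring filterInvalidPropertyValues() above.
        valid = {}
        for name, value in properties.items():
            if value is None or value == "" or value.lower() == "null":
                print("Excluding property %s because of invalid or empty value!" % name)
                continue
            valid[name] = value
        return set(valid.values())

    print(filter_invalid_property_values({"hdfs_tmp_dir": "null", "apps_dir": "/app-logs"}))
    # logs the exclusion for hdfs_tmp_dir, then prints {'/app-logs'}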

http://git-wip-us.apache.org/repos/asf/ambari/blob/274a9951/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
index c554349..897e6cb 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
@@ -96,6 +96,10 @@ class NameNode(Script):
     env.set_params(params)
     self.configure(env)
     hdfs_binary = self.get_hdfs_binary()
+
+    if not params.hdfs_tmp_dir or params.hdfs_tmp_dir.lower() == 'null':
+      Logger.error("WARNING: HDFS tmp dir property (hdfs_tmp_dir) is empty or invalid. Ambari will change permissions for the folder on a regular basis.")
+
     namenode(action="start", hdfs_binary=hdfs_binary, upgrade_type=upgrade_type,
       upgrade_suspended=params.upgrade_suspended, env=env)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/274a9951/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service.py
index 1c1b11b..7c59b60 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service.py
@@ -26,6 +26,7 @@ from resource_management.libraries.functions.show_logs import show_logs
 from resource_management.libraries.functions.format import format
 from resource_management.core.resources.system import Execute, File
 from resource_management.core.signal_utils import TerminateStrategy
+from resource_management.core.logger import Logger
 
 @OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
 def service(componentName, action='start', serviceName='yarn'):
@@ -43,6 +44,9 @@ def service(componentName, action='start', serviceName='yarn'):
   import params
 
   if serviceName == 'mapreduce' and componentName == 'historyserver':
+    if not params.hdfs_tmp_dir or params.hdfs_tmp_dir.lower() == 'null':
+      Logger.error("WARNING: HDFS tmp dir property (hdfs_tmp_dir) is empty or invalid. Ambari will change permissions for the folder on a regular basis.")
+
     delete_pid_file = True
     daemon = format("{mapred_bin}/mr-jobhistory-daemon.sh")
     pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-{componentName}.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/274a9951/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
index eadc678..1f2c332 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
@@ -2059,6 +2059,14 @@ public class AmbariManagementControllerImplTest {
     String JCE_NAME = "jceName";
     String OJDBC_JAR_NAME = "OjdbcJarName";
     String SERVER_DB_NAME = "ServerDBName";
+    Map<PropertyInfo, String> notManagedHdfsPathMap = new HashMap<>();
+    PropertyInfo propertyInfo1 = new PropertyInfo();
+    propertyInfo1.setName("1");
+    PropertyInfo propertyInfo2 = new PropertyInfo();
+    propertyInfo2.setName("2");
+    notManagedHdfsPathMap.put(propertyInfo1, "/tmp");
+    notManagedHdfsPathMap.put(propertyInfo2, "/apps/falcon");
+
     Set<String> notManagedHdfsPathSet = new HashSet<>(Arrays.asList("/tmp", "/apps/falcon"));
     Gson gson = new Gson();
 
@@ -2089,8 +2097,10 @@ public class AmbariManagementControllerImplTest {
     expect(configuration.getPreviousDatabaseConnectorNames()).andReturn(new HashMap<String, String>()).anyTimes();
     expect(repositoryVersionEntity.getVersion()).andReturn("1234").anyTimes();
     expect(repositoryVersionEntity.getStackId()).andReturn(stackId).anyTimes();
-    expect(configHelper.getPropertyValuesWithPropertyType(stackId,
+    expect(configHelper.getPropertiesWithPropertyType(stackId,
         PropertyInfo.PropertyType.NOT_MANAGED_HDFS_PATH, cluster, desiredConfigs)).andReturn(
+            notManagedHdfsPathMap);
+    expect(configHelper.filterInvalidPropertyValues(notManagedHdfsPathMap, NOT_MANAGED_HDFS_PATH_LIST)).andReturn(
             notManagedHdfsPathSet);
 
     replay(manager, clusters, cluster, injector, stackId, configuration, repositoryVersionEntity, configHelper);

http://git-wip-us.apache.org/repos/asf/ambari/blob/274a9951/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
index 857da61..6e84b33 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
@@ -764,6 +764,28 @@ public class ConfigHelperTest {
     }
 
     @Test
+    public void testFilterInvalidPropertyValues() {
+      Map<PropertyInfo, String> properties = new HashMap<>();
+      PropertyInfo prop1 = new PropertyInfo();
+      prop1.setName("1");
+      PropertyInfo prop2 = new PropertyInfo();
+      prop2.setName("2");
+      PropertyInfo prop3 = new PropertyInfo();
+      prop3.setName("3");
+      PropertyInfo prop4 = new PropertyInfo();
+      prop4.setName("4");
+
+      properties.put(prop1, "/tmp");
+      properties.put(prop2, "null");
+      properties.put(prop3, "");
+      properties.put(prop4, null);
+
+      Set<String> resultSet = configHelper.filterInvalidPropertyValues(properties, "testlist");
+      Assert.assertEquals(1, resultSet.size());
+      Assert.assertEquals(resultSet.iterator().next(), "/tmp");
+    }
+
+    @Test
     public void testMergeAttributesWithNullProperties() throws Exception {
       Map<String, Map<String, String>> persistedAttributes = new HashMap<>();
       Map<String, String> persistedFinalAttrs = new HashMap<>();


[18/50] [abbrv] ambari git commit: AMBARI-21442. Ambari updates memory settings in blueprint incorrectly (amagyar)

Posted by nc...@apache.org.
AMBARI-21442. Ambari updates memory settings in blueprint incorrectly (amagyar)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/93fe8487
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/93fe8487
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/93fe8487

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 93fe8487a16fd2fe4f0c6a19bdc5d8a7c7b304a7
Parents: f92d121
Author: Attila Magyar <am...@hortonworks.com>
Authored: Mon Jul 17 10:19:38 2017 +0200
Committer: Attila Magyar <am...@hortonworks.com>
Committed: Mon Jul 17 10:19:38 2017 +0200

----------------------------------------------------------------------
 .../BlueprintConfigurationProcessor.java        |  64 +++-----
 .../server/controller/internal/Stack.java       |   2 +-
 .../server/controller/internal/UnitUpdater.java | 150 +++++++++++++++++++
 .../validators/TopologyValidatorFactory.java    |   2 +-
 .../validators/UnitValidatedProperty.java       |  95 ++++++++++++
 .../topology/validators/UnitValidator.java      |  79 ++++++++++
 .../controller/internal/UnitUpdaterTest.java    | 114 ++++++++++++++
 .../topology/validators/UnitValidatorTest.java  | 114 ++++++++++++++
 8 files changed, 571 insertions(+), 49 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/93fe8487/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
index 37284be..1daf76f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
@@ -19,6 +19,8 @@
 package org.apache.ambari.server.controller.internal;
 
 
+import static java.util.stream.Collectors.groupingBy;
+
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -48,6 +50,7 @@ import org.apache.ambari.server.topology.ConfigRecommendationStrategy;
 import org.apache.ambari.server.topology.Configuration;
 import org.apache.ambari.server.topology.HostGroup;
 import org.apache.ambari.server.topology.HostGroupInfo;
+import org.apache.ambari.server.topology.validators.UnitValidatedProperty;
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -2041,39 +2044,6 @@ public class BlueprintConfigurationProcessor {
   }
 
   /**
-   * Updater which appends "m" to the original property value.
-   * For example, "1024" would be updated to "1024m".
-   */
-  private static class MPropertyUpdater implements PropertyUpdater {
-    /**
-     * Append 'm' to the original property value if it doesn't already exist.
-     *
-     * @param propertyName  property name
-     * @param origValue     original value of property
-     * @param properties    all properties
-     * @param topology      cluster topology
-     *
-     * @return property with 'm' appended
-     */
-    @Override
-    public String updateForClusterCreate(String propertyName,
-                                         String origValue,
-                                         Map<String, Map<String, String>> properties,
-                                         ClusterTopology topology) {
-
-      return origValue.endsWith("m") ? origValue : origValue + 'm';
-    }
-
-    @Override
-    public Collection<String> getRequiredHostGroups(String propertyName,
-                                                    String origValue,
-                                                    Map<String, Map<String, String>> properties,
-                                                    ClusterTopology topology) {
-      return Collections.emptySet();
-    }
-  }
-
-  /**
    * Class to facilitate special formatting needs of property values.
    */
   private abstract static class AbstractPropertyValueDecorator implements PropertyUpdater {
@@ -2784,20 +2754,7 @@ public class BlueprintConfigurationProcessor {
       new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
     // Required due to AMBARI-4933.  These no longer seem to be required as the default values in the stack
     // are now correct but are left here in case an existing blueprint still contains an old value.
-    mHadoopEnvMap.put("namenode_heapsize", new MPropertyUpdater());
-    mHadoopEnvMap.put("namenode_opt_newsize", new MPropertyUpdater());
-    mHadoopEnvMap.put("namenode_opt_maxnewsize", new MPropertyUpdater());
-    mHadoopEnvMap.put("namenode_opt_permsize", new MPropertyUpdater());
-    mHadoopEnvMap.put("namenode_opt_maxpermsize", new MPropertyUpdater());
-    mHadoopEnvMap.put("dtnode_heapsize", new MPropertyUpdater());
-    mapredEnvMap.put("jtnode_opt_newsize", new MPropertyUpdater());
-    mapredEnvMap.put("jtnode_opt_maxnewsize", new MPropertyUpdater());
-    mapredEnvMap.put("jtnode_heapsize", new MPropertyUpdater());
-    hbaseEnvMap.put("hbase_master_heapsize", new MPropertyUpdater());
-    hbaseEnvMap.put("hbase_regionserver_heapsize", new MPropertyUpdater());
-    oozieEnvHeapSizeMap.put("oozie_heapsize", new MPropertyUpdater());
-    oozieEnvHeapSizeMap.put("oozie_permsize", new MPropertyUpdater());
-    zookeeperEnvMap.put("zk_server_heapsize", new MPropertyUpdater());
+    addUnitPropertyUpdaters();
 
     hawqSiteMap.put("hawq_master_address_host", new SingleHostTopologyUpdater("HAWQMASTER"));
     hawqSiteMap.put("hawq_standby_address_host", new SingleHostTopologyUpdater("HAWQSTANDBY"));
@@ -2816,6 +2773,19 @@ public class BlueprintConfigurationProcessor {
     });
   }
 
+  private static void addUnitPropertyUpdaters() {
+    Map<String, List<UnitValidatedProperty>> propsPerConfigType = UnitValidatedProperty.ALL
+      .stream()
+      .collect(groupingBy(UnitValidatedProperty::getConfigType));
+    for (String configType : propsPerConfigType.keySet()) {
+      Map<String, PropertyUpdater> unitUpdaters = new HashMap<>();
+      for (UnitValidatedProperty each : propsPerConfigType.get(configType)) {
+        unitUpdaters.put(each.getPropertyName(), new UnitUpdater(each.getServiceName(), each.getConfigType()));
+      }
+      mPropertyUpdaters.put(configType, unitUpdaters);
+    }
+  }
+
   private Collection<String> setupHDFSProxyUsers(Configuration configuration, Set<String> configTypesUpdated) {
     // AMBARI-5206
     final Map<String , String> userProps = new HashMap<>();

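For reference, a minimal sketch of the groupingBy step used above (strings of the form "configType|property" stand in for UnitValidatedProperty; illustration only):

    import static java.util.stream.Collectors.groupingBy;

    import java.util.Arrays;
    import java.util.List;
    import java.util.Map;

    public class GroupingSketch {
      public static void main(String[] args) {
        List<String> all = Arrays.asList(
            "hadoop-env|namenode_heapsize",
            "hadoop-env|dtnode_heapsize",
            "oozie-env|oozie_heapsize");

        // Bucket the flat registry by config type, one updater map per type.
        Map<String, List<String>> perConfigType = all.stream()
            .collect(groupingBy(each -> each.split("\\|")[0]));

        perConfigType.forEach((type, props) ->
            System.out.println(type + " -> " + props));
        // hadoop-env -> [hadoop-env|namenode_heapsize, hadoop-env|dtnode_heapsize]
        // oozie-env -> [oozie-env|oozie_heapsize]
      }
    }
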
http://git-wip-us.apache.org/repos/asf/ambari/blob/93fe8487/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/Stack.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/Stack.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/Stack.java
index e1ea1cd..a28a3b5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/Stack.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/Stack.java
@@ -757,7 +757,7 @@ public class Stack {
     private Set<PropertyDependencyInfo> dependsOnProperties =
       Collections.emptySet();
 
-    ConfigProperty(StackConfigurationResponse config) {
+    public ConfigProperty(StackConfigurationResponse config) {
       this.name = config.getPropertyName();
       this.value = config.getPropertyValue();
       this.attributes = config.getPropertyAttributes();

http://git-wip-us.apache.org/repos/asf/ambari/blob/93fe8487/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UnitUpdater.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UnitUpdater.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UnitUpdater.java
new file mode 100644
index 0000000..8b7cb67
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UnitUpdater.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.controller.internal;
+
+import static org.apache.commons.lang.StringUtils.isBlank;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Optional;
+
+import org.apache.ambari.server.topology.ClusterTopology;
+import org.apache.ambari.server.topology.validators.UnitValidatedProperty;
+
+/**
+ * I append the stack-defined unit to the original property value.
+ * For example, "1024" would be updated to "1024m" if the stack unit is MB.
+ * Properties with any unit other than the stack-defined unit are rejected.
+ */
+public class UnitUpdater implements BlueprintConfigurationProcessor.PropertyUpdater {
+  private final String serviceName;
+  private final String configType;
+
+  public UnitUpdater(String serviceName, String configType) {
+    this.serviceName = serviceName;
+    this.configType = configType;
+  }
+
+  /**
+   * @return property value with updated unit
+   */
+  @Override
+  public String updateForClusterCreate(String propertyName,
+                                       String origValue,
+                                       Map<String, Map<String, String>> properties,
+                                       ClusterTopology topology) {
+      PropertyUnit stackUnit = PropertyUnit.of(topology.getBlueprint().getStack(), serviceName, configType, propertyName);
+      PropertyValue value = PropertyValue.of(propertyName, origValue);
+      if (value.hasUnit(stackUnit)) {
+        return value.toString();
+      } else if (!value.hasAnyUnit()) {
+        return value.withUnit(stackUnit);
+      } else { // should not happen because of prevalidation in UnitValidator
+        throw new IllegalArgumentException("Property " + propertyName + "=" + origValue + " has an unsupported unit. Stack supported unit is: " + stackUnit + " or no unit");
+      }
+  }
+
+  @Override
+  public Collection<String> getRequiredHostGroups(String propertyName, String origValue, Map<String, Map<String, String>> properties, ClusterTopology topology) {
+    return Collections.emptySet();
+  }
+
+  public static class PropertyUnit {
+    private static final String DEFAULT_UNIT = "m";
+    private final String unit;
+
+    public static PropertyUnit of(Stack stack, UnitValidatedProperty property) {
+      return PropertyUnit.of(stack, property.getServiceName(), property.getConfigType(), property.getPropertyName());
+    }
+
+    public static PropertyUnit of(Stack stack, String serviceName, String configType, String propertyName) {
+      return new PropertyUnit(
+        stackUnit(stack, serviceName, configType, propertyName)
+          .map(PropertyUnit::toJvmUnit)
+          .orElse(DEFAULT_UNIT));
+    }
+
+    private static Optional<String> stackUnit(Stack stack, String serviceName, String configType, String propertyName) {
+      try {
+        return Optional.ofNullable(
+          stack.getConfigurationPropertiesWithMetadata(serviceName, configType)
+            .get(propertyName)
+            .getPropertyValueAttributes()
+            .getUnit());
+      } catch (NullPointerException e) {
+        return Optional.empty();
+      }
+    }
+
+    private static String toJvmUnit(String stackUnit) {
+      switch (stackUnit.toLowerCase()) {
+        case "mb" : return "m";
+        case "gb" : return "g";
+        case "b"  :
+        case "bytes" : return "";
+        default: throw new IllegalArgumentException("Unsupported stack unit: " + stackUnit);
+      }
+    }
+
+    private PropertyUnit(String unit) {
+      this.unit = unit;
+    }
+
+    @Override
+    public String toString() {
+      return unit;
+    }
+  }
+
+  public static class PropertyValue {
+    private final String value;
+
+    public static PropertyValue of(String name, String value) {
+      return new PropertyValue(normalized(name, value));
+    }
+
+    private static String normalized(String name, String value) {
+      if (isBlank(value)) {
+        throw new IllegalArgumentException("Missing property value " + name);
+      }
+      return value.trim().toLowerCase();
+    }
+
+    private PropertyValue(String value) {
+      this.value = value;
+    }
+
+    public boolean hasUnit(PropertyUnit unit) {
+      return value.endsWith(unit.toString());
+    }
+
+    public boolean hasAnyUnit() {
+      return !Character.isDigit(value.charAt(value.length() - 1));
+    }
+
+    public String withUnit(PropertyUnit unit) {
+      return value + unit;
+    }
+
+    @Override
+    public String toString() {
+      return value;
+    }
+  }
+}
\ No newline at end of file

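To illustrate the normalization rules implemented by PropertyUnit/PropertyValue above, a standalone behavior sketch (it assumes the stack unit has already been translated to its JVM suffix, e.g. "m" for MB, "g" for GB, "" for bytes; not the Ambari class itself):

    public class UnitUpdaterSketch {
      static String withJvmUnit(String raw, String jvmUnit) {
        if (raw == null || raw.trim().isEmpty()) {
          throw new IllegalArgumentException("Missing property value");
        }
        String value = raw.trim().toLowerCase(); // " 1024M " -> "1024m"
        if (value.endsWith(jvmUnit)) {
          return value;                          // already carries the stack unit
        }
        if (Character.isDigit(value.charAt(value.length() - 1))) {
          return value + jvmUnit;                // bare number: append the unit
        }
        throw new IllegalArgumentException("Unsupported unit in: " + raw);
      }

      public static void main(String[] args) {
        System.out.println(withJvmUnit("1024", "m"));    // 1024m
        System.out.println(withJvmUnit(" 1024M ", "m")); // 1024m
        // withJvmUnit("2g", "m") -> IllegalArgumentException
      }
    }
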
http://git-wip-us.apache.org/repos/asf/ambari/blob/93fe8487/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/TopologyValidatorFactory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/TopologyValidatorFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/TopologyValidatorFactory.java
index 5a6f64e..bc76bff 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/TopologyValidatorFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/TopologyValidatorFactory.java
@@ -25,7 +25,7 @@ public class TopologyValidatorFactory {
 
   public TopologyValidatorFactory() {
     validators = ImmutableList.of(new RequiredConfigPropertiesValidator(), new RequiredPasswordValidator(), new HiveServiceValidator(),
-      new StackConfigTypeValidator());
+      new StackConfigTypeValidator(), new UnitValidator(UnitValidatedProperty.ALL));
   }
 
   public TopologyValidator createConfigurationValidatorChain() {

http://git-wip-us.apache.org/repos/asf/ambari/blob/93fe8487/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/UnitValidatedProperty.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/UnitValidatedProperty.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/UnitValidatedProperty.java
new file mode 100644
index 0000000..61f01db
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/UnitValidatedProperty.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.topology.validators;
+
+import java.util.Set;
+
+import org.apache.commons.lang.builder.EqualsBuilder;
+import org.apache.commons.lang.builder.HashCodeBuilder;
+
+import com.google.common.collect.ImmutableSet;
+
+/**
+ * Some configuration values need to have "m" appended to them to be valid values.
+ * Required due to AMBARI-4933.
+ */
+public class UnitValidatedProperty {
+  public static final Set<UnitValidatedProperty> ALL = ImmutableSet.<UnitValidatedProperty>builder()
+    .add(new UnitValidatedProperty("HDFS", "hadoop-env", "namenode_heapsize"))
+    .add(new UnitValidatedProperty("HDFS", "hadoop-env", "namenode_opt_newsize"))
+    .add(new UnitValidatedProperty("HDFS", "hadoop-env", "namenode_opt_maxnewsize"))
+    .add(new UnitValidatedProperty("HDFS", "hadoop-env", "namenode_opt_permsize"))
+    .add(new UnitValidatedProperty("HDFS", "hadoop-env", "namenode_opt_maxpermsize"))
+    .add(new UnitValidatedProperty("HDFS", "hadoop-env", "dtnode_heapsize"))
+    .add(new UnitValidatedProperty("MAPREDUCE2", "mapred-env","jtnode_opt_newsize"))
+    .add(new UnitValidatedProperty("MAPREDUCE2", "mapred-env","jtnode_opt_maxnewsize"))
+    .add(new UnitValidatedProperty("MAPREDUCE2", "mapred-env","jtnode_heapsize"))
+    .add(new UnitValidatedProperty("HBASE", "hbase-env", "hbase_master_heapsize"))
+    .add(new UnitValidatedProperty("HBASE", "hbase-env","hbase_regionserver_heapsize"))
+    .add(new UnitValidatedProperty("OOZIE", "oozie-env","oozie_heapsize"))
+    .add(new UnitValidatedProperty("OOZIE", "oozie-env", "oozie_permsize"))
+    .add(new UnitValidatedProperty("ZOOKEEPER", "zookeeper-env", "zk_server_heapsize"))
+    .build();
+
+  private final String configType;
+  private final String serviceName;
+  private final String propertyName;
+
+  public UnitValidatedProperty(String serviceName, String configType, String propertyName) {
+    this.configType = configType;
+    this.serviceName = serviceName;
+    this.propertyName = propertyName;
+  }
+
+  public boolean hasTypeAndName(String configType, String propertyName) {
+    return configType.equals(this.getConfigType()) && propertyName.equals(this.getPropertyName());
+  }
+
+  public String getConfigType() {
+    return configType;
+  }
+
+  public String getServiceName() {
+    return serviceName;
+  }
+
+  public String getPropertyName() {
+    return propertyName;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    UnitValidatedProperty that = (UnitValidatedProperty) o;
+    return new EqualsBuilder()
+      .append(configType, that.configType)
+      .append(serviceName, that.serviceName)
+      .append(propertyName, that.propertyName)
+      .isEquals();
+  }
+
+  @Override
+  public int hashCode() {
+    return new HashCodeBuilder(17, 37)
+      .append(configType)
+      .append(serviceName)
+      .append(propertyName)
+      .toHashCode();
+  }
+}

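A short sketch of how a (configType, propertyName) pair is matched against this registry, as UnitValidator does via hasTypeAndName(); class name and wiring here are illustrative:

    import org.apache.ambari.server.topology.validators.UnitValidatedProperty;

    public class RegistryLookupSketch {
      public static void main(String[] args) {
        // Find the registry entry for oozie_heapsize in oozie-env.
        UnitValidatedProperty match = UnitValidatedProperty.ALL.stream()
            .filter(p -> p.hasTypeAndName("oozie-env", "oozie_heapsize"))
            .findFirst()
            .orElseThrow(IllegalStateException::new);
        System.out.println(match.getServiceName()); // OOZIE
      }
    }
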
http://git-wip-us.apache.org/repos/asf/ambari/blob/93fe8487/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/UnitValidator.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/UnitValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/UnitValidator.java
new file mode 100644
index 0000000..e75ffa4
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/UnitValidator.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.topology.validators;
+
+import static org.apache.ambari.server.controller.internal.UnitUpdater.PropertyValue;
+
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.ambari.server.controller.internal.Stack;
+import org.apache.ambari.server.controller.internal.UnitUpdater.PropertyUnit;
+import org.apache.ambari.server.topology.ClusterTopology;
+import org.apache.ambari.server.topology.HostGroupInfo;
+import org.apache.ambari.server.topology.InvalidTopologyException;
+import org.apache.ambari.server.topology.TopologyValidator;
+
+/**
+ * I validate the unit of properties by checking whether it matches the stack-defined unit.
+ * Properties with a unit different from the stack-defined unit are rejected.
+ */
+public class UnitValidator implements TopologyValidator {
+  private final Set<UnitValidatedProperty> relevantProps;
+
+  public UnitValidator(Set<UnitValidatedProperty> propertiesToBeValidated) {
+    this.relevantProps = propertiesToBeValidated;
+  }
+
+  @Override
+  public void validate(ClusterTopology topology) throws InvalidTopologyException {
+    Stack stack = topology.getBlueprint().getStack();
+    validateConfig(topology.getConfiguration().getFullProperties(), stack);
+    for (HostGroupInfo hostGroup : topology.getHostGroupInfo().values()) {
+      validateConfig(hostGroup.getConfiguration().getFullProperties(), stack);
+    }
+  }
+
+  private void validateConfig(Map<String, Map<String, String>> configuration, Stack stack) {
+    for (Map.Entry<String, Map<String, String>> each : configuration.entrySet()) {
+      validateConfigType(each.getKey(), each.getValue(), stack);
+    }
+  }
+
+  private void validateConfigType(String configType, Map<String, String> config, Stack stack) {
+    for (String propertyName : config.keySet()) {
+      validateProperty(configType, config, propertyName, stack);
+    }
+  }
+
+  private void validateProperty(String configType, Map<String, String> config, String propertyName, Stack stack) {
+    relevantProps.stream()
+      .filter(each -> each.hasTypeAndName(configType, propertyName))
+      .findFirst()
+      .ifPresent(relevantProperty -> checkUnit(config, stack, relevantProperty));
+  }
+
+  private void checkUnit(Map<String, String> configToBeValidated, Stack stack, UnitValidatedProperty prop) {
+    PropertyUnit stackUnit = PropertyUnit.of(stack, prop);
+    PropertyValue value = PropertyValue.of(prop.getPropertyName(), configToBeValidated.get(prop.getPropertyName()));
+    if (value.hasAnyUnit() && !value.hasUnit(stackUnit)) {
+      throw new IllegalArgumentException("Property " + prop.getPropertyName() + "=" + value + " has an unsupported unit. Stack supported unit is: " + stackUnit + " or no unit");
+    }
+  }
+
+}

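The validate-then-update split introduced here can be summarized with a small standalone predicate (illustration only): the validator rejects a value only when it carries some unit that is not the stack unit; bare numbers pass and are later completed by UnitUpdater.

    public class UnitCheckSketch {
      static boolean isRejected(String value, String stackJvmUnit) {
        String v = value.trim().toLowerCase();
        boolean hasAnyUnit = !Character.isDigit(v.charAt(v.length() - 1));
        return hasAnyUnit && !v.endsWith(stackJvmUnit);
      }

      public static void main(String[] args) {
        System.out.println(isRejected("12m", "m")); // false: matches stack unit
        System.out.println(isRejected("12", "m"));  // false: updater appends "m"
        System.out.println(isRejected("12g", "m")); // true: conflicting unit
      }
    }
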
http://git-wip-us.apache.org/repos/asf/ambari/blob/93fe8487/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UnitUpdaterTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UnitUpdaterTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UnitUpdaterTest.java
new file mode 100644
index 0000000..6de6cd1
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UnitUpdaterTest.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.controller.internal;
+
+import static org.easymock.EasyMock.expect;
+import static org.junit.Assert.assertEquals;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.ambari.server.controller.StackConfigurationResponse;
+import org.apache.ambari.server.state.ValueAttributesInfo;
+import org.apache.ambari.server.topology.Blueprint;
+import org.apache.ambari.server.topology.ClusterTopology;
+import org.apache.ambari.server.topology.InvalidTopologyException;
+import org.easymock.EasyMockRule;
+import org.easymock.EasyMockSupport;
+import org.easymock.Mock;
+import org.junit.Rule;
+import org.junit.Test;
+
+public class UnitUpdaterTest extends EasyMockSupport {
+  public static final String HEAPSIZE = "oozie_heapsize";
+  @Rule public EasyMockRule mocks = new EasyMockRule(this);
+  public static final String OOZIE = "OOZIE";
+  public static final String OOZIE_ENV = "oozie-env";
+  private Map<String, Stack.ConfigProperty> stackConfigWithMetadata = new HashMap<>();
+  private UnitUpdater unitUpdater;
+  private @Mock ClusterTopology clusterTopology;
+  private @Mock Blueprint blueprint;
+  private @Mock Stack stack;
+
+  @Test
+  public void testStackUnitIsAppendedWhereUnitIsNotDefined() throws Exception {
+    stackUnitIs(HEAPSIZE, "GB");
+    assertEquals("1g", updateUnit(OOZIE, OOZIE_ENV, HEAPSIZE, "1"));
+  }
+
+  @Test
+  public void testDefaultMbStackUnitIsAppendedWhereUnitIsNotDefined() throws Exception {
+    assertEquals("4096m", updateUnit(OOZIE, OOZIE_ENV, HEAPSIZE, "4096"));
+  }
+
+  @Test
+  public void testNoUnitIsAppendedWhenPropertyAlreadyHasTheStackUnit() throws Exception {
+    stackUnitIs(HEAPSIZE, "MB");
+    assertEquals("128m", updateUnit(OOZIE, OOZIE_ENV, HEAPSIZE, "128m"));
+  }
+
+  @Test
+  public void testNoUnitIsAppendedIfStackUnitIsInBytes() throws Exception {
+    stackUnitIs(HEAPSIZE, "Bytes");
+    assertEquals("128", updateUnit(OOZIE, OOZIE_ENV, HEAPSIZE, "128"));
+  }
+
+  @Test
+  public void testUnitSuffixIsCaseInsensitiveAndWhiteSpaceTolerant() throws Exception {
+    stackUnitIs(HEAPSIZE, "GB");
+    assertEquals("1g", updateUnit(OOZIE, OOZIE_ENV, HEAPSIZE, " 1G "));
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testRejectValuesWhereStackUnitDoesNotMatchTheGivenUnit() throws Exception {
+    stackUnitIs(HEAPSIZE, "MB");
+    updateUnit(OOZIE, OOZIE_ENV, HEAPSIZE, "2g");
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testRejectEmptyPropertyValue() throws Exception {
+    updateUnit(OOZIE, OOZIE_ENV, HEAPSIZE, "");
+  }
+
+  private void stackUnitIs(String name, String unit) {
+    ValueAttributesInfo propertyValueAttributes = new ValueAttributesInfo();
+    propertyValueAttributes.setUnit(unit);
+    stackConfigWithMetadata.put(name, new Stack.ConfigProperty(new StackConfigurationResponse(
+      name,
+      "any",
+      "any",
+      "any",
+      "any",
+      true,
+      Collections.emptySet(),
+      Collections.emptyMap(),
+      propertyValueAttributes,
+      Collections.emptySet()
+    )));
+  }
+
+  private String updateUnit(String serviceName, String configType, String propName, String propValue) throws InvalidTopologyException, ConfigurationTopologyException {
+    UnitUpdater updater = new UnitUpdater(serviceName, configType);
+    expect(clusterTopology.getBlueprint()).andReturn(blueprint).anyTimes();
+    expect(blueprint.getStack()).andReturn(stack).anyTimes();
+    expect(stack.getConfigurationPropertiesWithMetadata(serviceName, configType)).andReturn(stackConfigWithMetadata).anyTimes();
+    replayAll();
+    return updater.updateForClusterCreate(propName, propValue, Collections.emptyMap(), clusterTopology);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/93fe8487/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/UnitValidatorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/UnitValidatorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/UnitValidatorTest.java
new file mode 100644
index 0000000..334ee4b
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/UnitValidatorTest.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.topology.validators;
+
+import static com.google.common.collect.Sets.newHashSet;
+import static java.util.Collections.emptyMap;
+import static org.easymock.EasyMock.expect;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.ambari.server.controller.StackConfigurationResponse;
+import org.apache.ambari.server.controller.internal.ConfigurationTopologyException;
+import org.apache.ambari.server.controller.internal.Stack;
+import org.apache.ambari.server.state.ValueAttributesInfo;
+import org.apache.ambari.server.topology.Blueprint;
+import org.apache.ambari.server.topology.ClusterTopology;
+import org.apache.ambari.server.topology.Configuration;
+import org.apache.ambari.server.topology.InvalidTopologyException;
+import org.easymock.EasyMockRule;
+import org.easymock.EasyMockSupport;
+import org.easymock.Mock;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+
+
+public class UnitValidatorTest extends EasyMockSupport {
+  private static final String CONFIG_TYPE = "config-type";
+  private static final String SERVICE = "service";
+  @Rule public EasyMockRule mocks = new EasyMockRule(this);
+  private Map<String, Stack.ConfigProperty> stackConfigWithMetadata = new HashMap<>();
+  private UnitValidator validator;
+  private @Mock ClusterTopology clusterTopology;
+  private @Mock Blueprint blueprint;
+  private @Mock Stack stack;
+
+  @Test(expected = IllegalArgumentException.class)
+  public void rejectsPropertyWithDifferentUnitThanStackUnit() throws Exception {
+    stackUnitIs("property1", "MB");
+    propertyToBeValidatedIs("property1", "12G");
+    validate("property1");
+  }
+
+  @Test
+  public void acceptsPropertyWithSameUnitAsStackUnit() throws Exception {
+    stackUnitIs("property1", "MB");
+    propertyToBeValidatedIs("property1", "12m");
+    validate("property1");
+  }
+
+  @Test
+  public void skipsValidatingIrrelevantProperty() throws Exception {
+    stackUnitIs("property1", "MB");
+    propertyToBeValidatedIs("property1", "12g");
+    validate("property2");
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    expect(clusterTopology.getBlueprint()).andReturn(blueprint).anyTimes();
+    expect(clusterTopology.getHostGroupInfo()).andReturn(Collections.emptyMap()).anyTimes();
+    expect(blueprint.getStack()).andReturn(stack).anyTimes();
+    expect(stack.getConfigurationPropertiesWithMetadata(SERVICE, CONFIG_TYPE)).andReturn(stackConfigWithMetadata).anyTimes();
+  }
+
+  private void propertyToBeValidatedIs(String propertyName, String propertyValue) throws InvalidTopologyException, ConfigurationTopologyException {
+    Map<String, Map<String, String>> propertiesToBeValidated = new HashMap<String, Map<String, String>>() {{
+      put(CONFIG_TYPE, new HashMap<String, String>(){{
+        put(propertyName, propertyValue);
+      }});
+    }};
+    expect(clusterTopology.getConfiguration()).andReturn(new Configuration(propertiesToBeValidated, emptyMap())).anyTimes();
+    replayAll();
+  }
+
+  private void validate(String propertyName) throws InvalidTopologyException {
+    validator = new UnitValidator(newHashSet(new UnitValidatedProperty(SERVICE, CONFIG_TYPE, propertyName)));
+    validator.validate(clusterTopology);
+  }
+
+  private void stackUnitIs(String name, String unit) {
+    ValueAttributesInfo propertyValueAttributes = new ValueAttributesInfo();
+    propertyValueAttributes.setUnit(unit);
+    stackConfigWithMetadata.put(name, new Stack.ConfigProperty(new StackConfigurationResponse(
+      name,
+      "any",
+      "any",
+      "any",
+      "any",
+      true,
+      Collections.emptySet(),
+      Collections.emptyMap(),
+      propertyValueAttributes,
+      Collections.emptySet()
+    )));
+  }
+}
\ No newline at end of file


[11/50] [abbrv] ambari git commit: AMBARI-21479 Deploys failing with Namenode install failure (dgrinenko)

Posted by nc...@apache.org.
AMBARI-21479 Deploys failing with Namenode install failure (dgrinenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4e1da58a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4e1da58a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4e1da58a

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 4e1da58a9479889f1624e9887bb681a54e2ae6ae
Parents: 9bfea65
Author: Dmytro Grinenko <ha...@apache.org>
Authored: Fri Jul 14 20:26:52 2017 +0300
Committer: Dmytro Grinenko <ha...@apache.org>
Committed: Fri Jul 14 20:26:52 2017 +0300

----------------------------------------------------------------------
 .../HDP/3.0/configuration/cluster-env.xml       |   4 +-
 .../HDP/3.0/properties/stack_features.json      | 752 ++++++++++---------
 .../stacks/HDP/3.0/properties/stack_tools.json  |  14 +-
 3 files changed, 391 insertions(+), 379 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/4e1da58a/ambari-server/src/main/resources/stacks/HDP/3.0/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/configuration/cluster-env.xml
index 341079b..ca3be1d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/configuration/cluster-env.xml
@@ -252,8 +252,8 @@ gpgcheck=0</value>
   </property>
   <property>
     <name>stack_root</name>
-    <value>/usr/hdp</value>
-    <description>Stack root folder</description>
+    <value>{"HDP":"/usr/hdp"}</value>
+    <description>JSON which defines the stack root by stack name</description>
     <value-attributes>
       <read-only>true</read-only>
       <overridable>false</overridable>

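A minimal sketch of consuming the new stack_root format (the value is now a JSON map keyed by stack name rather than a bare path; the Gson usage and variable names here are illustrative):

    import java.util.Map;

    import com.google.gson.Gson;

    public class StackRootSketch {
      public static void main(String[] args) {
        String stackRootProperty = "{\"HDP\":\"/usr/hdp\"}";
        // Parse the per-stack root mapping and look up the HDP root.
        Map<?, ?> roots = new Gson().fromJson(stackRootProperty, Map.class);
        System.out.println(roots.get("HDP")); // /usr/hdp
      }
    }
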
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e1da58a/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json b/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json
index b081ee1..9422cbc 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json
@@ -1,377 +1,379 @@
 {
-  "stack_features": [
-    {
-      "name": "snappy",
-      "description": "Snappy compressor/decompressor support",
-      "min_version": "2.0.0.0",
-      "max_version": "2.2.0.0"
-    },
-    {
-      "name": "lzo",
-      "description": "LZO libraries support",
-      "min_version": "2.2.1.0"
-    },
-    {
-      "name": "express_upgrade",
-      "description": "Express upgrade support",
-      "min_version": "2.1.0.0"
-    },
-    {
-      "name": "rolling_upgrade",
-      "description": "Rolling upgrade support",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "kafka_acl_migration_support",
-      "description": "ACL migration support",
-      "min_version": "2.3.4.0"
-    },
-    {
-      "name": "secure_zookeeper",
-      "description": "Protect ZNodes with SASL acl in secure clusters",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "config_versioning",
-      "description": "Configurable versions support",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "datanode_non_root",
-      "description": "DataNode running as non-root support (AMBARI-7615)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "remove_ranger_hdfs_plugin_env",
-      "description": "HDFS removes Ranger env files (AMBARI-14299)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "ranger",
-      "description": "Ranger Service support",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "ranger_tagsync_component",
-      "description": "Ranger Tagsync component support (AMBARI-14383)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "phoenix",
-      "description": "Phoenix Service support",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "nfs",
-      "description": "NFS support",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "tez_for_spark",
-      "description": "Tez dependency for Spark",
-      "min_version": "2.2.0.0",
-      "max_version": "2.3.0.0"
-    },
-    {
-      "name": "timeline_state_store",
-      "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "copy_tarball_to_hdfs",
-      "description": "Copy tarball to HDFS support (AMBARI-12113)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "spark_16plus",
-      "description": "Spark 1.6+",
-      "min_version": "2.4.0.0"
-    },
-    {
-      "name": "spark_thriftserver",
-      "description": "Spark Thrift Server",
-      "min_version": "2.3.2.0"
-    },
-    {
-      "name": "storm_kerberos",
-      "description": "Storm Kerberos support (AMBARI-7570)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "storm_ams",
-      "description": "Storm AMS integration (AMBARI-10710)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "create_kafka_broker_id",
-      "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
-      "min_version": "2.2.0.0",
-      "max_version": "2.3.0.0"
-    },
-    {
-      "name": "kafka_listeners",
-      "description": "Kafka listeners (AMBARI-10984)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "kafka_kerberos",
-      "description": "Kafka Kerberos support (AMBARI-10984)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "pig_on_tez",
-      "description": "Pig on Tez support (AMBARI-7863)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "ranger_usersync_non_root",
-      "description": "Ranger Usersync as non-root user (AMBARI-10416)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "ranger_audit_db_support",
-      "description": "Ranger Audit to DB support",
-      "min_version": "2.2.0.0",
-      "max_version": "2.5.0.0"
-    },
-    {
-      "name": "accumulo_kerberos_user_auth",
-      "description": "Accumulo Kerberos User Auth (AMBARI-10163)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "knox_versioned_data_dir",
-      "description": "Use versioned data dir for Knox (AMBARI-13164)",
-      "min_version": "2.3.2.0"
-    },
-    {
-      "name": "knox_sso_topology",
-      "description": "Knox SSO Topology support (AMBARI-13975)",
-      "min_version": "2.3.8.0"
-    },
-    {
-      "name": "atlas_rolling_upgrade",
-      "description": "Rolling upgrade support for Atlas",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "oozie_admin_user",
-      "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "oozie_create_hive_tez_configs",
-      "description": "Oozie create configs for Ambari Hive and Tez deployments (AMBARI-8074)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "oozie_setup_shared_lib",
-      "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "oozie_host_kerberos",
-      "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
-      "min_version": "2.0.0.0"
-    },
-    {
-      "name": "falcon_extensions",
-      "description": "Falcon Extension",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "hive_metastore_upgrade_schema",
-      "description": "Hive metastore upgrade schema support (AMBARI-11176)",
-      "min_version": "2.3.0.0"
-     },
-    {
-      "name": "hive_server_interactive",
-      "description": "Hive server interactive support (AMBARI-15573)",
-      "min_version": "2.5.0.0"
-     },
-    {
-      "name": "hive_webhcat_specific_configs",
-      "description": "Hive webhcat specific configurations support (AMBARI-12364)",
-      "min_version": "2.3.0.0"
-     },
-    {
-      "name": "hive_purge_table",
-      "description": "Hive purge table support (AMBARI-12260)",
-      "min_version": "2.3.0.0"
-     },
-    {
-      "name": "hive_server2_kerberized_env",
-      "description": "Hive server2 working on kerberized environment (AMBARI-13749)",
-      "min_version": "2.2.3.0",
-      "max_version": "2.2.5.0"
-     },
-    {
-      "name": "hive_env_heapsize",
-      "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "ranger_kms_hsm_support",
-      "description": "Ranger KMS HSM support (AMBARI-15752)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_log4j_support",
-      "description": "Ranger supporting log-4j properties (AMBARI-15681)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_kerberos_support",
-      "description": "Ranger Kerberos support",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "hive_metastore_site_support",
-      "description": "Hive Metastore site support",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_usersync_password_jceks",
-      "description": "Saving Ranger Usersync credentials in jceks",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_install_infra_client",
-      "description": "Ambari Infra Service support",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "falcon_atlas_support_2_3",
-      "description": "Falcon Atlas integration support for 2.3 stack",
-      "min_version": "2.3.99.0",
-      "max_version": "2.4.0.0"
-    },
-    {
-      "name": "falcon_atlas_support",
-      "description": "Falcon Atlas integration",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "hbase_home_directory",
-      "description": "Hbase home directory in HDFS needed for HBASE backup",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "spark_livy",
-      "description": "Livy as slave component of spark",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "atlas_ranger_plugin_support",
-      "description": "Atlas Ranger plugin support",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "atlas_conf_dir_in_path",
-      "description": "Prepend the Atlas conf dir (/etc/atlas/conf) to the classpath of Storm and Falcon",
-      "min_version": "2.3.0.0",
-      "max_version": "2.4.99.99"
-    },
-    {
-      "name": "atlas_upgrade_support",
-      "description": "Atlas supports express and rolling upgrades",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "atlas_hook_support",
-      "description": "Atlas support for hooks in Hive, Storm, Falcon, and Sqoop",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_pid_support",
-      "description": "Ranger Service support pid generation AMBARI-16756",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_kms_pid_support",
-      "description": "Ranger KMS Service support pid generation",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_admin_password_change",
-      "description": "Allow ranger admin credentials to be specified during cluster creation (AMBARI-17000)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "storm_metrics_apache_classes",
-      "description": "Metrics sink for Storm that uses Apache class names",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "spark_java_opts_support",
-      "description": "Allow Spark to generate java-opts file",
-      "min_version": "2.2.0.0",
-      "max_version": "2.4.0.0"
-    },
-    {
-      "name": "atlas_hbase_setup",
-      "description": "Use script to create Atlas tables in Hbase and set permissions for Atlas user.",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_hive_plugin_jdbc_url",
-      "description": "Handle Ranger hive repo config jdbc url change for stack 2.5 (AMBARI-18386)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "zkfc_version_advertised",
-      "description": "ZKFC advertise version",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "phoenix_core_hdfs_site_required",
-      "description": "HDFS and CORE site required for Phoenix",
-      "max_version": "2.5.9.9"
-    },
-    {
-      "name": "ranger_tagsync_ssl_xml_support",
-      "description": "Ranger Tagsync ssl xml support.",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "ranger_xml_configuration",
-      "description": "Ranger code base support xml configurations",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "kafka_ranger_plugin_support",
-      "description": "Ambari stack changes for Ranger Kafka Plugin (AMBARI-11299)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "yarn_ranger_plugin_support",
-      "description": "Implement Stack changes for Ranger Yarn Plugin integration (AMBARI-10866)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "ranger_solr_config_support",
-      "description": "Showing Ranger solrconfig.xml on UI",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "core_site_for_ranger_plugins",
-      "description": "Adding core-site.xml in when Ranger plugin is enabled for Storm, Kafka, and Knox.",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "secure_ranger_ssl_password",
-      "description": "Securing Ranger Admin and Usersync SSL and Trustore related passwords in jceks",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "ranger_kms_ssl",
-      "description": "Ranger KMS SSL properties in ambari stack",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "atlas_hdfs_site_on_namenode_ha",
-      "description": "Need to create hdfs-site under atlas-conf dir when Namenode-HA is enabled.",
-      "min_version": "2.6.0.0"
-    }
-  ]
+  "HDP": {
+    "stack_features": [
+      {
+        "name": "snappy",
+        "description": "Snappy compressor/decompressor support",
+        "min_version": "2.0.0.0",
+        "max_version": "2.2.0.0"
+      },
+      {
+        "name": "lzo",
+        "description": "LZO libraries support",
+        "min_version": "2.2.1.0"
+      },
+      {
+        "name": "express_upgrade",
+        "description": "Express upgrade support",
+        "min_version": "2.1.0.0"
+      },
+      {
+        "name": "rolling_upgrade",
+        "description": "Rolling upgrade support",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "kafka_acl_migration_support",
+        "description": "ACL migration support",
+        "min_version": "2.3.4.0"
+      },
+      {
+        "name": "secure_zookeeper",
+        "description": "Protect ZNodes with SASL acl in secure clusters",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "config_versioning",
+        "description": "Configurable versions support",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "datanode_non_root",
+        "description": "DataNode running as non-root support (AMBARI-7615)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "remove_ranger_hdfs_plugin_env",
+        "description": "HDFS removes Ranger env files (AMBARI-14299)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "ranger",
+        "description": "Ranger Service support",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "ranger_tagsync_component",
+        "description": "Ranger Tagsync component support (AMBARI-14383)",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "phoenix",
+        "description": "Phoenix Service support",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "nfs",
+        "description": "NFS support",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "tez_for_spark",
+        "description": "Tez dependency for Spark",
+        "min_version": "2.2.0.0",
+        "max_version": "2.3.0.0"
+      },
+      {
+        "name": "timeline_state_store",
+        "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "copy_tarball_to_hdfs",
+        "description": "Copy tarball to HDFS support (AMBARI-12113)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "spark_16plus",
+        "description": "Spark 1.6+",
+        "min_version": "2.4.0.0"
+      },
+      {
+        "name": "spark_thriftserver",
+        "description": "Spark Thrift Server",
+        "min_version": "2.3.2.0"
+      },
+      {
+        "name": "storm_kerberos",
+        "description": "Storm Kerberos support (AMBARI-7570)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "storm_ams",
+        "description": "Storm AMS integration (AMBARI-10710)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "create_kafka_broker_id",
+        "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
+        "min_version": "2.2.0.0",
+        "max_version": "2.3.0.0"
+      },
+      {
+        "name": "kafka_listeners",
+        "description": "Kafka listeners (AMBARI-10984)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "kafka_kerberos",
+        "description": "Kafka Kerberos support (AMBARI-10984)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "pig_on_tez",
+        "description": "Pig on Tez support (AMBARI-7863)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "ranger_usersync_non_root",
+        "description": "Ranger Usersync as non-root user (AMBARI-10416)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "ranger_audit_db_support",
+        "description": "Ranger Audit to DB support",
+        "min_version": "2.2.0.0",
+        "max_version": "2.5.0.0"
+      },
+      {
+        "name": "accumulo_kerberos_user_auth",
+        "description": "Accumulo Kerberos User Auth (AMBARI-10163)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "knox_versioned_data_dir",
+        "description": "Use versioned data dir for Knox (AMBARI-13164)",
+        "min_version": "2.3.2.0"
+      },
+      {
+        "name": "knox_sso_topology",
+        "description": "Knox SSO Topology support (AMBARI-13975)",
+        "min_version": "2.3.8.0"
+      },
+      {
+        "name": "atlas_rolling_upgrade",
+        "description": "Rolling upgrade support for Atlas",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "oozie_admin_user",
+        "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "oozie_create_hive_tez_configs",
+        "description": "Oozie create configs for Ambari Hive and Tez deployments (AMBARI-8074)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "oozie_setup_shared_lib",
+        "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "oozie_host_kerberos",
+        "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
+        "min_version": "2.0.0.0"
+      },
+      {
+        "name": "falcon_extensions",
+        "description": "Falcon Extension",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "hive_metastore_upgrade_schema",
+        "description": "Hive metastore upgrade schema support (AMBARI-11176)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "hive_server_interactive",
+        "description": "Hive server interactive support (AMBARI-15573)",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "hive_webhcat_specific_configs",
+        "description": "Hive webhcat specific configurations support (AMBARI-12364)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "hive_purge_table",
+        "description": "Hive purge table support (AMBARI-12260)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "hive_server2_kerberized_env",
+        "description": "Hive server2 working on kerberized environment (AMBARI-13749)",
+        "min_version": "2.2.3.0",
+        "max_version": "2.2.5.0"
+      },
+      {
+        "name": "hive_env_heapsize",
+        "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "ranger_kms_hsm_support",
+        "description": "Ranger KMS HSM support (AMBARI-15752)",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_log4j_support",
+        "description": "Ranger supporting log-4j properties (AMBARI-15681)",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_kerberos_support",
+        "description": "Ranger Kerberos support",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "hive_metastore_site_support",
+        "description": "Hive Metastore site support",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_usersync_password_jceks",
+        "description": "Saving Ranger Usersync credentials in jceks",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_install_infra_client",
+        "description": "Ambari Infra Service support",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "falcon_atlas_support_2_3",
+        "description": "Falcon Atlas integration support for 2.3 stack",
+        "min_version": "2.3.99.0",
+        "max_version": "2.4.0.0"
+      },
+      {
+        "name": "falcon_atlas_support",
+        "description": "Falcon Atlas integration",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "hbase_home_directory",
+        "description": "Hbase home directory in HDFS needed for HBASE backup",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "spark_livy",
+        "description": "Livy as slave component of spark",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "atlas_ranger_plugin_support",
+        "description": "Atlas Ranger plugin support",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "atlas_conf_dir_in_path",
+        "description": "Prepend the Atlas conf dir (/etc/atlas/conf) to the classpath of Storm and Falcon",
+        "min_version": "2.3.0.0",
+        "max_version": "2.4.99.99"
+      },
+      {
+        "name": "atlas_upgrade_support",
+        "description": "Atlas supports express and rolling upgrades",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "atlas_hook_support",
+        "description": "Atlas support for hooks in Hive, Storm, Falcon, and Sqoop",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_pid_support",
+        "description": "Ranger Service support pid generation AMBARI-16756",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_kms_pid_support",
+        "description": "Ranger KMS Service support pid generation",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_admin_password_change",
+        "description": "Allow ranger admin credentials to be specified during cluster creation (AMBARI-17000)",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "storm_metrics_apache_classes",
+        "description": "Metrics sink for Storm that uses Apache class names",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "spark_java_opts_support",
+        "description": "Allow Spark to generate java-opts file",
+        "min_version": "2.2.0.0",
+        "max_version": "2.4.0.0"
+      },
+      {
+        "name": "atlas_hbase_setup",
+        "description": "Use script to create Atlas tables in Hbase and set permissions for Atlas user.",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_hive_plugin_jdbc_url",
+        "description": "Handle Ranger hive repo config jdbc url change for stack 2.5 (AMBARI-18386)",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "zkfc_version_advertised",
+        "description": "ZKFC advertise version",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "phoenix_core_hdfs_site_required",
+        "description": "HDFS and CORE site required for Phoenix",
+        "max_version": "2.5.9.9"
+      },
+      {
+        "name": "ranger_tagsync_ssl_xml_support",
+        "description": "Ranger Tagsync ssl xml support.",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "ranger_xml_configuration",
+        "description": "Ranger code base support xml configurations",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "kafka_ranger_plugin_support",
+        "description": "Ambari stack changes for Ranger Kafka Plugin (AMBARI-11299)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "yarn_ranger_plugin_support",
+        "description": "Implement Stack changes for Ranger Yarn Plugin integration (AMBARI-10866)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "ranger_solr_config_support",
+        "description": "Showing Ranger solrconfig.xml on UI",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "core_site_for_ranger_plugins",
+        "description": "Adding core-site.xml in when Ranger plugin is enabled for Storm, Kafka, and Knox.",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "secure_ranger_ssl_password",
+        "description": "Securing Ranger Admin and Usersync SSL and Trustore related passwords in jceks",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "ranger_kms_ssl",
+        "description": "Ranger KMS SSL properties in ambari stack",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "atlas_hdfs_site_on_namenode_ha",
+        "description": "Need to create hdfs-site under atlas-conf dir when Namenode-HA is enabled.",
+        "min_version": "2.6.0.0"
+      }
+    ]
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/4e1da58a/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_tools.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_tools.json b/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_tools.json
index d1aab4b..e1a65c2 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_tools.json
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_tools.json
@@ -1,4 +1,14 @@
 {
-  "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"],
-  "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]
+  "HDP": {
+    "stack_selector": [
+      "hdp-select",
+      "/usr/bin/hdp-select",
+      "hdp-select"
+    ],
+    "conf_selector": [
+      "conf-select",
+      "/usr/bin/conf-select",
+      "conf-select"
+    ]
+  }
 }
\ No newline at end of file
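
For code that consumes these property files, the practical effect of this commit is that both stack_features.json and stack_tools.json are now keyed by stack name, so callers must select the stack entry before reading the lists. A minimal Python sketch of reading the new layout and applying the usual version-range check (the file path, helper names, and the exclusive max_version semantics are illustrative assumptions, not the Ambari API):

    import json

    def load_stack_features(path, stack_name="HDP"):
        # The JSON is now keyed by stack name; pick out this stack's entry.
        with open(path) as f:
            return json.load(f)[stack_name]["stack_features"]

    def feature_supported(features, name, version):
        # A feature applies when the stack version falls inside its optional
        # [min_version, max_version) range. Versions are dotted numerics, so
        # compare integer tuples rather than raw strings.
        as_tuple = lambda s: tuple(int(p) for p in s.split("."))
        v = as_tuple(version)
        for feature in features:
            if feature["name"] == name:
                lo = feature.get("min_version")
                hi = feature.get("max_version")
                return ((lo is None or v >= as_tuple(lo)) and
                        (hi is None or v < as_tuple(hi)))
        return False

For example, feature_supported(features, "ranger_kms_ssl", "2.6.0.0") returns True, while "2.5.0.0" fails that feature's min_version bound.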


[15/50] [abbrv] ambari git commit: AMBARI-21459. Add lucene index migration script to infra solr client package (oleewere)

Posted by nc...@apache.org.
AMBARI-21459. Add lucene index migration script to infra solr client package (oleewere)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f072dd21
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f072dd21
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f072dd21

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: f072dd2156e83d8a487ce0c3229c1ae22788c6be
Parents: e799f52
Author: oleewere <ol...@gmail.com>
Authored: Wed Jul 12 21:04:54 2017 +0200
Committer: oleewere <ol...@gmail.com>
Committed: Sat Jul 15 19:04:53 2017 +0200

----------------------------------------------------------------------
 ambari-infra/ambari-infra-solr-client/build.xml |   1 +
 ambari-infra/ambari-infra-solr-client/pom.xml   |  10 ++
 .../src/main/resources/solrIndexHelper.sh       | 156 +++++++++++++++++++
 3 files changed, 167 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f072dd21/ambari-infra/ambari-infra-solr-client/build.xml
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-solr-client/build.xml b/ambari-infra/ambari-infra-solr-client/build.xml
index a54e336..9b8b6cc 100644
--- a/ambari-infra/ambari-infra-solr-client/build.xml
+++ b/ambari-infra/ambari-infra-solr-client/build.xml
@@ -35,6 +35,7 @@
     </copy>
     <copy todir="target/package" includeEmptyDirs="no">
       <fileset file="src/main/resources/solrCloudCli.sh"/>
+      <fileset file="src/main/resources/solrIndexHelper.sh"/>
     </copy>
     <copy todir="target/package" includeEmptyDirs="no">
       <fileset file="src/main/resources/log4j.properties"/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/f072dd21/ambari-infra/ambari-infra-solr-client/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-solr-client/pom.xml b/ambari-infra/ambari-infra-solr-client/pom.xml
index d103003..3818aba 100644
--- a/ambari-infra/ambari-infra-solr-client/pom.xml
+++ b/ambari-infra/ambari-infra-solr-client/pom.xml
@@ -36,6 +36,16 @@
       <version>${solr.version}</version>
     </dependency>
     <dependency>
+      <groupId>org.apache.lucene</groupId>
+      <artifactId>lucene-core</artifactId>
+      <version>${solr.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.lucene</groupId>
+      <artifactId>lucene-backward-codecs</artifactId>
+      <version>${solr.version}</version>
+    </dependency>
+    <dependency>
       <groupId>org.apache.zookeeper</groupId>
       <artifactId>zookeeper</artifactId>
       <version>3.4.9</version>

http://git-wip-us.apache.org/repos/asf/ambari/blob/f072dd21/ambari-infra/ambari-infra-solr-client/src/main/resources/solrIndexHelper.sh
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-solr-client/src/main/resources/solrIndexHelper.sh b/ambari-infra/ambari-infra-solr-client/src/main/resources/solrIndexHelper.sh
new file mode 100755
index 0000000..12e6a77
--- /dev/null
+++ b/ambari-infra/ambari-infra-solr-client/src/main/resources/solrIndexHelper.sh
@@ -0,0 +1,156 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+JVM="java"
+sdir="`dirname \"$0\"`"
+: ${JAVA_HOME:?"Please set the JAVA_HOME for lucene index migration!"}
+
+function print_help() {
+  cat << EOF
+
+   Usage: solrIndexHelper.sh [<command>] [<arguments with flags>]
+
+   commands:
+     upgrade-index            Check and upgrade solr index data in core directories.
+     run-check-index-tool     call 'java -cp ... org.apache.lucene.index.CheckIndex' directly
+     run-upgrade-index-tool   call 'java -cp ... org.apache.lucene.index.IndexUpgrader' directly
+     help                     print usage
+
+
+   upgrade-index command arguments:
+     -d, --index-data-dir <DIRECTORY>        Location of the solr cores (e.g.: /opt/ambari_infra_solr/data)
+     -c, --core-filters <FILTER1,FILTER2>    Comma separated name filters of core directories (default: hadoop_logs,audit_logs,history)
+     -f, --force                             Force index upgrade even if the index version is already at least 6.
+
+EOF
+}
+
+function upgrade_core() {
+  local INDEX_DIR=${1:?"usage: <index_base_dir> e.g.: /opt/ambari_infra_solr/data"}
+  local FORCE_UPDATE=${2:?"usage: <force_update_flag> e.g.: true"}
+  local SOLR_CORE_FILTERS=${3:?"usage: <comma separated core filters> e.g.: hadoop_logs,audit_logs,history"}
+
+  SOLR_CORE_FILTER_ARR=$(echo $SOLR_CORE_FILTERS | sed "s/,/ /g")
+
+  for coll in $SOLR_CORE_FILTER_ARR; do
+    if [[ "$1" == *"$coll"* ]]; then
+      echo "Core '$1' dir name contains $coll (core filter)'";
+      version=$(PATH=$JAVA_HOME/bin:$PATH $JVM -classpath "$sdir/libs/lucene-core-6.6.0.jar:$sdir/libs/lucene-backward-codecs-6.6.0.jar" org.apache.lucene.index.CheckIndex -fast $1|grep "   version="|sed -e 's/.*=//g'|head -1)
+      if [ -z $version ] ; then
+        echo "Core '$1' - Empty index?"
+        return
+      fi
+      majorVersion=$(echo $version|cut -c 1)
+      if [ $majorVersion -ge 6 ] && [ $FORCE_UPDATE == "false" ] ; then
+        echo "Core '$1' - Already on version $version, not upgrading. Use -f or --force option to run upgrade anyway."
+      else
+        echo "Core '$1' - Index version is $version, upgrading ..."
+        PATH=$JAVA_HOME/bin:$PATH $JVM -classpath "$sdir/libs/lucene-core-6.6.0.jar:$sdir/libs/lucene-backward-codecs-6.6.0.jar" org.apache.lucene.index.IndexUpgrader -delete-prior-commits $1
+        echo "Upgrading core '$1' has finished"
+      fi
+    fi
+  done
+}
+
+function upgrade_index() {
+  while [[ $# -gt 0 ]]
+    do
+      key="$1"
+      case $key in
+        -c|--core-filters)
+          local SOLR_CORE_FILTERS="$2"
+          shift 2
+        ;;
+        -f|--force)
+          local FORCE_UPDATE="true"
+          shift
+        ;;
+        -d|--index-data-dir)
+          local INDEX_DIR="$2"
+          shift 2
+        ;;
+        *)
+          echo "Unknown option: $1"
+          exit 1
+        ;;
+      esac
+  done
+  if [[ -z "$INDEX_DIR" ]] ; then
+    echo "Index data dirctory option is required (-d or --index-data-dir). Exiting..."
+    exit 1
+  fi
+
+  if [[ -z "$SOLR_CORE_FILTERS" ]] ; then
+    SOLR_CORE_FILTERS="hadoop_logs,audit_logs,history"
+  fi
+
+  if [[ -z "$FORCE_UPDATE" ]] ; then
+    FORCE_UPDATE="false"
+  else
+    echo "NOTE: Forcing index upgrade is set."
+  fi
+
+  CORES=$(for replica_dir in `find $INDEX_DIR -name data`; do dirname $replica_dir; done);
+  if [[ -z "$CORES" ]] ; then
+    echo "No indices found on path $INDEX_DIR"
+  else
+      for c in $CORES ; do
+        if find $c/data -maxdepth 1 -type d -name 'index*' 2>/dev/null | grep -q .; then
+          name=$(echo $c | sed -e 's/.*\///g')
+          abspath=$(cd "$(dirname "$c")"; pwd)/$(basename "$c")
+          find $c/data -maxdepth 1 -type d -name 'index*' | while read indexDir; do
+          echo "Checking core $name - $abspath"
+          upgrade_core "$indexDir" "$FORCE_UPDATE" "$SOLR_CORE_FILTERS"
+          done
+        else
+          echo "No index folder found for $name"
+        fi
+      done
+      echo "DONE"
+  fi
+}
+
+function upgrade_index_tool() {
+  # see: https://cwiki.apache.org/confluence/display/solr/IndexUpgrader+Tool
+  PATH=$JAVA_HOME/bin:$PATH $JVM -classpath "$sdir/libs/lucene-core-6.6.0.jar:$sdir/libs/lucene-backward-codecs-6.6.0.jar" org.apache.lucene.index.IndexUpgrader ${@}
+}
+
+function check_index_tool() {
+  PATH=$JAVA_HOME/bin:$PATH $JVM -classpath "$sdir/libs/lucene-core-6.6.0.jar:$sdir/libs/lucene-backward-codecs-6.6.0.jar" org.apache.lucene.index.CheckIndex ${@}
+}
+
+function main() {
+  command="$1"
+  case $command in
+   "upgrade-index")
+     upgrade_index "${@:2}"
+     ;;
+   "run-check-index-tool")
+     check_index_tool "${@:2}"
+     ;;
+   "run-upgrade-index-tool")
+     upgrade_index_tool "${@:2}"
+     ;;
+   "help")
+     print_help
+     ;;
+   *)
+   echo "Available commands: (upgrade-index | run-check-index-tool | run-upgrade-index-tool | help)"
+   ;;
+   esac
+}
+
+main ${1+"$@"}
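
As a usage sketch (the JAVA_HOME value, install path, and data directory below are deployment-specific assumptions, not fixed paths), the helper would typically be driven like this:

    export JAVA_HOME=/usr/jdk64/jdk1.8.0_112
    ./solrIndexHelper.sh upgrade-index \
        -d /opt/ambari_infra_solr/data \
        -c hadoop_logs,audit_logs,history

The script walks the data directory for each core's index* directories, reads the index version with Lucene's CheckIndex, and only runs IndexUpgrader on cores below major version 6 unless -f/--force is given.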


[47/50] [abbrv] ambari git commit: AMBARI-21552 - Pass Repository ID To Upgrade Prechecks (part2) (jonathanhurley)

Posted by nc...@apache.org.
AMBARI-21552 - Pass Repository ID To Upgrade Prechecks (part2) (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/95e7719b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/95e7719b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/95e7719b

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 95e7719b17d3672e86af13e07041ddbb902fc922
Parents: 495a3f4
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Sat Jul 22 22:34:19 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Sat Jul 22 22:40:12 2017 -0400

----------------------------------------------------------------------
 .../ambari/server/checks/AbstractCheckDescriptor.java   |  8 ++++----
 .../ambari/server/checks/ConfigurationMergeCheck.java   |  2 +-
 .../server/checks/HostsMasterMaintenanceCheck.java      |  4 ++--
 .../server/checks/HostsRepositoryVersionCheck.java      |  8 ++++----
 .../ambari/server/checks/InstallPackagesCheck.java      |  6 +++---
 .../ambari/server/controller/PrereqCheckRequest.java    | 11 ++++++++++-
 .../internal/PreUpgradeCheckResourceProvider.java       | 12 ++++++++----
 .../controller/internal/ReadOnlyResourceProvider.java   |  2 +-
 .../org/apache/ambari/server/state/UpgradeHelper.java   |  4 ++--
 9 files changed, 35 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/95e7719b/ambari-server/src/main/java/org/apache/ambari/server/checks/AbstractCheckDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/AbstractCheckDescriptor.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/AbstractCheckDescriptor.java
index fddded7..bda2c07 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/checks/AbstractCheckDescriptor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/AbstractCheckDescriptor.java
@@ -145,7 +145,7 @@ public abstract class AbstractCheckDescriptor {
     if (serviceFound && null != request.getTargetStackId()) {
       String stackName = request.getTargetStackId().getStackName();
       RepositoryVersionEntity rve = repositoryVersionDaoProvider.get().
-        findByStackNameAndVersion(stackName, request.getRepositoryVersion());
+          findByStackNameAndVersion(stackName, request.getTargetVersion());
 
       if (RepositoryType.STANDARD != rve.getType()) {
         try {
@@ -155,7 +155,7 @@ public abstract class AbstractCheckDescriptor {
             serviceFound = false;
           }
         } catch (Exception e) {
-          LOG.warn("Could not parse xml for %s", request.getRepositoryVersion(), e);
+          LOG.warn("Could not parse xml for %s", request.getTargetVersion(), e);
         }
       }
     }
@@ -253,8 +253,8 @@ public abstract class AbstractCheckDescriptor {
       PrerequisiteCheck prerequisiteCheck, PrereqCheckRequest request) {
     String fail = m_description.getFail(key);
 
-    if (fail.contains("{{version}}") && null != request.getRepositoryVersion()) {
-      fail = fail.replace("{{version}}", request.getRepositoryVersion());
+    if (fail.contains("{{version}}") && null != request.getTargetVersion()) {
+      fail = fail.replace("{{version}}", request.getTargetVersion());
     }
 
     if (fail.contains("{{fails}}")) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/95e7719b/ambari-server/src/main/java/org/apache/ambari/server/checks/ConfigurationMergeCheck.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/ConfigurationMergeCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/ConfigurationMergeCheck.java
index 37fe4b0..48c652f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/checks/ConfigurationMergeCheck.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/ConfigurationMergeCheck.java
@@ -64,7 +64,7 @@ public class ConfigurationMergeCheck extends AbstractCheckDescriptor {
       throws AmbariException {
 
     String stackName = request.getTargetStackId().getStackName();
-    RepositoryVersionEntity rve = repositoryVersionDaoProvider.get().findByStackNameAndVersion(stackName, request.getRepositoryVersion());
+    RepositoryVersionEntity rve = repositoryVersionDaoProvider.get().findByStackNameAndVersion(stackName, request.getTargetVersion());
 
     Map<String, Map<String, ThreeWayValue>> changes =
         m_mergeHelper.getConflicts(request.getClusterName(), rve.getStackId());

http://git-wip-us.apache.org/repos/asf/ambari/blob/95e7719b/ambari-server/src/main/java/org/apache/ambari/server/checks/HostsMasterMaintenanceCheck.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/HostsMasterMaintenanceCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/HostsMasterMaintenanceCheck.java
index e5082c9..1c59d2e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/checks/HostsMasterMaintenanceCheck.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/HostsMasterMaintenanceCheck.java
@@ -59,7 +59,7 @@ public class HostsMasterMaintenanceCheck extends AbstractCheckDescriptor {
 
   @Override
   public boolean isApplicable(PrereqCheckRequest request) throws AmbariException {
-      return super.isApplicable(request) && request.getRepositoryVersion() != null;
+      return super.isApplicable(request) && request.getTargetVersion() != null;
   }
 
   @Override
@@ -70,7 +70,7 @@ public class HostsMasterMaintenanceCheck extends AbstractCheckDescriptor {
     final Set<String> hostsWithMasterComponent = new HashSet<>();
 
     // TODO AMBARI-12698, need to pass the upgrade pack to use in the request, or at least the type.
-    final String upgradePackName = repositoryVersionHelper.get().getUpgradePackageName(stackId.getStackName(), stackId.getStackVersion(), request.getRepositoryVersion(), null);
+    final String upgradePackName = repositoryVersionHelper.get().getUpgradePackageName(stackId.getStackName(), stackId.getStackVersion(), request.getTargetVersion(), null);
     if (upgradePackName == null) {
       prerequisiteCheck.setStatus(PrereqCheckStatus.FAIL);
       String fail = getFailReason(KEY_NO_UPGRADE_NAME, prerequisiteCheck, request);

http://git-wip-us.apache.org/repos/asf/ambari/blob/95e7719b/ambari-server/src/main/java/org/apache/ambari/server/checks/HostsRepositoryVersionCheck.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/HostsRepositoryVersionCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/HostsRepositoryVersionCheck.java
index a66db3c..37a9d2f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/checks/HostsRepositoryVersionCheck.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/HostsRepositoryVersionCheck.java
@@ -58,7 +58,7 @@ public class HostsRepositoryVersionCheck extends AbstractCheckDescriptor {
 
   @Override
   public boolean isApplicable(PrereqCheckRequest request) throws AmbariException {
-    return super.isApplicable(request) && request.getRepositoryVersion() != null;
+    return super.isApplicable(request) && request.getTargetVersion() != null;
   }
 
   @Override
@@ -78,11 +78,11 @@ public class HostsRepositoryVersionCheck extends AbstractCheckDescriptor {
         continue;
       }
 
-      if (null != request.getRepositoryVersion()) {
+      if (null != request.getTargetVersion()) {
         boolean found = false;
         for (HostVersionEntity hve : hostVersionDaoProvider.get().findByHost(host.getHostName())) {
 
-          if (hve.getRepositoryVersion().getVersion().equals(request.getRepositoryVersion())
+          if (hve.getRepositoryVersion().getVersion().equals(request.getTargetVersion())
               && (hve.getState() == RepositoryVersionState.INSTALLED || hve.getState() == RepositoryVersionState.NOT_REQUIRED)) {
             found = true;
             break;
@@ -94,7 +94,7 @@ public class HostsRepositoryVersionCheck extends AbstractCheckDescriptor {
         }
       } else {
         final RepositoryVersionEntity repositoryVersion = repositoryVersionDaoProvider.get().findByStackAndVersion(
-            stackId, request.getRepositoryVersion());
+            stackId, request.getTargetVersion());
         if (repositoryVersion == null) {
           prerequisiteCheck.setStatus(PrereqCheckStatus.FAIL);
           prerequisiteCheck.setFailReason(

http://git-wip-us.apache.org/repos/asf/ambari/blob/95e7719b/ambari-server/src/main/java/org/apache/ambari/server/checks/InstallPackagesCheck.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/InstallPackagesCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/InstallPackagesCheck.java
index ac2116f..7c84e5c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/checks/InstallPackagesCheck.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/InstallPackagesCheck.java
@@ -61,9 +61,9 @@ public class InstallPackagesCheck extends AbstractCheckDescriptor {
     final Cluster cluster = clustersProvider.get().getCluster(clusterName);
     final StackId targetStackId = request.getTargetStackId();
     final String stackName = targetStackId.getStackName();
-    final String repoVersion = request.getRepositoryVersion();
+    final String repoVersion = request.getTargetVersion();
 
-    final RepositoryVersionEntity rve = repositoryVersionDaoProvider.get().findByStackNameAndVersion(stackName, request.getRepositoryVersion());
+    final RepositoryVersionEntity rve = repositoryVersionDaoProvider.get().findByStackNameAndVersion(stackName, request.getTargetVersion());
     if (StringUtils.isBlank(rve.getVersion()) || !rve.getVersion().matches("^\\d+(\\.\\d+)*\\-\\d+$")) {
       String message = MessageFormat.format("The Repository Version {0} for Stack {1} must contain a \"-\" followed by a build number. " +
               "Make sure that another registered repository does not have the same repo URL or " +
@@ -79,7 +79,7 @@ public class InstallPackagesCheck extends AbstractCheckDescriptor {
     for (Host host : cluster.getHosts()) {
       if (host.getMaintenanceState(cluster.getClusterId()) != MaintenanceState.ON) {
         for (HostVersionEntity hve : hostVersionDaoProvider.get().findByHost(host.getHostName())) {
-          if (hve.getRepositoryVersion().getVersion().equals(request.getRepositoryVersion())
+          if (hve.getRepositoryVersion().getVersion().equals(request.getTargetVersion())
               && hve.getState() == RepositoryVersionState.INSTALL_FAILED) {
             failedHosts.add(host.getHostName());
           }

http://git-wip-us.apache.org/repos/asf/ambari/blob/95e7719b/ambari-server/src/main/java/org/apache/ambari/server/controller/PrereqCheckRequest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/PrereqCheckRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/PrereqCheckRequest.java
index bd207ae..a7faf87 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/PrereqCheckRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/PrereqCheckRequest.java
@@ -64,7 +64,7 @@ public class PrereqCheckRequest {
     return m_upgradeType;
   }
 
-  public String getRepositoryVersion() {
+  public String getTargetVersion() {
     if (null == m_targetRepositoryVersion) {
       return null;
     }
@@ -123,6 +123,15 @@ public class PrereqCheckRequest {
   }
 
   /**
+   * Gets the target repository of the upgrade.
+   *
+   * @return the target repository.
+   */
+  public RepositoryVersionEntity getTargetRepositoryVersion() {
+    return m_targetRepositoryVersion;
+  }
+
+  /**
    * Sets the target of the upgrade.
    *
    * @param targetRepositoryVersion

http://git-wip-us.apache.org/repos/asf/ambari/blob/95e7719b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProvider.java
index 24a55c1..acf8bc1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProvider.java
@@ -80,6 +80,7 @@ public class PreUpgradeCheckResourceProvider extends ReadOnlyResourceProvider {
   public static final String UPGRADE_CHECK_CLUSTER_NAME_PROPERTY_ID        = PropertyHelper.getPropertyId("UpgradeChecks", "cluster_name");
   public static final String UPGRADE_CHECK_UPGRADE_TYPE_PROPERTY_ID        = PropertyHelper.getPropertyId("UpgradeChecks", "upgrade_type");
   public static final String UPGRADE_CHECK_TARGET_REPOSITORY_VERSION_ID_ID = PropertyHelper.getPropertyId("UpgradeChecks", "repository_version_id");
+  public static final String UPGRADE_CHECK_TARGET_REPOSITORY_VERSION       = PropertyHelper.getPropertyId("UpgradeChecks", "repository_version");
 
   /**
    * Optional parameter to specify the preferred Upgrade Pack to use.
@@ -134,7 +135,7 @@ public class PreUpgradeCheckResourceProvider extends ReadOnlyResourceProvider {
    * @param managementController management controller
    */
   public PreUpgradeCheckResourceProvider(AmbariManagementController managementController) {
-    super(propertyIds, keyPropertyIds, managementController);
+    super(Type.PreUpgradeCheck, propertyIds, keyPropertyIds, managementController);
   }
 
   @Override
@@ -200,8 +201,9 @@ public class PreUpgradeCheckResourceProvider extends ReadOnlyResourceProvider {
       }
 
       if (upgradePack == null) {
-        throw new SystemException(String.format("Upgrade pack not found for the target repository version %s",
-          upgradeCheckRequest.getRepositoryVersion()));
+        throw new SystemException(
+            String.format("Upgrade pack not found for the target repository version %s",
+                upgradeCheckRequest.getTargetRepositoryVersion()));
       }
 
       // ToDo: properly handle exceptions, i.e. create fake check with error description
@@ -234,7 +236,9 @@ public class PreUpgradeCheckResourceProvider extends ReadOnlyResourceProvider {
         setResourceProperty(resource, UPGRADE_CHECK_CHECK_TYPE_PROPERTY_ID, prerequisiteCheck.getType(), requestedIds);
         setResourceProperty(resource, UPGRADE_CHECK_CLUSTER_NAME_PROPERTY_ID, prerequisiteCheck.getClusterName(), requestedIds);
         setResourceProperty(resource, UPGRADE_CHECK_UPGRADE_TYPE_PROPERTY_ID, upgradeType, requestedIds);
-        setResourceProperty(resource, UPGRADE_CHECK_TARGET_REPOSITORY_VERSION_ID_ID, upgradeCheckRequest.getRepositoryVersion(), requestedIds);
+
+        setResourceProperty(resource, UPGRADE_CHECK_TARGET_REPOSITORY_VERSION_ID_ID, repositoryVersion.getId(), requestedIds);
+        setResourceProperty(resource, UPGRADE_CHECK_TARGET_REPOSITORY_VERSION, repositoryVersion.getVersion(), requestedIds);
 
         resources.add(resource);
       }

http://git-wip-us.apache.org/repos/asf/ambari/blob/95e7719b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ReadOnlyResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ReadOnlyResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ReadOnlyResourceProvider.java
index 9a1d8f8..505ec63 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ReadOnlyResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ReadOnlyResourceProvider.java
@@ -102,4 +102,4 @@ public abstract class ReadOnlyResourceProvider extends AbstractControllerResourc
     // TODO Auto-generated method stub
     return null;
   }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/95e7719b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
index 7ca6976..a5881d2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
@@ -231,10 +231,10 @@ public class UpgradeHelper {
     Cluster cluster = m_clusters.get().getCluster(clusterName);
     StackId currentStack = cluster.getCurrentStackVersion();
 
-    StackId stackForUpgradePack = sourceStackId;
+    StackId stackForUpgradePack = targetStackId;
 
     if (direction.isDowngrade()) {
-      stackForUpgradePack = targetStackId;
+      stackForUpgradePack = sourceStackId;
     }
 
     Map<String, UpgradePack> packs = m_ambariMetaInfoProvider.get().getUpgradePacks(


[02/50] [abbrv] ambari git commit: AMBARI-21447 Log Feeder should support logs without date (time only) (mgergely)

Posted by nc...@apache.org.
AMBARI-21447 Log Feeder should support logs without date (time only) (mgergely)

Change-Id: I853447134873b10fdd3fd604fd84630a9caf9d03


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9f788c38
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9f788c38
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9f788c38

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 9f788c386667bfeb82fff7c35287a5fdb175c349
Parents: 31b9d77
Author: Miklos Gergely <mg...@hortonworks.com>
Authored: Wed Jul 12 16:55:48 2017 +0200
Committer: Miklos Gergely <mg...@hortonworks.com>
Committed: Wed Jul 12 16:55:48 2017 +0200

----------------------------------------------------------------------
 .../ambari/logfeeder/mapper/MapperDate.java     | 42 +++++++++++++-------
 1 file changed, 28 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9f788c38/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperDate.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperDate.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperDate.java
index 305688b..e099161 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperDate.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperDate.java
@@ -19,6 +19,7 @@
 
 package org.apache.ambari.logfeeder.mapper;
 
+import java.text.ParseException;
 import java.text.SimpleDateFormat;
 import java.util.Calendar;
 import java.util.Date;
@@ -79,20 +80,7 @@ public class MapperDate extends Mapper {
           jsonObj.put(LogFeederConstants.IN_MEMORY_TIMESTAMP, ((Date) value).getTime());
         } else if (targetDateFormatter != null) {
           if (srcDateFormatter != null) {
-            Date srcDate = srcDateFormatter.parse(value.toString());
-            //set year in src_date when src_date does not have year component
-            if (!srcDateFormatter.toPattern().contains("yy")) {
-              Calendar currentCalendar = Calendar.getInstance();
-              Calendar logDateCalendar = Calendar.getInstance();
-              logDateCalendar.setTimeInMillis(srcDate.getTime());
-              if (logDateCalendar.get(Calendar.MONTH) > currentCalendar.get(Calendar.MONTH)) {
-                // set previous year as a log year  when log month is grater than current month
-                srcDate = DateUtils.setYears(srcDate, currentCalendar.get(Calendar.YEAR) - 1);
-              } else {
-                // set current year as a log year
-                srcDate = DateUtils.setYears(srcDate, currentCalendar.get(Calendar.YEAR));
-              }
-            }
+            Date srcDate = getSourceDate(value);
             value = targetDateFormatter.format(srcDate);
             jsonObj.put(LogFeederConstants.IN_MEMORY_TIMESTAMP, srcDate.getTime());
           } else {
@@ -111,4 +99,30 @@ public class MapperDate extends Mapper {
     }
     return value;
   }
+
+  private Date getSourceDate(Object value) throws ParseException {
+    Date srcDate = srcDateFormatter.parse(value.toString());
+    
+    Calendar currentCalendar = Calendar.getInstance();
+    
+    if (!srcDateFormatter.toPattern().contains("dd")) {
+      //set year/month/date in src_date when src_date does not have date component
+      srcDate = DateUtils.setYears(srcDate, currentCalendar.get(Calendar.YEAR));
+      srcDate = DateUtils.setMonths(srcDate, currentCalendar.get(Calendar.MONTH));
+      srcDate = DateUtils.setDays(srcDate, currentCalendar.get(Calendar.DAY_OF_MONTH));
+      // if attaching the current date puts the timestamp in the future, it must be from the previous day
+      if (srcDate.getTime() > currentCalendar.getTimeInMillis()) {
+        srcDate = DateUtils.addDays(srcDate, -1);
+      }      
+    } else if (!srcDateFormatter.toPattern().contains("yy")) {
+      //set year in src_date when src_date does not have year component
+      srcDate = DateUtils.setYears(srcDate, currentCalendar.get(Calendar.YEAR));
+      // if attaching the current year puts the timestamp in the future, it must be from the previous year
+      if (srcDate.getTime() > currentCalendar.getTimeInMillis()) {
+        srcDate = DateUtils.addYears(srcDate, -1);
+      }
+    }
+    
+    return srcDate;
+  }
 }


[42/50] [abbrv] ambari git commit: AMBARI-21053 Reverting Beacon stack advisor recommendation (mugdha)

Posted by nc...@apache.org.
AMBARI-21053 Reverting Beacon stack advisor recommendation (mugdha)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/da21afc7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/da21afc7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/da21afc7

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: da21afc7d9019454f872cff09fe91e8889fca54d
Parents: 2a298a3
Author: Mugdha Varadkar <mu...@apache.org>
Authored: Fri Jul 21 12:38:57 2017 +0530
Committer: Mugdha Varadkar <mu...@apache.org>
Committed: Fri Jul 21 12:48:50 2017 +0530

----------------------------------------------------------------------
 .../stacks/HDP/2.6/services/stack_advisor.py    | 33 +-------------------
 1 file changed, 1 insertion(+), 32 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/da21afc7/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
index fa86f6c..1555581 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
@@ -40,42 +40,11 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
         "HIVE": self.recommendHIVEConfigurations,
         "HBASE": self.recommendHBASEConfigurations,
         "YARN": self.recommendYARNConfigurations,
-        "KAFKA": self.recommendKAFKAConfigurations,
-        "BEACON": self.recommendBEACONConfigurations
+        "KAFKA": self.recommendKAFKAConfigurations
       }
       parentRecommendConfDict.update(childRecommendConfDict)
       return parentRecommendConfDict
 
-  def recommendBEACONConfigurations(self, configurations, clusterData, services, hosts):
-    beaconEnvProperties = self.getSiteProperties(services['configurations'], 'beacon-env')
-    putbeaconEnvProperty = self.putProperty(configurations, "beacon-env", services)
-
-    # database URL and driver class recommendations
-    if beaconEnvProperties and self.checkSiteProperties(beaconEnvProperties, 'beacon_store_driver') and self.checkSiteProperties(beaconEnvProperties, 'beacon_database'):
-      putbeaconEnvProperty('beacon_store_driver', self.getDBDriver(beaconEnvProperties['beacon_database']))
-    if beaconEnvProperties and self.checkSiteProperties(beaconEnvProperties, 'beacon_store_db_name', 'beacon_store_url') and self.checkSiteProperties(beaconEnvProperties, 'beacon_database'):
-      beaconServerHost = self.getHostWithComponent('BEACON', 'BEACON_SERVER', services, hosts)
-      beaconDBConnectionURL = beaconEnvProperties['beacon_store_url']
-      protocol = self.getProtocol(beaconEnvProperties['beacon_database'])
-      oldSchemaName = self.getOldValue(services, "beacon-env", "beacon_store_db_name")
-      oldDBType = self.getOldValue(services, "beacon-env", "beacon_database")
-      # under these if constructions we are checking if beacon server hostname available,
-      # if it's default db connection url with "localhost" or if schema name was changed or if db type was changed (only for db type change from default mysql to existing mysql)
-      # or if protocol according to current db type differs with protocol in db connection url(other db types changes)
-      if beaconServerHost is not None:
-        if (beaconDBConnectionURL and "//localhost" in beaconDBConnectionURL) or oldSchemaName or oldDBType or (protocol and beaconDBConnectionURL and not beaconDBConnectionURL.startswith(protocol)):
-          dbConnection = self.getDBConnectionStringBeacon(beaconEnvProperties['beacon_database']).format(beaconServerHost['Hosts']['host_name'], beaconEnvProperties['beacon_store_db_name'])
-          putbeaconEnvProperty('beacon_store_url', dbConnection)
-
-  def getDBConnectionStringBeacon(self, databaseType):
-    driverDict = {
-      'NEW DERBY DATABASE': 'jdbc:derby:${{beacon.data.dir}}/${{beacon.store.db.name}}-db;create=true',
-      'EXISTING MYSQL DATABASE': 'jdbc:mysql://{0}/{1}',
-      'EXISTING MYSQL / MARIADB DATABASE': 'jdbc:mysql://{0}/{1}',
-      'EXISTING ORACLE DATABASE': 'jdbc:oracle:thin:@//{0}:1521/{1}'
-    }
-    return driverDict.get(databaseType.upper())
-
   def recommendAtlasConfigurations(self, configurations, clusterData, services, hosts):
     super(HDP26StackAdvisor, self).recommendAtlasConfigurations(configurations, clusterData, services, hosts)
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]


[26/50] [abbrv] ambari git commit: AMBARI-21512. Stack Advisor reported an error: KeyError: stack_name while Issued INSTALLED as new state for NODEMANAGER (smohanty)

Posted by nc...@apache.org.
AMBARI-21512. Stack Advisor reported an error: KeyError: stack_name while Issued INSTALLED as new state for NODEMANAGER (smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8de65173
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8de65173
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8de65173

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 8de651738a8338e4744b0a2a661577b08ae01778
Parents: 016df4e
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Tue Jul 18 22:05:46 2017 -0700
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Tue Jul 18 22:07:22 2017 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/ambari/server/agent/hello.rs     | 0
 ambari-server/src/main/resources/stacks/stack_advisor.py      | 7 ++++---
 2 files changed, 4 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/8de65173/ambari-server/src/main/java/org/apache/ambari/server/agent/hello.rs
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/hello.rs b/ambari-server/src/main/java/org/apache/ambari/server/agent/hello.rs
new file mode 100644
index 0000000..e69de29

http://git-wip-us.apache.org/repos/asf/ambari/blob/8de65173/ambari-server/src/main/resources/stacks/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/stack_advisor.py b/ambari-server/src/main/resources/stacks/stack_advisor.py
index 67f7fe0..321ac4e 100644
--- a/ambari-server/src/main/resources/stacks/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/stack_advisor.py
@@ -2017,9 +2017,10 @@ class DefaultStackAdvisor(StackAdvisor):
     if cluster_env and "stack_root" in cluster_env:
       stack_root_as_str = cluster_env["stack_root"]
       stack_roots = json.loads(stack_root_as_str)
-      stack_name = cluster_env["stack_name"]
-      if stack_name in stack_roots:
-        stack_root = stack_roots[stack_name]
+      if "stack_name" in cluster_env:
+        stack_name = cluster_env["stack_name"]
+        if stack_name in stack_roots:
+          stack_root = stack_roots[stack_name]
 
     return stack_root
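
The fix guards the lookup that previously raised KeyError whenever cluster-env carried stack_root but no stack_name. An equivalent, slightly more compact formulation with dict.get (a sketch, not the committed code) preserves the same fall-through behavior:

    # stack_root keeps its prior default when either key is missing.
    stack_name = cluster_env.get("stack_name")
    if stack_name is not None:
        stack_root = stack_roots.get(stack_name, stack_root)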
 


[28/50] [abbrv] ambari git commit: AMBARI-21515. Fix truststore options values (akovalenko)

Posted by nc...@apache.org.
AMBARI-21515. Fix truststore options values (akovalenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ba977e5a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ba977e5a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ba977e5a

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: ba977e5a95a0a579507259ba2ea114594c5345b8
Parents: 56f05f0
Author: Aleksandr Kovalenko <ak...@hortonworks.com>
Authored: Wed Jul 19 15:02:41 2017 +0300
Committer: Aleksandr Kovalenko <ak...@hortonworks.com>
Committed: Wed Jul 19 15:02:41 2017 +0300

----------------------------------------------------------------------
 .../controllers/authentication/AuthenticationMainCtrl.js      | 4 ++--
 .../main/resources/ui/admin-web/app/scripts/i18n.config.js    | 7 -------
 2 files changed, 2 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/ba977e5a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/authentication/AuthenticationMainCtrl.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/authentication/AuthenticationMainCtrl.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/authentication/AuthenticationMainCtrl.js
index f9c9e61..bce9189 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/authentication/AuthenticationMainCtrl.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/authentication/AuthenticationMainCtrl.js
@@ -25,9 +25,9 @@ angular.module('ambariAdminConsole')
     $scope.isLDAPEnabled = false;
     $scope.connectivity = {
       trustStore: 'default',
-      trustStoreOptions: ['authentication.connectivity.trustStore.options.default', 'authentication.connectivity.trustStore.options.custom'],
+      trustStoreOptions: ['default', 'custom'],
       trustStoreType: 'jks',
-      trustStoreTypeOptions: ['authentication.connectivity.trustStoreType.options.jks', 'authentication.connectivity.trustStoreType.options.jceks', 'authentication.connectivity.trustStoreType.options.pkcs12']
+      trustStoreTypeOptions: ['jks', 'jceks', 'pkcs12']
     };
     $scope.attributes = {
       detection: 'auto'

http://git-wip-us.apache.org/repos/asf/ambari/blob/ba977e5a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/i18n.config.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/i18n.config.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/i18n.config.js
index 43b32da..dd930fa 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/i18n.config.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/i18n.config.js
@@ -434,17 +434,10 @@ angular.module('ambariAdminConsole')
 
     'authentication.connectivity.trustStore.label': 'Trust Store',
 
-    'authentication.connectivity.trustStore.options.default': 'JDK Default',
-    'authentication.connectivity.trustStore.options.custom': 'Custom',
-
     'authentication.connectivity.trustStorePath': 'Trust Store Path',
 
     'authentication.connectivity.trustStoreType.label': 'Trust Store Type',
 
-    'authentication.connectivity.trustStoreType.options.jks': 'JKS',
-    'authentication.connectivity.trustStoreType.options.jceks': 'JCEKS',
-    'authentication.connectivity.trustStoreType.options.pkcs12': 'PKCS12',
-
     'authentication.connectivity.trustStorePassword': 'Trust Store Password',
     'authentication.connectivity.dn': 'Bind DN',
     'authentication.connectivity.bindPassword': 'Bind Password',


[32/50] [abbrv] ambari git commit: AMBARI-21524: ResourceManager HA status not reported when using VIPs (jluniya)

Posted by nc...@apache.org.
AMBARI-21524: ResourceManager HA status not reported when using VIPs (jluniya)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b55e4578
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b55e4578
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b55e4578

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: b55e4578e529f247ba878803742540320ae9d6e4
Parents: 0a42f53
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Wed Jul 19 12:23:49 2017 -0700
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Wed Jul 19 12:23:49 2017 -0700

----------------------------------------------------------------------
 .../internal/AbstractProviderModule.java        |  1 +
 .../internal/HttpPropertyProvider.java          | 27 ++++++++++++++++++--
 .../internal/HttpPropertyProviderTest.java      | 11 ++++++++
 3 files changed, 37 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b55e4578/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
index f3211bf..0242d7c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
@@ -817,6 +817,7 @@ public abstract class AbstractProviderModule implements ProviderModule,
             managementController.getClusters(),
             PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
             PropertyHelper.getPropertyId("HostRoles", "host_name"),
+            PropertyHelper.getPropertyId("HostRoles", "public_host_name"),
             PropertyHelper.getPropertyId("HostRoles", "component_name"),
             HTTP_PROPERTY_REQUESTS));
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b55e4578/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HttpPropertyProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HttpPropertyProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HttpPropertyProvider.java
index 6a04b60..c556b06 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HttpPropertyProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HttpPropertyProvider.java
@@ -48,6 +48,7 @@ public class HttpPropertyProvider extends BaseProvider implements PropertyProvid
   private final StreamProvider streamProvider;
   private final String clusterNamePropertyId;
   private final String hostNamePropertyId;
+  private final String publicHostNamePropertyId;
   private final String componentNamePropertyId;
   private final Clusters clusters;
   private final Map<String, List<HttpPropertyRequest>> httpPropertyRequests;
@@ -60,6 +61,7 @@ public class HttpPropertyProvider extends BaseProvider implements PropertyProvid
       Clusters clusters,
       String clusterNamePropertyId,
       String hostNamePropertyId,
+      String publicHostNamePropertyId,
       String componentNamePropertyId,
       Map<String, List<HttpPropertyRequest>> httpPropertyRequests) {
 
@@ -67,6 +69,7 @@ public class HttpPropertyProvider extends BaseProvider implements PropertyProvid
     this.streamProvider = stream;
     this.clusterNamePropertyId = clusterNamePropertyId;
     this.hostNamePropertyId = hostNamePropertyId;
+    this.publicHostNamePropertyId = publicHostNamePropertyId;
     this.componentNamePropertyId = componentNamePropertyId;
     this.clusters = clusters;
     this.httpPropertyRequests = httpPropertyRequests;
@@ -103,6 +106,7 @@ public class HttpPropertyProvider extends BaseProvider implements PropertyProvid
     for (Resource resource : resources) {
       String clusterName = (String) resource.getPropertyValue(clusterNamePropertyId);
       String hostName = (String) resource.getPropertyValue(hostNamePropertyId);
+      String publicHostName = (String) resource.getPropertyValue(publicHostNamePropertyId);
       String componentName = (String) resource.getPropertyValue(componentNamePropertyId);
 
       if (clusterName != null && hostName != null && componentName != null &&
@@ -114,7 +118,7 @@ public class HttpPropertyProvider extends BaseProvider implements PropertyProvid
           List<HttpPropertyRequest> httpPropertyRequestList = httpPropertyRequests.get(componentName);
 
           for (HttpPropertyRequest httpPropertyRequest : httpPropertyRequestList) {
-            populateResource(httpPropertyRequest, resource, cluster, hostName);
+            populateResource(httpPropertyRequest, resource, cluster, hostName, publicHostName);
           }
         } catch (AmbariException e) {
           String msg = String.format("Could not load cluster with name %s.", clusterName);
@@ -128,7 +132,7 @@ public class HttpPropertyProvider extends BaseProvider implements PropertyProvid
 
   // populate the given resource from the given HTTP property request.
   private void populateResource(HttpPropertyRequest httpPropertyRequest, Resource resource,
-                                Cluster cluster, String hostName) throws SystemException {
+                                Cluster cluster, String hostName, String publicHostName) throws SystemException {
 
     String url = httpPropertyRequest.getUrl(cluster, hostName);
 
@@ -146,6 +150,25 @@ public class HttpPropertyProvider extends BaseProvider implements PropertyProvid
       }
     } catch (Exception e) {
       LOG.debug(String.format("Error reading HTTP response from %s", url), e);
+      if(publicHostName != null && !publicHostName.equalsIgnoreCase(hostName)) {
+        String publicUrl = httpPropertyRequest.getUrl(cluster, publicHostName);
+        LOG.debug(String.format("Retry using public host name url %s", publicUrl));
+        try {
+          InputStream inputStream = streamProvider.readFrom(publicUrl);
+
+          try {
+            httpPropertyRequest.populateResource(resource, inputStream);
+          } finally {
+            try {
+              inputStream.close();
+            } catch (IOException ioe) {
+              LOG.error(String.format("Error closing HTTP response stream %s", url), ioe);
+            }
+          }
+        } catch (Exception ex) {
+          LOG.debug(String.format("Error reading HTTP response from public host name url %s", url), ex);
+        }
+      }
     }
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b55e4578/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HttpPropertyProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HttpPropertyProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HttpPropertyProviderTest.java
index 2eb02d1..7c8a6b1 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HttpPropertyProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HttpPropertyProviderTest.java
@@ -44,6 +44,7 @@ import org.junit.Test;
 public class HttpPropertyProviderTest {
   private static final String PROPERTY_ID_CLUSTER_NAME = PropertyHelper.getPropertyId("HostRoles", "cluster_name");
   private static final String PROPERTY_ID_HOST_NAME = PropertyHelper.getPropertyId("HostRoles", "host_name");
+  private static final String PROPERTY_ID_PUBLIC_HOST_NAME = PropertyHelper.getPropertyId("HostRoles", "public_host_name");
   private static final String PROPERTY_ID_COMPONENT_NAME = PropertyHelper.getPropertyId("HostRoles", "component_name");
 
   private static final String PROPERTY_ID_STALE_CONFIGS = PropertyHelper.getPropertyId(
@@ -85,12 +86,14 @@ public class HttpPropertyProviderTest {
             streamProvider, clusters,
             PROPERTY_ID_CLUSTER_NAME,
             PROPERTY_ID_HOST_NAME,
+            PROPERTY_ID_PUBLIC_HOST_NAME,
             PROPERTY_ID_COMPONENT_NAME,
             HTTP_PROPERTY_REQUESTS);
 
     Resource resource = new ResourceImpl(Resource.Type.HostComponent);
 
     resource.setProperty(PROPERTY_ID_HOST_NAME, "ec2-54-234-33-50.compute-1.amazonaws.com");
+    resource.setProperty(PROPERTY_ID_PUBLIC_HOST_NAME, "ec2-54-234-33-50.compute-1.amazonaws.com");
     resource.setProperty(PROPERTY_ID_CLUSTER_NAME, "testCluster");
     resource.setProperty(PROPERTY_ID_COMPONENT_NAME, "RESOURCEMANAGER");
 
@@ -134,12 +137,14 @@ public class HttpPropertyProviderTest {
         streamProvider, clusters,
         PROPERTY_ID_CLUSTER_NAME,
         PROPERTY_ID_HOST_NAME,
+        PROPERTY_ID_PUBLIC_HOST_NAME,
         PROPERTY_ID_COMPONENT_NAME,
         HTTP_PROPERTY_REQUESTS);
 
     Resource resource = new ResourceImpl(Resource.Type.HostComponent);
 
     resource.setProperty(PROPERTY_ID_HOST_NAME, "lc6402.ambari.apache.org");
+    resource.setProperty(PROPERTY_ID_PUBLIC_HOST_NAME, "lc6402.ambari.apache.org");
     resource.setProperty(PROPERTY_ID_CLUSTER_NAME, "testCluster");
     resource.setProperty(PROPERTY_ID_COMPONENT_NAME, "RESOURCEMANAGER");
 
@@ -174,6 +179,7 @@ public class HttpPropertyProviderTest {
         streamProvider, clusters,
         PROPERTY_ID_CLUSTER_NAME,
         PROPERTY_ID_HOST_NAME,
+        PROPERTY_ID_PUBLIC_HOST_NAME,
         PROPERTY_ID_COMPONENT_NAME,
         HTTP_PROPERTY_REQUESTS);
 
@@ -181,6 +187,7 @@ public class HttpPropertyProviderTest {
 
     resource.setProperty(PROPERTY_ID_CLUSTER_NAME, "testCluster");
     resource.setProperty(PROPERTY_ID_HOST_NAME, "ec2-54-234-33-50.compute-1.amazonaws.com");
+    resource.setProperty(PROPERTY_ID_PUBLIC_HOST_NAME, "ec2-54-234-33-50.compute-1.amazonaws.com");
     resource.setProperty(PROPERTY_ID_COMPONENT_NAME, "ATLAS_SERVER");
 
     Request request = PropertyHelper.getReadRequest(Collections.<String>emptySet());
@@ -214,6 +221,7 @@ public class HttpPropertyProviderTest {
         streamProvider, clusters,
         PROPERTY_ID_CLUSTER_NAME,
         PROPERTY_ID_HOST_NAME,
+        PROPERTY_ID_PUBLIC_HOST_NAME,
         PROPERTY_ID_COMPONENT_NAME,
         HTTP_PROPERTY_REQUESTS);
 
@@ -221,6 +229,7 @@ public class HttpPropertyProviderTest {
 
     resource.setProperty(PROPERTY_ID_CLUSTER_NAME, "testCluster");
     resource.setProperty(PROPERTY_ID_HOST_NAME, "ec2-54-234-33-50.compute-1.amazonaws.com");
+    resource.setProperty(PROPERTY_ID_PUBLIC_HOST_NAME, "ec2-54-234-33-50.compute-1.amazonaws.com");
     resource.setProperty(PROPERTY_ID_COMPONENT_NAME, "ATLAS_SERVER");
 
     Request request = PropertyHelper.getReadRequest(Collections.<String>emptySet());
@@ -249,12 +258,14 @@ public class HttpPropertyProviderTest {
        streamProvider, clusters,
        PROPERTY_ID_CLUSTER_NAME,
        PROPERTY_ID_HOST_NAME,
+       PROPERTY_ID_PUBLIC_HOST_NAME,
        PROPERTY_ID_COMPONENT_NAME,
        HTTP_PROPERTY_REQUESTS);
 
     Resource resource = new ResourceImpl(Resource.Type.HostComponent);
 
     resource.setProperty(PROPERTY_ID_HOST_NAME, "ec2-54-234-33-50.compute-1.amazonaws.com");
+    resource.setProperty(PROPERTY_ID_PUBLIC_HOST_NAME, "ec2-54-234-33-50.compute-1.amazonaws.com");
     resource.setProperty(PROPERTY_ID_CLUSTER_NAME, "testCluster");
     resource.setProperty(PROPERTY_ID_COMPONENT_NAME, componentName);
 

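The retry logic added to HttpPropertyProvider above reduces to a small fallback pattern: try the internal host name first, and only on failure retry against a distinct public host name (the VIP alias). A minimal sketch in Python, where read_from() and build_url() are illustrative stand-ins, not Ambari API:

    def fetch_with_public_fallback(read_from, build_url, host, public_host):
        # First try the host name the component registered with.
        try:
            return read_from(build_url(host))
        except IOError:
            # Retry only when a distinct public host name (e.g. a VIP
            # alias) is available; otherwise surface the original failure.
            if public_host and public_host.lower() != host.lower():
                return read_from(build_url(public_host))
            raise

One detail worth flagging in the committed hunk: the debug message for a failed retry formats `url` rather than `publicUrl`, so the log will show the internal address even when it was the public URL that failed.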

[43/50] [abbrv] ambari git commit: AMBARI-21544. HiveServer2 fails to start when the webhdfs call to create /hdp/apps/..jar files fails with org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException (aonishuk)

Posted by nc...@apache.org.
AMBARI-21544. HiveServer2 fails to start when the webhdfs call to create /hdp/apps/..jar files fails with org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/cfd7bb4c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/cfd7bb4c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/cfd7bb4c

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: cfd7bb4cbe4b0af23d92dc64b81ec1cdedc4241d
Parents: da21afc
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Fri Jul 21 12:39:01 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Fri Jul 21 12:39:01 2017 +0300

----------------------------------------------------------------------
 .../libraries/providers/hdfs_resource.py        | 48 +++++++++++++++++++-
 1 file changed, 46 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/cfd7bb4c/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py b/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
index efca23d..0c45719 100644
--- a/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
+++ b/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
@@ -61,6 +61,11 @@ RESOURCE_TO_JSON_FIELDS = {
   'dfs_type': 'dfs_type'
 }
 
+EXCEPTIONS_TO_RETRY = {
+  # "ExceptionName": (try_count, try_sleep_seconds)
+  "LeaseExpiredException": (20, 6),
+}
+
 class HdfsResourceJar:
   """
   This is slower than HdfsResourceWebHDFS implementation of HdfsResouce, but it works in any cases on any DFS types.
@@ -132,6 +137,17 @@ class HdfsResourceJar:
     # Clean
     env.config['hdfs_files'] = []
 
+
+class WebHDFSCallException(Fail):
+  def __init__(self, message, result_message):
+    self.result_message = result_message
+    super(WebHDFSCallException, self).__init__(message)
+
+  def get_exception_name(self):
+    if isinstance(self.result_message, dict) and "RemoteException" in self.result_message and "exception" in self.result_message["RemoteException"]:
+      return self.result_message["RemoteException"]["exception"]
+    return None
+
 class WebHDFSUtil:
   def __init__(self, hdfs_site, run_user, security_enabled, logoutput=None):
     https_nn_address = namenode_ha_utils.get_property_for_active_namenode(hdfs_site, 'dfs.namenode.https-address',
@@ -153,8 +169,36 @@ class WebHDFSUtil:
     # only hdfs seems to support webHDFS
     return (is_webhdfs_enabled and default_fs.startswith("hdfs"))
     
+  def run_command(self, *args, **kwargs):
+    """
+    This functions is a wrapper for self._run_command which does retry routine for it.
+    """
+    try:
+      return self._run_command(*args, **kwargs)
+    except WebHDFSCallException as ex:
+      exception_name = ex.get_exception_name()
+      if exception_name in EXCEPTIONS_TO_RETRY:
+        try_count, try_sleep = EXCEPTIONS_TO_RETRY[exception_name]
+        last_exception = ex
+      else:
+        raise
+
+    while True:
+      Logger.info("Retrying after {0} seconds. Reason: {1}".format(try_sleep, str(last_exception)))
+      try_count -= 1
+      time.sleep(try_sleep)
+
+      if try_count == 0:
+        break
+
+      try:
+        self._run_command(*args, **kwargs)
+        break
+      except WebHDFSCallException as ex:
+        last_exception = ex
+
   valid_status_codes = ["200", "201"]
-  def run_command(self, target, operation, method='POST', assertable_result=True, file_to_put=None, ignore_status_codes=[], **kwargs):
+  def _run_command(self, target, operation, method='POST', assertable_result=True, file_to_put=None, ignore_status_codes=[], **kwargs):
     """
     assertable_result - some POST requests return '{"boolean":false}' or '{"boolean":true}'
     depending on if query was successful or not, we can assert this for them
@@ -201,7 +245,7 @@ class WebHDFSUtil:
       formatted_output = json.dumps(result_dict, indent=2) if isinstance(result_dict, dict) else result_dict
       formatted_output = err + "\n" + formatted_output
       err_msg = "Execution of '%s' returned status_code=%s. %s" % (shell.string_cmd_from_args_list(cmd), status_code, formatted_output)
-      raise Fail(err_msg)
+      raise WebHDFSCallException(err_msg, result_dict)
     
     return result_dict
     

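The EXCEPTIONS_TO_RETRY table and run_command wrapper above retry keyed on the WebHDFS RemoteException name. Two behaviors of the committed wrapper are worth noting: retried attempts discard whatever _run_command returns, and when try_count reaches zero the loop breaks without re-raising, so the final failure is swallowed. A stricter generic variant could look like this (an illustrative sketch, not the committed code):

    import time

    def run_with_retry(fn, name_of, retry_table, *args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except Exception as ex:
            name = name_of(ex)  # e.g. "LeaseExpiredException"
            if name not in retry_table:
                raise
            try_count, try_sleep = retry_table[name]
            last_exception = ex

        for _ in range(try_count):
            print("Retrying after %s seconds. Reason: %s" % (try_sleep, last_exception))
            time.sleep(try_sleep)
            try:
                return fn(*args, **kwargs)  # propagate a successful result
            except Exception as ex:
                if name_of(ex) not in retry_table:
                    raise
                last_exception = ex
        raise last_exception  # budget exhausted: fail loudly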

[05/50] [abbrv] ambari git commit: AMBARI-21454. hive20 and wfmanager views fail to build due to missing module babel-plugin-transform-es2015-block-scoping (Vijay Kumar via smohanty)

Posted by nc...@apache.org.
AMBARI-21454. hive20 and wfmanager views fail to build due to missing module babel-plugin-transform-es2015-block-scoping (Vijay Kumar via smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/eb1adcbf
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/eb1adcbf
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/eb1adcbf

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: eb1adcbff32fb9440f288ccaddc997297eb8e4fb
Parents: f27f3af
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Wed Jul 12 16:30:49 2017 -0700
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Wed Jul 12 16:30:49 2017 -0700

----------------------------------------------------------------------
 contrib/views/hive20/src/main/resources/ui/package.json    | 1 +
 contrib/views/wfmanager/src/main/resources/ui/package.json | 1 +
 2 files changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/eb1adcbf/contrib/views/hive20/src/main/resources/ui/package.json
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/package.json b/contrib/views/hive20/src/main/resources/ui/package.json
index a409111..eea8cf9 100644
--- a/contrib/views/hive20/src/main/resources/ui/package.json
+++ b/contrib/views/hive20/src/main/resources/ui/package.json
@@ -24,6 +24,7 @@
     "bootstrap-daterangepicker": "2.1.24",
     "bower": "^1.7.9",
     "broccoli-asset-rev": "^2.4.2",
+    "babel-plugin-transform-es2015-block-scoping": "^6.24.1",
     "ember-ajax": "^2.0.1",
     "ember-cli": "2.7.0",
     "ember-cli-app-version": "^1.0.0",

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb1adcbf/contrib/views/wfmanager/src/main/resources/ui/package.json
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/package.json b/contrib/views/wfmanager/src/main/resources/ui/package.json
index 25ed6c1..69f43c8 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/package.json
+++ b/contrib/views/wfmanager/src/main/resources/ui/package.json
@@ -21,6 +21,7 @@
   "devDependencies": {
     "bower": "^1.7.7",
     "broccoli-asset-rev": "^2.2.0",
+    "babel-plugin-transform-es2015-block-scoping": "^6.24.1",
     "ember-ajax": "0.7.1",
     "ember-cli": "2.3.0",
     "ember-cli-app-version": "^1.0.0",

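The new entry goes in devDependencies because the plugin is consumed at build time by the ember-cli Babel pipeline rather than at runtime. A quick way to confirm the module resolves after the change (illustrative):

    npm install && npm ls babel-plugin-transform-es2015-block-scoping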

[09/50] [abbrv] ambari git commit: AMBARI-19038. Support migration of LDAP users & groups to PAM (rlevas)

Posted by nc...@apache.org.
AMBARI-19038. Support migration of LDAP users & groups to PAM (rlevas)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f7fac037
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f7fac037
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f7fac037

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: f7fac03778fda337bc96ed49ed7507e1af118b7d
Parents: f22256e
Author: Robert Levas <rl...@hortonworks.com>
Authored: Fri Jul 14 10:47:17 2017 -0400
Committer: Robert Levas <rl...@hortonworks.com>
Committed: Fri Jul 14 10:47:17 2017 -0400

----------------------------------------------------------------------
 .../controllers/groups/GroupsEditCtrl.js        |   3 +
 ambari-server/pom.xml                           |   2 +-
 ambari-server/sbin/ambari-server                |   6 +-
 .../LdapToPamMigrationHelper.java               |  73 ++++++++++++
 .../server/security/authorization/Users.java    |   4 +
 ambari-server/src/main/python/ambari-server.py  |  14 ++-
 .../main/python/ambari_server/setupActions.py   |   1 +
 .../main/python/ambari_server/setupSecurity.py  | 119 ++++++++++++++++---
 8 files changed, 198 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f7fac037/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/groups/GroupsEditCtrl.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/groups/GroupsEditCtrl.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/groups/GroupsEditCtrl.js
index 21d0fd6..a63ebe2 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/groups/GroupsEditCtrl.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/groups/GroupsEditCtrl.js
@@ -68,6 +68,7 @@ angular.module('ambariAdminConsole')
 
   function loadMembers(){
     $scope.group.getMembers().then(function(members) {
+      $scope.group.groupTypeName = $t(GroupConstants.TYPES[$scope.group.group_type].LABEL_KEY);
       $scope.groupMembers = members;
       $scope.group.editingUsers = angular.copy($scope.groupMembers);
     });
@@ -81,6 +82,8 @@ angular.module('ambariAdminConsole')
     loadMembers();
   });
 
+  $scope.group.getGroupType();
+
   $scope.deleteGroup = function(group) {
     ConfirmationModal.show(
       $t('common.delete', {

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7fac037/ambari-server/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-server/pom.xml b/ambari-server/pom.xml
index 878665e..70907da 100644
--- a/ambari-server/pom.xml
+++ b/ambari-server/pom.xml
@@ -1684,7 +1684,7 @@
     <dependency>
       <groupId>net.java.dev.jna</groupId>
       <artifactId>jna</artifactId>
-      <version>4.1.0</version>
+      <version>4.3.0</version>
     </dependency>
   </dependencies>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7fac037/ambari-server/sbin/ambari-server
----------------------------------------------------------------------
diff --git a/ambari-server/sbin/ambari-server b/ambari-server/sbin/ambari-server
index 24ec43a..1c6c612 100755
--- a/ambari-server/sbin/ambari-server
+++ b/ambari-server/sbin/ambari-server
@@ -137,6 +137,10 @@ case "${1:-}" in
         echo -e "Setting up PAM properties..."
         $PYTHON "$AMBARI_PYTHON_EXECUTABLE" $@
         ;;
+  migrate-ldap-pam)
+        echo -e "Migration LDAP to PAM"
+        $PYTHON "$AMBARI_PYTHON_EXECUTABLE" $@
+        ;;
   setup-ldap)
         echo -e "Setting up LDAP properties..."
         $PYTHON "$AMBARI_PYTHON_EXECUTABLE" $@
@@ -203,7 +207,7 @@ case "${1:-}" in
         ;;
   *)
         echo "Usage: $AMBARI_EXECUTABLE
-        {start|stop|reset|restart|upgrade|status|upgradestack|setup|setup-jce|setup-ldap|sync-ldap|set-current|setup-security|refresh-stack-hash|backup|restore|update-host-names|check-database|enable-stack|setup-sso|db-purge-history|install-mpack|uninstall-mpack|upgrade-mpack|setup-kerberos} [options]
+        {start|stop|reset|restart|upgrade|status|upgradestack|setup|setup-jce|setup-ldap|sync-ldap|set-current|setup-security|refresh-stack-hash|backup|restore|update-host-names|check-database|enable-stack|setup-sso|db-purge-history|install-mpack|uninstall-mpack|upgrade-mpack|setup-kerberos|setup-pam|migrate-ldap-pam} [options]
         Use $AMBARI_PYTHON_EXECUTABLE <action> --help to get details on options available.
         Or, simply invoke ambari-server.py --help to print the options."
         exit 1

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7fac037/ambari-server/src/main/java/org/apache/ambari/server/security/authentication/LdapToPamMigrationHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/security/authentication/LdapToPamMigrationHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/security/authentication/LdapToPamMigrationHelper.java
new file mode 100644
index 0000000..8a3a012
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/security/authentication/LdapToPamMigrationHelper.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.security.authentication;
+
+import java.sql.SQLException;
+
+import org.apache.ambari.server.audit.AuditLoggerModule;
+import org.apache.ambari.server.controller.ControllerModule;
+import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.orm.DBAccessor.DbType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.inject.Guice;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+
+public class LdapToPamMigrationHelper {
+  private static final Logger LOG = LoggerFactory.getLogger(LdapToPamMigrationHelper.class);
+
+  @Inject
+  private DBAccessor dbAccessor;
+
+  /**
+   * Migrate LDAP user & groups to PAM
+   *
+   * @throws SQLException if an error occurs while executing the needed SQL statements
+   */
+  private void migrateLdapUsersGroups() throws SQLException {
+    if (dbAccessor.getDbType() != DbType.ORACLE) { // Tested MYSQL, POSTGRES && MYSQL)
+      dbAccessor.executeQuery("UPDATE users SET user_type='PAM',ldap_user=0 WHERE ldap_user=1 and user_name not in (select user_name from (select user_name from users where user_type = 'PAM') as a)");
+      dbAccessor.executeQuery("UPDATE groups SET group_type='PAM',ldap_group=0 WHERE ldap_group=1 and group_name not in (select group_name from (select group_name from groups where group_type = 'PAM') as a)");
+    } else { // Tested ORACLE
+      dbAccessor.executeQuery("UPDATE users SET user_type='PAM',ldap_user=0 WHERE ldap_user=1 and user_name not in (select user_name from users where user_type = 'PAM')");
+      dbAccessor.executeQuery("UPDATE groups SET group_type='PAM',ldap_group=0 WHERE ldap_group=1 and group_name not in (select group_name from groups where group_type = 'PAM')");
+    }
+  }
+
+  /**
+   * Support changes needed to migrate LDAP users & groups to PAM
+   *
+   * @param args Simple key value json map
+   */
+  public static void main(String[] args) {
+
+    try {
+      Injector injector = Guice.createInjector(new ControllerModule(), new AuditLoggerModule());
+      LdapToPamMigrationHelper migrationHelper = injector.getInstance(LdapToPamMigrationHelper.class);
+
+      migrationHelper.migrateLdapUsersGroups();
+
+    } catch (Throwable t) {
+      LOG.error("Caught exception on migration. Exiting...", t);
+      System.exit(1);
+    }
+
+  }
+}
\ No newline at end of file

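A note on the two branches above: MySQL rejects an UPDATE whose subquery reads from the table being updated (error 1093), so the non-Oracle branch wraps the inner SELECT in a derived table (... as a) to force it to materialize first; Oracle has no such restriction, so the plain subquery form works there. The comment "Tested MYSQL, POSTGRES && MYSQL" presumably means MySQL and Postgres.
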
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7fac037/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java b/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java
index 9cdde8f..16c6c16 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java
@@ -162,6 +162,10 @@ public class Users {
     if (userEntity != null) {
       userEntities.add(userEntity);
     }
+    userEntity = userDAO.findUserByNameAndType(userName, UserType.PAM);
+    if (userEntity != null) {
+      userEntities.add(userEntity);
+    }
     return (userEntities.isEmpty() || userEntities.size() > 1) ? null : new User(userEntities.get(0));
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7fac037/ambari-server/src/main/python/ambari-server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari-server.py b/ambari-server/src/main/python/ambari-server.py
index 5adcb04..8fcde77 100755
--- a/ambari-server/src/main/python/ambari-server.py
+++ b/ambari-server/src/main/python/ambari-server.py
@@ -52,8 +52,8 @@ from ambari_server.setupActions import BACKUP_ACTION, LDAP_SETUP_ACTION, LDAP_SY
   SETUP_ACTION, SETUP_SECURITY_ACTION,START_ACTION, STATUS_ACTION, STOP_ACTION, RESTART_ACTION, UPGRADE_ACTION, \
   SETUP_JCE_ACTION, SET_CURRENT_ACTION, START_ACTION, STATUS_ACTION, STOP_ACTION, UPGRADE_ACTION, \
   SETUP_JCE_ACTION, SET_CURRENT_ACTION, ENABLE_STACK_ACTION, SETUP_SSO_ACTION, \
-  DB_PURGE_ACTION, INSTALL_MPACK_ACTION, UNINSTALL_MPACK_ACTION, UPGRADE_MPACK_ACTION, PAM_SETUP_ACTION, KERBEROS_SETUP_ACTION
-from ambari_server.setupSecurity import setup_ldap, sync_ldap, setup_master_key, setup_ambari_krb5_jaas, setup_pam
+  DB_PURGE_ACTION, INSTALL_MPACK_ACTION, UNINSTALL_MPACK_ACTION, UPGRADE_MPACK_ACTION, PAM_SETUP_ACTION, MIGRATE_LDAP_PAM_ACTION, KERBEROS_SETUP_ACTION
+from ambari_server.setupSecurity import setup_ldap, sync_ldap, setup_master_key, setup_ambari_krb5_jaas, setup_pam, migrate_ldap_pam
 from ambari_server.userInput import get_validated_string_input
 from ambari_server.kerberos_setup import setup_kerberos
 
@@ -540,6 +540,11 @@ def init_ldap_setup_parser_options(parser):
   parser.add_option('--ldap-sync-username-collisions-behavior', default=None, help="Handling behavior for username collisions [convert/skip] for LDAP sync", dest="ldap_sync_username_collisions_behavior")
 
 @OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_pam_setup_parser_options(parser):
+  parser.add_option('--pam-config-file', default=None, help="Path to the PAM configuration file", dest="pam_config_file")
+  parser.add_option('--pam-auto-create-groups', default=None, help="Automatically create groups for authenticated users [true/false]", dest="pam_auto_create_groups")
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
 def init_set_current_parser_options(parser):
   parser.add_option('--cluster-name', default=None, help="Cluster name", dest="cluster_name")
   parser.add_option('--version-display-name', default=None, help="Display name of desired repo version", dest="desired_repo_version")
@@ -783,7 +788,8 @@ def create_user_action_map(args, options):
         INSTALL_MPACK_ACTION: UserAction(install_mpack, options),
         UNINSTALL_MPACK_ACTION: UserAction(uninstall_mpack, options),
         UPGRADE_MPACK_ACTION: UserAction(upgrade_mpack, options),
-        PAM_SETUP_ACTION: UserAction(setup_pam),
+        PAM_SETUP_ACTION: UserAction(setup_pam, options),
+        MIGRATE_LDAP_PAM_ACTION: UserAction(migrate_ldap_pam, options),
         KERBEROS_SETUP_ACTION: UserAction(setup_kerberos, options)
       }
   return action_map
@@ -814,7 +820,7 @@ def init_action_parser(action, parser):
     INSTALL_MPACK_ACTION: init_install_mpack_parser_options,
     UNINSTALL_MPACK_ACTION: init_uninstall_mpack_parser_options,
     UPGRADE_MPACK_ACTION: init_upgrade_mpack_parser_options,
-    PAM_SETUP_ACTION: init_empty_parser_options,
+    PAM_SETUP_ACTION: init_pam_setup_parser_options,
     KERBEROS_SETUP_ACTION: init_kerberos_setup_parser_options,
   }
   parser.add_option("-v", "--verbose",

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7fac037/ambari-server/src/main/python/ambari_server/setupActions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/setupActions.py b/ambari-server/src/main/python/ambari_server/setupActions.py
index 707cb84..61d20af 100644
--- a/ambari-server/src/main/python/ambari_server/setupActions.py
+++ b/ambari-server/src/main/python/ambari_server/setupActions.py
@@ -47,4 +47,5 @@ INSTALL_MPACK_ACTION = "install-mpack"
 UNINSTALL_MPACK_ACTION = "uninstall-mpack"
 UPGRADE_MPACK_ACTION = "upgrade-mpack"
 PAM_SETUP_ACTION = "setup-pam"
+MIGRATE_LDAP_PAM_ACTION = "migrate-ldap-pam"
 KERBEROS_SETUP_ACTION = "setup-kerberos"

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7fac037/ambari-server/src/main/python/ambari_server/setupSecurity.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/setupSecurity.py b/ambari-server/src/main/python/ambari_server/setupSecurity.py
index 17d1025..f175d7c 100644
--- a/ambari-server/src/main/python/ambari_server/setupSecurity.py
+++ b/ambari-server/src/main/python/ambari_server/setupSecurity.py
@@ -37,9 +37,9 @@ from ambari_commons.os_check import OSConst
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from ambari_commons.os_utils import is_root, set_file_permissions, \
   run_os_command, search_file, is_valid_filepath, change_owner, get_ambari_repo_file_full_name, get_file_owner
-from ambari_server.serverConfiguration import configDefaults, \
+from ambari_server.serverConfiguration import configDefaults, parse_properties_file, \
   encrypt_password, find_jdk, find_properties_file, get_alias_string, get_ambari_properties, get_conf_dir, \
-  get_credential_store_location, get_is_persisted, get_is_secure, get_master_key_location, write_property, \
+  get_credential_store_location, get_is_persisted, get_is_secure, get_master_key_location, get_db_type, write_property, \
   get_original_master_key, get_value_from_properties, get_java_exe_path, is_alias_string, read_ambari_user, \
   read_passwd_for_alias, remove_password_file, save_passwd_for_alias, store_password_file, update_properties_2, \
   BLIND_PASSWORD, BOOTSTRAP_DIR_PROPERTY, IS_LDAP_CONFIGURED, JDBC_PASSWORD_FILENAME, JDBC_PASSWORD_PROPERTY, \
@@ -54,6 +54,8 @@ from ambari_server.serverUtils import is_server_runing, get_ambari_server_api_ba
 from ambari_server.setupActions import SETUP_ACTION, LDAP_SETUP_ACTION
 from ambari_server.userInput import get_validated_string_input, get_prompt_default, read_password, get_YN_input, quit_if_has_answer
 from ambari_server.serverClassPath import ServerClassPath
+from ambari_server.dbConfiguration import DBMSConfigFactory, check_jdbc_drivers, \
+  get_jdbc_driver_path, ensure_jdbc_driver_is_installed, LINUX_DBMS_KEYS_LIST
 
 logger = logging.getLogger(__name__)
 
@@ -64,6 +66,9 @@ REGEX_TRUE_FALSE = "^(true|false)?$"
 REGEX_SKIP_CONVERT = "^(skip|convert)?$"
 REGEX_REFERRAL = "^(follow|ignore)?$"
 REGEX_ANYTHING = ".*"
+LDAP_TO_PAM_MIGRATION_HELPER_CMD = "{0} -cp {1} " + \
+                                   "org.apache.ambari.server.security.authentication.LdapToPamMigrationHelper" + \
+                                   " >> " + configDefaults.SERVER_OUT_FILE + " 2>&1"
 
 CLIENT_SECURITY_KEY = "client.security"
 
@@ -621,8 +626,12 @@ def setup_ldap(options):
   properties = get_ambari_properties()
 
   if get_value_from_properties(properties,CLIENT_SECURITY_KEY,"") == 'pam':
-    err = "PAM is configured. Can not setup LDAP."
-    raise FatalException(1, err)
+    query = "PAM is currently configured, do you wish to use LDAP instead [y/n] (n)? "
+    if get_YN_input(query, False):
+      pass
+    else:
+      err = "PAM is configured. Can not setup LDAP."
+      raise FatalException(1, err)
 
   isSecure = get_is_secure(properties)
 
@@ -824,38 +833,112 @@ def ensure_can_start_under_current_user(ambari_user):
   return current_user
 
 class PamPropTemplate:
-  def __init__(self, properties, i_prop_name, i_prop_val_pattern, i_prompt_regex, i_allow_empty_prompt, i_prop_name_default=None):
+  def __init__(self, properties, i_option, i_prop_name, i_prop_val_pattern, i_prompt_regex, i_allow_empty_prompt, i_prop_name_default=None):
     self.prop_name = i_prop_name
+    self.option = i_option
     self.pam_prop_name = get_value_from_properties(properties, i_prop_name, i_prop_name_default)
     self.pam_prop_val_prompt = i_prop_val_pattern.format(get_prompt_default(self.pam_prop_name))
     self.prompt_regex = i_prompt_regex
     self.allow_empty_prompt = i_allow_empty_prompt
 
-def setup_pam():
+def init_pam_properties_list_reqd(properties, options):
+  properties = [
+    PamPropTemplate(properties, options.pam_config_file, PAM_CONFIG_FILE, "PAM configuration file* {0}: ", REGEX_ANYTHING, False, "/etc/pam.d/ambari"),
+    PamPropTemplate(properties, options.pam_auto_create_groups, AUTO_GROUP_CREATION, "Do you want to allow automatic group creation* [true/false] {0}: ", REGEX_TRUE_FALSE, False, "false"),
+  ]
+  return properties
+
+def setup_pam(options):
   if not is_root():
-    err = 'Ambari-server setup-pam should be run with ' \
-          'root-level privileges'
+    err = 'Ambari-server setup-pam should be run with root-level privileges'
     raise FatalException(4, err)
 
   properties = get_ambari_properties()
 
   if get_value_from_properties(properties,CLIENT_SECURITY_KEY,"") == 'ldap':
-    err = "LDAP is configured. Can not setup PAM."
-    raise FatalException(1, err)
+    query = "LDAP is currently configured, do you wish to use PAM instead [y/n] (n)? "
+    if get_YN_input(query, False):
+      pass
+    else:
+      err = "LDAP is configured. Can not setup PAM."
+      raise FatalException(1, err)
+
+  pam_property_list_reqd = init_pam_properties_list_reqd(properties, options)
 
   pam_property_value_map = {}
   pam_property_value_map[CLIENT_SECURITY_KEY] = 'pam'
 
-  pamConfig = get_validated_string_input("Enter PAM configuration file: ", PAM_CONFIG_FILE, REGEX_ANYTHING,
-                                         "Invalid characters in the input!", False, False)
-
-  pam_property_value_map[PAM_CONFIG_FILE] = pamConfig
+  for pam_prop in pam_property_list_reqd:
+    input = get_validated_string_input(pam_prop.pam_prop_val_prompt, pam_prop.pam_prop_name, pam_prop.prompt_regex,
+                                       "Invalid characters in the input!", False, pam_prop.allow_empty_prompt,
+                                       answer = pam_prop.option)
+    if input is not None and input != "":
+      pam_property_value_map[pam_prop.prop_name] = input
 
-  if get_YN_input("Do you want to allow automatic group creation [y/n] (y)? ", True):
-    pam_property_value_map[AUTO_GROUP_CREATION] = 'true'
-  else:
-    pam_property_value_map[AUTO_GROUP_CREATION] = 'false'
+  # Verify that the PAM config file exists, else show warning...
+  pam_config_file = pam_property_value_map[PAM_CONFIG_FILE]
+  if not os.path.exists(pam_config_file):
+    print_warning_msg("The PAM configuration file, {0} does not exist.  " \
+                      "Please create it before restarting Ambari.".format(pam_config_file))
 
   update_properties_2(properties, pam_property_value_map)
   print 'Saving...done'
   return 0
+
+#
+# Migration of LDAP users & groups to PAM
+#
+def migrate_ldap_pam(args):
+  properties = get_ambari_properties()
+
+  if get_value_from_properties(properties,CLIENT_SECURITY_KEY,"") != 'pam':
+    err = "PAM is not configured. Please configure PAM authentication first."
+    raise FatalException(1, err)
+
+  db_title = get_db_type(properties).title
+  confirm = get_YN_input("Ambari Server configured for %s. Confirm "
+                        "you have made a backup of the Ambari Server database [y/n] (y)? " % db_title, True)
+
+  if not confirm:
+    print_error_msg("Database backup is not confirmed")
+    return 1
+
+  jdk_path = get_java_exe_path()
+  if jdk_path is None:
+    print_error_msg("No JDK found, please run the \"setup\" "
+                    "command to install a JDK automatically or install any "
+                    "JDK manually to " + configDefaults.JDK_INSTALL_DIR)
+    return 1
+
+  # At this point, the args does not have the ambari database information.
+  # Augment the args with the correct ambari database information
+  parse_properties_file(args)
+
+  ensure_jdbc_driver_is_installed(args, properties)
+
+  print 'Migrating LDAP Users & Groups to PAM'
+
+  serverClassPath = ServerClassPath(properties, args)
+  class_path = serverClassPath.get_full_ambari_classpath_escaped_for_shell()
+
+  command = LDAP_TO_PAM_MIGRATION_HELPER_CMD.format(jdk_path, class_path)
+
+  ambari_user = read_ambari_user()
+  current_user = ensure_can_start_under_current_user(ambari_user)
+  environ = generate_env(args, ambari_user, current_user)
+
+  (retcode, stdout, stderr) = run_os_command(command, env=environ)
+  print_info_msg("Return code from LDAP to PAM migration command, retcode = " + str(retcode))
+  if stdout:
+    print "Console output from LDAP to PAM migration command:"
+    print stdout
+    print
+  if stderr:
+    print "Error output from LDAP to PAM migration command:"
+    print stderr
+    print
+  if retcode > 0:
+    print_error_msg("Error executing LDAP to PAM migration, please check the server logs.")
+  else:
+    print_info_msg('LDAP to PAM migration completed')
+  return retcode

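Taken together, the changes above add a migrate-ldap-pam action and extend setup-pam with non-interactive options. A hedged usage sketch (option names per init_pam_setup_parser_options above; back up the Ambari database first, since the migration rewrites the users and groups tables):

    ambari-server setup-pam --pam-config-file=/etc/pam.d/ambari --pam-auto-create-groups=true
    ambari-server migrate-ldap-pam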

[08/50] [abbrv] ambari git commit: AMBARI-21234. Ambari rack awareness for Kafka. (Ambud Sharma via stoader)

Posted by nc...@apache.org.
AMBARI-21234. Ambari rack awareness for Kafka. (Ambud Sharma via stoader)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f22256e7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f22256e7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f22256e7

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: f22256e73af4e4cb27d3aaf47ba58a8864e37873
Parents: 63186bf
Author: Ambud Sharma <am...@hortonworks.com>
Authored: Fri Jul 14 16:04:17 2017 +0200
Committer: Toader, Sebastian <st...@hortonworks.com>
Committed: Fri Jul 14 16:04:17 2017 +0200

----------------------------------------------------------------------
 .../common-services/KAFKA/0.10.0.3.0/metainfo.xml       |  1 +
 .../KAFKA/0.10.0.3.0/package/scripts/kafka.py           | 10 ++++++++++
 .../KAFKA/0.10.0.3.0/package/scripts/params.py          |  3 +++
 .../resources/common-services/KAFKA/0.10.0/metainfo.xml |  1 +
 .../KAFKA/0.8.1/package/scripts/kafka.py                | 12 ++++++++++++
 .../KAFKA/0.8.1/package/scripts/params.py               |  3 +++
 6 files changed, 30 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f22256e7/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/metainfo.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/metainfo.xml
index a19850e..f408ba3 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/metainfo.xml
@@ -104,6 +104,7 @@
                 </osSpecific>
             </osSpecifics>
             <restartRequiredAfterChange>true</restartRequiredAfterChange>
+            <restartRequiredAfterRackChange>true</restartRequiredAfterRackChange>
         </service>
     </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/f22256e7/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/kafka.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/kafka.py b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/kafka.py
index 680dd32..62a9003 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/kafka.py
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/kafka.py
@@ -103,6 +103,16 @@ def kafka(upgrade_type=None):
 
     kafka_data_dir = kafka_server_config['log.dirs']
     kafka_data_dirs = filter(None, kafka_data_dir.split(","))
+
+    rack="/default-rack"
+    i=0
+    if len(params.all_racks) > 0:
+     for host in params.all_hosts:
+      if host == params.hostname:
+        rack=params.all_racks[i]
+        break
+      i=i+1
+
     Directory(kafka_data_dirs,
               mode=0755,
               cd_access='a',

http://git-wip-us.apache.org/repos/asf/ambari/blob/f22256e7/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/params.py
index 5b0be54..4d0448f 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/params.py
@@ -105,6 +105,9 @@ zookeeper_hosts.sort()
 secure_acls = default("/configurations/kafka-broker/zookeeper.set.acl", False)
 kafka_security_migrator = os.path.join(kafka_home, "bin", "zookeeper-security-migration.sh")
 
+all_hosts = default("/clusterHostInfo/all_hosts", [])
+all_racks = default("/clusterHostInfo/all_racks", [])
+
 #Kafka log4j
 kafka_log_maxfilesize = default('/configurations/kafka-log4j/kafka_log_maxfilesize',256)
 kafka_log_maxbackupindex = default('/configurations/kafka-log4j/kafka_log_maxbackupindex',20)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f22256e7/ambari-server/src/main/resources/common-services/KAFKA/0.10.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0/metainfo.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0/metainfo.xml
index 84b47d8..c1fcde8 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0/metainfo.xml
@@ -22,6 +22,7 @@
       <name>KAFKA</name>
       <extends>common-services/KAFKA/0.9.0</extends>
       <version>0.10.0</version>
+      <restartRequiredAfterRackChange>true</restartRequiredAfterRackChange>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/f22256e7/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/kafka.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/kafka.py b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/kafka.py
index 680dd32..3fe1e2d 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/kafka.py
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/kafka.py
@@ -103,6 +103,18 @@ def kafka(upgrade_type=None):
 
     kafka_data_dir = kafka_server_config['log.dirs']
     kafka_data_dirs = filter(None, kafka_data_dir.split(","))
+
+    rack="/default-rack"
+    i=0
+    if len(params.all_racks) > 0:
+     for host in params.all_hosts:
+      if host == params.hostname:
+        rack=params.all_racks[i]
+        break
+      i=i+1
+
+    kafka_server_config['broker.rack']=rack
+
     Directory(kafka_data_dirs,
               mode=0755,
               cd_access='a',

http://git-wip-us.apache.org/repos/asf/ambari/blob/f22256e7/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
index 9acc1ef..c7e84fc 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
@@ -105,6 +105,9 @@ zookeeper_hosts.sort()
 secure_acls = default("/configurations/kafka-broker/zookeeper.set.acl", False)
 kafka_security_migrator = os.path.join(kafka_home, "bin", "zookeeper-security-migration.sh")
 
+all_hosts = default("/clusterHostInfo/all_hosts", [])
+all_racks = default("/clusterHostInfo/all_racks", [])
+
 #Kafka log4j
 kafka_log_maxfilesize = default('/configurations/kafka-log4j/kafka_log_maxfilesize',256)
 kafka_log_maxbackupindex = default('/configurations/kafka-log4j/kafka_log_maxbackupindex',20)

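The rack lookup added above walks the parallel all_hosts / all_racks lists from clusterHostInfo with a manual index. An equivalent, more idiomatic form (an illustrative sketch, not the committed code):

    def rack_for(hostname, all_hosts, all_racks, default="/default-rack"):
        # clusterHostInfo supplies all_hosts and all_racks as parallel lists.
        host_to_rack = dict(zip(all_hosts, all_racks))
        return host_to_rack.get(hostname, default)

Also worth noting: the 0.8.1 hunk assigns the result to kafka_server_config['broker.rack'], while the 0.10.0.3.0 hunk shown here only computes rack; if the assignment does not happen elsewhere in that file, the computed value is unused there.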

[39/50] [abbrv] ambari git commit: AMBARI-21535. ACTIVITY_ANALYZER Install failed: Error: Unable to run the custom hook script (aonishuk)

Posted by nc...@apache.org.
AMBARI-21535. ACTIVITY_ANALYZER Install failed: Error: Unable to run the custom hook script (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9c451107
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9c451107
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9c451107

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 9c451107f316dbfcc45f99d536d2a6d4a4d99249
Parents: d999343
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Thu Jul 20 16:21:20 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Thu Jul 20 16:21:20 2017 +0300

----------------------------------------------------------------------
 .../hooks/before-ANY/files/changeToSecureUid.sh  |  2 ++
 .../before-ANY/scripts/shared_initialization.py  | 19 ++++++-------------
 2 files changed, 8 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9c451107/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/files/changeToSecureUid.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/files/changeToSecureUid.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/files/changeToSecureUid.sh
index 4663f10..a6b8b77 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/files/changeToSecureUid.sh
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/files/changeToSecureUid.sh
@@ -44,6 +44,8 @@ if [ -z $2 ]; then
   fi
   echo $newUid
   exit 0
+else
+  find_available_uid
 fi
 
 if [ $newUid -eq 0 ]

http://git-wip-us.apache.org/repos/asf/ambari/blob/9c451107/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
index b687229..ee950e8 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
@@ -44,19 +44,12 @@ def setup_users():
       )
 
     for user in params.user_list:
-      if params.override_uid == "true":
-        User(user,
-             uid = get_uid(user),
-             gid = params.user_to_gid_dict[user],
-             groups = params.user_to_groups_dict[user],
-             fetch_nonlocal_groups = params.fetch_nonlocal_groups,
-             )
-      else:
-        User(user,
-             gid = params.user_to_gid_dict[user],
-             groups = params.user_to_groups_dict[user],
-             fetch_nonlocal_groups = params.fetch_nonlocal_groups,
-             )
+      User(user,
+           uid = get_uid(user) if params.override_uid == "true" else None,
+           gid = params.user_to_gid_dict[user],
+           groups = params.user_to_groups_dict[user],
+           fetch_nonlocal_groups = params.fetch_nonlocal_groups,
+           )
 
     if params.override_uid == "true":
       set_uid(params.smoke_user, params.smoke_user_dirs)

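The refactor above collapses the duplicated User(...) calls by passing uid=get_uid(user) if params.override_uid == "true" else None, which is equivalent to the removed if/else assuming the User resource treats uid=None as "leave the uid unmanaged" (the default when the argument was previously omitted).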

[25/50] [abbrv] ambari git commit: AMBARI-21510. Convert calculated value for 'hive.server2.tez.sessions.per.default.queue' to long before setting it.

Posted by nc...@apache.org.
AMBARI-21510. Convert calculated value for 'hive.server2.tez.sessions.per.default.queue' to long before setting it.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/016df4e9
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/016df4e9
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/016df4e9

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: 016df4e9f795ff3b05e27dd49d8e81bd4e9dc28e
Parents: 274a995
Author: Swapan Shridhar <ss...@hortonworks.com>
Authored: Tue Jul 18 12:48:53 2017 -0700
Committer: Swapan Shridhar <ss...@hortonworks.com>
Committed: Tue Jul 18 15:24:23 2017 -0700

----------------------------------------------------------------------
 .../common-services/YARN/3.0.0.3.0/service_advisor.py         | 7 +++----
 .../main/resources/stacks/HDP/2.5/services/stack_advisor.py   | 7 +++----
 .../src/test/python/stacks/2.5/common/test_stack_advisor.py   | 4 ++--
 3 files changed, 8 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/016df4e9/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
index 0fb538d..74e0510 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
@@ -1008,11 +1008,10 @@ class YARNRecommender(service_advisor.ServiceAdvisor):
 
     if not llap_concurrency_in_changed_configs:
       min_llap_concurrency = 1
-      putHiveInteractiveSiteProperty('hive.server2.tez.sessions.per.default.queue', llap_concurrency)
-      putHiveInteractiveSitePropertyAttribute('hive.server2.tez.sessions.per.default.queue', "minimum",
-                                              min_llap_concurrency)
+      putHiveInteractiveSiteProperty('hive.server2.tez.sessions.per.default.queue', long(llap_concurrency))
+      putHiveInteractiveSitePropertyAttribute('hive.server2.tez.sessions.per.default.queue', "minimum", min_llap_concurrency)
 
-    putHiveInteractiveSitePropertyAttribute('hive.server2.tez.sessions.per.default.queue', "maximum", max_llap_concurreny)
+    putHiveInteractiveSitePropertyAttribute('hive.server2.tez.sessions.per.default.queue', "maximum", long(max_llap_concurreny))
 
     num_llap_nodes = long(num_llap_nodes)
     putHiveInteractiveEnvPropertyAttribute('num_llap_nodes', "minimum", min_nodes_required)

http://git-wip-us.apache.org/repos/asf/ambari/blob/016df4e9/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
index 1c19d8b..92b7367 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
@@ -1252,11 +1252,10 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
 
     if not llap_concurrency_in_changed_configs:
       min_llap_concurrency = 1
-      putHiveInteractiveSiteProperty('hive.server2.tez.sessions.per.default.queue', llap_concurrency)
-      putHiveInteractiveSitePropertyAttribute('hive.server2.tez.sessions.per.default.queue', "minimum",
-                                              min_llap_concurrency)
+      putHiveInteractiveSiteProperty('hive.server2.tez.sessions.per.default.queue', long(llap_concurrency))
+      putHiveInteractiveSitePropertyAttribute('hive.server2.tez.sessions.per.default.queue', "minimum", min_llap_concurrency)
 
-    putHiveInteractiveSitePropertyAttribute('hive.server2.tez.sessions.per.default.queue', "maximum", max_llap_concurreny)
+    putHiveInteractiveSitePropertyAttribute('hive.server2.tez.sessions.per.default.queue', "maximum", long(max_llap_concurreny))
 
     num_llap_nodes = long(num_llap_nodes)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/016df4e9/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
index bf0cbec..fc5f220 100644
--- a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
@@ -1561,7 +1561,7 @@ class TestHDP25StackAdvisor(TestCase):
 
     self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, self.hosts)
 
-    self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.server2.tez.sessions.per.default.queue'], {'maximum': '3.0'})
+    self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.server2.tez.sessions.per.default.queue'], {'maximum': '3'})
 
     self.assertTrue(configurations['hive-interactive-env']['properties']['num_llap_nodes'], 3)
     self.assertTrue(configurations['hive-interactive-env']['properties']['num_llap_nodes_for_llap_daemons'], 3)
@@ -2773,7 +2773,7 @@ class TestHDP25StackAdvisor(TestCase):
     self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, self.hosts)
 
     self.assertEqual(configurations['capacity-scheduler']['properties'], {'capacity-scheduler': 'yarn.scheduler.capacity.root.accessible-node-labels=*\nyarn.scheduler.capacity.maximum-am-resource-percent=1\nyarn.scheduler.capacity.node-locality-delay=40\nyarn.scheduler.capacity.root.capacity=100\nyarn.scheduler.capacity.root.default.state=RUNNING\nyarn.scheduler.capacity.root.default.maximum-capacity=2.0\nyarn.scheduler.capacity.root.queues=default,llap\nyarn.scheduler.capacity.maximum-applications=10000\nyarn.scheduler.capacity.root.default.user-limit-factor=1\nyarn.scheduler.capacity.root.acl_administer_queue=*\nyarn.scheduler.capacity.root.default.acl_submit_applications=*\nyarn.scheduler.capacity.root.default.capacity=2.0\nyarn.scheduler.capacity.queue-mappings-override.enable=false\nyarn.scheduler.capacity.root.ordering-policy=priority-utilization\nyarn.scheduler.capacity.root.llap.user-limit-factor=1\nyarn.scheduler.capacity.root.llap.state=RUNNING\nyarn.scheduler.capacity.root.llap.ordering-policy=fifo\nyarn.scheduler.capacity.root.llap.priority=10\nyarn.scheduler.capacity.root.llap.minimum-user-limit-percent=100\nyarn.scheduler.capacity.root.llap.maximum-capacity=98.0\nyarn.scheduler.capacity.root.llap.capacity=98.0\nyarn.scheduler.capacity.root.llap.acl_submit_applications=hive\nyarn.scheduler.capacity.root.llap.acl_administer_queue=hive\nyarn.scheduler.capacity.root.llap.maximum-am-resource-percent=1'})
-    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.server2.tez.sessions.per.default.queue'], '1.0')
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.server2.tez.sessions.per.default.queue'], '1')
     self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.server2.tez.sessions.per.default.queue'], {'maximum': '4', 'minimum': '1'})
 
     self.assertTrue('num_llap_nodes_for_llap_daemons' not in configurations['hive-interactive-env']['properties'])


[40/50] [abbrv] ambari git commit: AMBARI-21530 - Service Checks During Upgrades Should Use Desired Stack (jonathanhurley)

Posted by nc...@apache.org.
AMBARI-21530 - Service Checks During Upgrades Should Use Desired Stack (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e87a3e31
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e87a3e31
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e87a3e31

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: e87a3e31a9a18c5178f1170cef15c4de47f6808e
Parents: 9c45110
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Wed Jul 19 22:04:07 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Thu Jul 20 17:15:21 2017 -0400

----------------------------------------------------------------------
 .../actionmanager/ExecutionCommandWrapper.java  | 34 +++++++-
 .../AmbariCustomCommandExecutionHelper.java     |  7 +-
 .../AmbariManagementControllerImpl.java         |  7 +-
 .../internal/UpgradeResourceProvider.java       | 91 +++++++++++++++-----
 .../2.1.0.2.0/package/scripts/historyserver.py  |  2 +-
 .../2.1.0.2.0/package/scripts/params_linux.py   | 11 ++-
 .../2.1.0.2.0/package/scripts/service_check.py  |  6 +-
 .../src/test/python/TestStackFeature.py         | 61 +++++++++++++
 8 files changed, 174 insertions(+), 45 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e87a3e31/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
index efd609a..91db7d0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
@@ -17,6 +17,9 @@
  */
 package org.apache.ambari.server.actionmanager;
 
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.HOOKS_FOLDER;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SERVICE_PACKAGE_FOLDER;
+
 import java.util.HashMap;
 import java.util.Map;
 import java.util.TreeMap;
@@ -27,6 +30,7 @@ import org.apache.ambari.server.ServiceNotFoundException;
 import org.apache.ambari.server.agent.AgentCommand.AgentCommandType;
 import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.agent.ExecutionCommand.KeyNames;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
 import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.state.Cluster;
@@ -35,6 +39,9 @@ import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponent;
+import org.apache.ambari.server.state.ServiceInfo;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.StackInfo;
 import org.apache.commons.collections.MapUtils;
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
@@ -63,6 +70,12 @@ public class ExecutionCommandWrapper {
   @Inject
   private Gson gson;
 
+  /**
+   * Used for injecting hooks and common-services into the command.
+   */
+  @Inject
+  private AmbariMetaInfo ambariMetaInfo;
+
   @AssistedInject
   public ExecutionCommandWrapper(@Assisted String jsonExecutionCommand) {
     this.jsonExecutionCommand = jsonExecutionCommand;
@@ -208,9 +221,28 @@ public class ExecutionCommandWrapper {
           }
         }
 
+        Map<String, String> commandParams = executionCommand.getCommandParams();
+
         if (null != repositoryVersion) {
-          executionCommand.getCommandParams().put(KeyNames.VERSION, repositoryVersion.getVersion());
+          commandParams.put(KeyNames.VERSION, repositoryVersion.getVersion());
           executionCommand.getHostLevelParams().put(KeyNames.CURRENT_VERSION, repositoryVersion.getVersion());
+
+          StackId stackId = repositoryVersion.getStackId();
+          StackInfo stackInfo = ambariMetaInfo.getStack(stackId.getStackName(),
+              stackId.getStackVersion());
+
+          if (!commandParams.containsKey(HOOKS_FOLDER)) {
+            commandParams.put(HOOKS_FOLDER, stackInfo.getStackHooksFolder());
+          }
+
+          if (!commandParams.containsKey(SERVICE_PACKAGE_FOLDER)) {
+            if (!StringUtils.isEmpty(serviceName)) {
+              ServiceInfo serviceInfo = ambariMetaInfo.getService(stackId.getStackName(),
+                  stackId.getStackVersion(), serviceName);
+
+              commandParams.put(SERVICE_PACKAGE_FOLDER, serviceInfo.getServicePackageFolder());
+            }
+          }
         }
       } catch (ServiceNotFoundException serviceNotFoundException) {
         // it's possible that there are commands specified for a service where
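
In plain terms, the wrapper now back-fills HOOKS_FOLDER and
SERVICE_PACKAGE_FOLDER from the stack of the command's repository version,
and only when the caller has not already supplied them. A hedged Python
paraphrase of that defaulting logic (fill_stack_folders and its arguments
are illustrative names, not Ambari APIs):

  def fill_stack_folders(command_params, hooks_folder, service_package_folder=None):
      # an explicitly provided value always wins; only fill the gaps
      if 'hooks_folder' not in command_params:
          command_params['hooks_folder'] = hooks_folder
      if service_package_folder and 'service_package_folder' not in command_params:
          command_params['service_package_folder'] = service_package_folder
      return command_params

  params = fill_stack_folders({}, 'stacks/HDP/2.0.6/hooks',
                              'common-services/YARN/2.1.0.2.0/package')
  print(params['hooks_folder'])  # stacks/HDP/2.0.6/hooks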

http://git-wip-us.apache.org/repos/asf/ambari/blob/e87a3e31/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
index 5180870..0b140e4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
@@ -27,7 +27,6 @@ import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.CUSTOM_CO
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.DB_DRIVER_FILENAME;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.DB_NAME;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.GROUP_LIST;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.HOOKS_FOLDER;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.HOST_SYS_PREPPED;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JDK_LOCATION;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.MYSQL_JDBC_URL;
@@ -36,7 +35,6 @@ import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.ORACLE_JD
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.REPO_INFO;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SCRIPT;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SCRIPT_TYPE;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SERVICE_PACKAGE_FOLDER;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.STACK_NAME;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.STACK_VERSION;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.USER_GROUPS;
@@ -492,8 +490,6 @@ public class AmbariCustomCommandExecutionHelper {
       }
 
       commandParams.put(COMMAND_TIMEOUT, "" + commandTimeout);
-      commandParams.put(SERVICE_PACKAGE_FOLDER, serviceInfo.getServicePackageFolder());
-      commandParams.put(HOOKS_FOLDER, stackInfo.getStackHooksFolder());
 
       Map<String, String> roleParams = execCmd.getRoleParams();
       if (roleParams == null) {
@@ -811,8 +807,7 @@ public class AmbariCustomCommandExecutionHelper {
       actualTimeout = actualTimeout < MIN_STRICT_SERVICE_CHECK_TIMEOUT ? MIN_STRICT_SERVICE_CHECK_TIMEOUT : actualTimeout;
       commandParams.put(COMMAND_TIMEOUT, Integer.toString(actualTimeout));
     }
-    commandParams.put(SERVICE_PACKAGE_FOLDER, serviceInfo.getServicePackageFolder());
-    commandParams.put(HOOKS_FOLDER, stackInfo.getStackHooksFolder());
+
     StageUtils.useAmbariJdkInCommandParams(commandParams, configs);
 
     execCmd.setCommandParams(commandParams);

http://git-wip-us.apache.org/repos/asf/ambari/blob/e87a3e31/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 4229d34..44943c7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -28,7 +28,6 @@ import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.COMMAND_T
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.CUSTOM_FOLDER;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.DB_DRIVER_FILENAME;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.GROUP_LIST;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.HOOKS_FOLDER;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.MAX_DURATION_OF_RETRIES;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.NOT_MANAGED_HDFS_PATH_LIST;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.PACKAGE_LIST;
@@ -36,7 +35,6 @@ import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.PACKAGE_V
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.REPO_INFO;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SCRIPT;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SCRIPT_TYPE;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SERVICE_PACKAGE_FOLDER;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SERVICE_REPO_INFO;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.UNLIMITED_KEY_JCE_REQUIRED;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.USER_GROUPS;
@@ -1536,7 +1534,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
         Map.Entry<String, String> pair = (Map.Entry) it.next();
         // Check the value if both keys exist
         if (newConfigValues.containsKey(pair.getKey())) {
-          if (!newConfigValues.get((String) pair.getKey()).equals(pair.getValue())) {
+          if (!newConfigValues.get(pair.getKey()).equals(pair.getValue())) {
             configsChanged.put(pair.getKey(), "changed");
           }
         } else {
@@ -2459,9 +2457,6 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     }
 
     commandParams.put(COMMAND_TIMEOUT, actualTimeout);
-    commandParams.put(SERVICE_PACKAGE_FOLDER,
-      serviceInfo.getServicePackageFolder());
-    commandParams.put(HOOKS_FOLDER, stackInfo.getStackHooksFolder());
 
     String customCacheDirectory = componentInfo.getCustomFolder();
     if (customCacheDirectory != null) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/e87a3e31/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
index 22858dd..e8cd220 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
@@ -856,6 +856,63 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     }
   }
 
+  /**
+   * Adds the hooks and service folders based on the effective stack ID and the
+   * name of the service from the wrapper.
+   *
+   * @param wrapper
+   *          the stage wrapper to use when determining the service name.
+   * @param effectiveStackId
+   *          the stack ID to use when getting the hooks and service folders.
+   * @param commandParams
+   *          the params to update with the new values
+   * @throws AmbariException
+   */
+  private void applyRepositoryAssociatedParameters(StageWrapper wrapper, StackId effectiveStackId,
+      Map<String, String> commandParams) throws AmbariException {
+    if (CollectionUtils.isNotEmpty(wrapper.getTasks())
+        && wrapper.getTasks().get(0).getService() != null) {
+
+      AmbariMetaInfo ambariMetaInfo = s_metaProvider.get();
+
+      StackInfo stackInfo = ambariMetaInfo.getStack(effectiveStackId.getStackName(),
+          effectiveStackId.getStackVersion());
+
+      String serviceName = wrapper.getTasks().get(0).getService();
+      ServiceInfo serviceInfo = ambariMetaInfo.getService(effectiveStackId.getStackName(),
+          effectiveStackId.getStackVersion(), serviceName);
+
+      commandParams.put(SERVICE_PACKAGE_FOLDER, serviceInfo.getServicePackageFolder());
+      commandParams.put(HOOKS_FOLDER, stackInfo.getStackHooksFolder());
+    }
+  }
+
+  /**
+   * Creates an action stage using the {@link #EXECUTE_TASK_ROLE} custom action
+   * to execute some Python command.
+   *
+   * @param context
+   *          the upgrade context.
+   * @param request
+   *          the request object to add the stage to.
+   * @param effectiveRepositoryVersion
+   *          the stack/version to use when generating content for the command.
+   *          On some upgrade types, this may change during the course of the
+   *          upgrade orchestration. An express upgrade changes this after
+   *          stopping all services.
+   * @param entity
+   *          the upgrade entity to set the stage information on
+   * @param wrapper
+   *          the stage wrapper containing information to generate the stage.
+   * @param skippable
+   *          {@code true} to mark the stage as being skippable if a failure
+   *          occurs.
+   * @param supportsAutoSkipOnFailure
+   *          {@code true} to automatically skip on a failure.
+   * @param allowRetry
+   *          {@code true} to be able to retry the failed stage.
+   * @throws AmbariException
+   */
   private void makeActionStage(UpgradeContext context, RequestStageContainer request,
       RepositoryVersionEntity effectiveRepositoryVersion, UpgradeItemEntity entity,
       StageWrapper wrapper, boolean skippable, boolean supportsAutoSkipOnFailure,
@@ -869,38 +926,28 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     Cluster cluster = context.getCluster();
 
     LOG.debug("Analyzing upgrade item {} with tasks: {}.", entity.getText(), entity.getTasks());
-    Map<String, String> params = getNewParameterMap(request, context);
-    params.put(UpgradeContext.COMMAND_PARAM_TASKS, entity.getTasks());
-
-    // Apply additional parameters to the command that come from the stage.
-    applyAdditionalParameters(wrapper, params);
-
-    // Because custom task may end up calling a script/function inside a
-    // service, it is necessary to set the
-    // service_package_folder and hooks_folder params.
-    AmbariMetaInfo ambariMetaInfo = s_metaProvider.get();
-    StackId stackId = effectiveRepositoryVersion.getStackId();
-
-    StackInfo stackInfo = ambariMetaInfo.getStack(stackId.getStackName(),
-        stackId.getStackVersion());
 
     // if the service/component are specified, then make sure to grab them off
     // of the wrapper so they can be stored on the command for use later
     String serviceName = null;
     String componentName = null;
-
     if (wrapper.getTasks() != null && wrapper.getTasks().size() > 0
         && wrapper.getTasks().get(0).getService() != null) {
       TaskWrapper taskWrapper = wrapper.getTasks().get(0);
       serviceName = taskWrapper.getService();
       componentName = taskWrapper.getComponent();
+    }
 
-      ServiceInfo serviceInfo = ambariMetaInfo.getService(stackId.getStackName(),
-          stackId.getStackVersion(), serviceName);
+    Map<String, String> params = getNewParameterMap(request, context);
+    params.put(UpgradeContext.COMMAND_PARAM_TASKS, entity.getTasks());
 
-      params.put(SERVICE_PACKAGE_FOLDER, serviceInfo.getServicePackageFolder());
-      params.put(HOOKS_FOLDER, stackInfo.getStackHooksFolder());
-    }
+    // Apply additional parameters to the command that come from the stage.
+    applyAdditionalParameters(wrapper, params);
+
+    // the ru_execute_tasks invokes scripts - it needs information about where
+    // the scripts live and for that it should always use the target repository
+    // stack
+    applyRepositoryAssociatedParameters(wrapper, effectiveRepositoryVersion.getStackId(), params);
 
     // add each host to this stage
     RequestResourceFilter filter = new RequestResourceFilter(serviceName, componentName,
@@ -1048,6 +1095,10 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     // Apply additional parameters to the command that come from the stage.
     applyAdditionalParameters(wrapper, commandParams);
 
+    // add things like hooks and service folders based on effective repo
+    applyRepositoryAssociatedParameters(wrapper, effectiveRepositoryVersion.getStackId(),
+        commandParams);
+
     ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
         "SERVICE_CHECK", filters, commandParams);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e87a3e31/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
index d886244..0b03af4 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
@@ -90,7 +90,7 @@ class HistoryServerDefault(HistoryServer):
     env.set_params(params)
     self.configure(env) # FOR SECURITY
 
-    if params.stack_version_formatted_major and check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS, params.stack_version_formatted_major):
+    if check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS, params.version_for_stack_feature_checks):
       # MC Hammer said, "Can't touch this"
       resource_created = copy_to_hdfs(
         "mapreduce",

http://git-wip-us.apache.org/repos/asf/ambari/blob/e87a3e31/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
index 67931c6..6f75852 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
@@ -66,21 +66,20 @@ tarball_map = default("/configurations/cluster-env/tarball_map", None)
 config_path = os.path.join(stack_root, "current/hadoop-client/conf")
 config_dir = os.path.realpath(config_path)
 
+# get the correct version to use for checking stack features
+version_for_stack_feature_checks = get_stack_feature_version(config)
+
 # This is expected to be of the form #.#.#.#
 stack_version_unformatted = config['hostLevelParams']['stack_version']
-stack_version_formatted_major = format_stack_version(stack_version_unformatted)
 stack_version_formatted = functions.get_stack_version('hadoop-yarn-resourcemanager')
 
-stack_supports_ru = stack_version_formatted_major and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted_major)
-stack_supports_timeline_state_store = stack_version_formatted_major and check_stack_feature(StackFeature.TIMELINE_STATE_STORE, stack_version_formatted_major)
+stack_supports_ru = check_stack_feature(StackFeature.ROLLING_UPGRADE, version_for_stack_feature_checks)
+stack_supports_timeline_state_store = check_stack_feature(StackFeature.TIMELINE_STATE_STORE, version_for_stack_feature_checks)
 
 # New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade.
 # It cannot be used during the initial Cluster Install because the version is not yet known.
 version = default("/commandParams/version", None)
 
-# get the correct version to use for checking stack features
-version_for_stack_feature_checks = get_stack_feature_version(config)
-
 stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
 stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e87a3e31/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service_check.py
index b934767..bf52ee6 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service_check.py
@@ -99,11 +99,7 @@ class ServiceCheckDefault(ServiceCheck):
                         mode=params.smoke_hdfs_user_mode,
                         )
 
-    if params.stack_version_formatted_major and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.stack_version_formatted_major):
-      path_to_distributed_shell_jar = format("{stack_root}/current/hadoop-yarn-client/hadoop-yarn-applications-distributedshell.jar")
-    else:
-      path_to_distributed_shell_jar = "/usr/lib/hadoop-yarn/hadoop-yarn-applications-distributedshell*.jar"
-
+    path_to_distributed_shell_jar = format("{stack_root}/current/hadoop-yarn-client/hadoop-yarn-applications-distributedshell.jar")
     yarn_distrubuted_shell_check_params = ["yarn org.apache.hadoop.yarn.applications.distributedshell.Client",
                                            "-shell_command", "ls", "-num_containers", "{number_of_nm}",
                                            "-jar", "{path_to_distributed_shell_jar}", "-timeout", "300000",

http://git-wip-us.apache.org/repos/asf/ambari/blob/e87a3e31/ambari-server/src/test/python/TestStackFeature.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/TestStackFeature.py b/ambari-server/src/test/python/TestStackFeature.py
index 230734c..6e8bcec 100644
--- a/ambari-server/src/test/python/TestStackFeature.py
+++ b/ambari-server/src/test/python/TestStackFeature.py
@@ -21,10 +21,13 @@ limitations under the License.
 
 from resource_management.core.logger import Logger
 from resource_management.libraries.functions.stack_features import get_stack_feature_version
+from resource_management.libraries.functions.stack_features import check_stack_feature
 from resource_management.libraries.script import Script
 from resource_management.core.exceptions import Fail
 from unittest import TestCase
 
+import json
+
 Logger.initialize_logger()
 
 class TestStackFeature(TestCase):
@@ -115,6 +118,34 @@ class TestStackFeature(TestCase):
     stack_feature_version = get_stack_feature_version(command_json)
     self.assertEqual("2.5.9.9-9999", stack_feature_version)
 
+
+  def test_get_stack_feature(self):
+    """
+    Tests check_stack_feature() against feature definitions supplied through cluster-env.
+    :return:
+    """
+    command_json = TestStackFeature._get_cluster_upgrade_restart_json()
+    Script.config = command_json
+
+    Script.config["configurations"] = {}
+    Script.config["configurations"]["cluster-env"] = {}
+    Script.config["configurations"]["cluster-env"]["stack_features"] = {}
+    Script.config["configurations"]["cluster-env"]["stack_features"] = json.dumps(TestStackFeature._get_stack_feature_json())
+
+    stack_feature_version = get_stack_feature_version(command_json)
+    self.assertTrue(check_stack_feature("stack-feature-1", stack_feature_version))
+    self.assertTrue(check_stack_feature("stack-feature-2", stack_feature_version))
+    self.assertFalse(check_stack_feature("stack-feature-3", stack_feature_version))
+
+    command_json = TestStackFeature._get_cluster_install_command_json()
+    Script.config.update(command_json)
+
+    stack_feature_version = get_stack_feature_version(command_json)
+    self.assertTrue(check_stack_feature("stack-feature-1", stack_feature_version))
+    self.assertTrue(check_stack_feature("stack-feature-2", stack_feature_version))
+    self.assertFalse(check_stack_feature("stack-feature-3", stack_feature_version))
+
+
   @staticmethod
   def _get_cluster_install_command_json():
     """
@@ -221,4 +252,34 @@ class TestStackFeature(TestCase):
         "version":"2.5.9.9-9999",
         "downgrade_from_version":"2.5.9.9-9999"
       }
+    }
+
+  @staticmethod
+  def _get_stack_feature_json():
+    """
+    Stack feature definitions consumed by the check_stack_feature() assertions above.
+    :return:
+    """
+    return {
+      "HDP": {
+        "stack_features":[
+          {
+            "name":"stack-feature-1",
+            "description":"Stack Feature 1",
+            "min_version":"2.2.0.0"
+          },
+          {
+            "name":"stack-feature-2",
+            "description":"Stack Feature 2",
+            "min_version":"2.2.0.0",
+            "max_version":"2.6.0.0"
+          },
+          {
+            "name":"stack-feature-3",
+            "description":"Stack Feature 3",
+            "min_version":"2.2.0.0",
+            "max_version":"2.3.0.0"
+          }
+        ]
+      }
     }
\ No newline at end of file
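
The fixture above exercises the min_version/max_version window that
check_stack_feature() evaluates: at version 2.5.9.9-9999, features 1 and 2
apply and feature 3 does not. A self-contained, simplified sketch of that
comparison (the real helper reads the JSON from cluster-env and handles more
version formats):

  def version_tuple(v):
      # drop the build suffix ("-9999") and compare the dotted parts
      return tuple(int(p) for p in v.split('-')[0].split('.'))

  def feature_applies(feature, version):
      if version_tuple(version) < version_tuple(feature['min_version']):
          return False
      if 'max_version' in feature and \
         version_tuple(version) >= version_tuple(feature['max_version']):
          return False
      return True

  feature_3 = {"name": "stack-feature-3", "min_version": "2.2.0.0", "max_version": "2.3.0.0"}
  print(feature_applies(feature_3, "2.5.9.9-9999"))  # False, matching the test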