Posted to commits@ambari.apache.org by jo...@apache.org on 2017/07/18 15:02:37 UTC

[01/50] [abbrv] ambari git commit: AMBARI-21446. Force-remove package does not work on CentOS 6 and SuSE 11

Repository: ambari
Updated Branches:
  refs/heads/branch-feature-AMBARI-21450 ae3ce90cf -> 5cdcd0701


AMBARI-21446. Force-remove package does not work on CentOS 6 and SuSE 11


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4bbdd0e5
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4bbdd0e5
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4bbdd0e5

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 4bbdd0e550ed96aca5f7fcbd2036cf72543a10b3
Parents: 621f380
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Wed Jul 12 11:13:46 2017 +0200
Committer: Siddharth Wagle <sw...@hortonworks.com>
Committed: Wed Jul 12 11:21:11 2017 -0700

----------------------------------------------------------------------
 .../src/test/python/resource_management/TestPackageResource.py   | 4 ++--
 .../python/resource_management/core/providers/package/yumrpm.py  | 2 +-
 .../python/resource_management/core/providers/package/zypper.py  | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/4bbdd0e5/ambari-agent/src/test/python/resource_management/TestPackageResource.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/resource_management/TestPackageResource.py b/ambari-agent/src/test/python/resource_management/TestPackageResource.py
index bc1bfeb..51a35eb 100644
--- a/ambari-agent/src/test/python/resource_management/TestPackageResource.py
+++ b/ambari-agent/src/test/python/resource_management/TestPackageResource.py
@@ -228,7 +228,7 @@ class TestPackageResource(TestCase):
               logoutput = False,
               ignore_dependencies = True
       )
-    shell_mock.assert_called_with(['/usr/bin/rpm', '-e', '--nodeps', 'some_package'], logoutput=False, sudo=True)
+    shell_mock.assert_called_with(['rpm', '-e', '--nodeps', 'some_package'], logoutput=False, sudo=True)
 
   @patch.object(shell, "call", new = MagicMock(return_value=(0, None)))
   @patch.object(shell, "checked_call")
@@ -256,7 +256,7 @@ class TestPackageResource(TestCase):
               logoutput = False,
               ignore_dependencies = True
       )
-    shell_mock.assert_called_with(['/usr/bin/rpm', '-e', '--nodeps', 'some_package'], logoutput=False, sudo=True)
+    shell_mock.assert_called_with(['rpm', '-e', '--nodeps', 'some_package'], logoutput=False, sudo=True)
 
   @patch.object(shell, "call", new = MagicMock(return_value=(0, None)))
   @patch.object(shell, "checked_call")

http://git-wip-us.apache.org/repos/asf/ambari/blob/4bbdd0e5/ambari-common/src/main/python/resource_management/core/providers/package/yumrpm.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/core/providers/package/yumrpm.py b/ambari-common/src/main/python/resource_management/core/providers/package/yumrpm.py
index 064b504..a2f0533 100644
--- a/ambari-common/src/main/python/resource_management/core/providers/package/yumrpm.py
+++ b/ambari-common/src/main/python/resource_management/core/providers/package/yumrpm.py
@@ -36,7 +36,7 @@ REMOVE_CMD = {
   False: ['/usr/bin/yum', '-d', '0', '-e', '0', '-y', 'erase'],
 }
 
-REMOVE_WITHOUT_DEPENDENCIES_CMD = ['/usr/bin/rpm', '-e', '--nodeps']
+REMOVE_WITHOUT_DEPENDENCIES_CMD = ['rpm', '-e', '--nodeps']
 
 REPO_UPDATE_CMD = ['/usr/bin/yum', 'clean','metadata']
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4bbdd0e5/ambari-common/src/main/python/resource_management/core/providers/package/zypper.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/core/providers/package/zypper.py b/ambari-common/src/main/python/resource_management/core/providers/package/zypper.py
index c1aab60..f3abdb5 100644
--- a/ambari-common/src/main/python/resource_management/core/providers/package/zypper.py
+++ b/ambari-common/src/main/python/resource_management/core/providers/package/zypper.py
@@ -35,7 +35,7 @@ REMOVE_CMD = {
   False: ['/usr/bin/zypper', '--quiet', 'remove', '--no-confirm'],
 }
 
-REMOVE_WITHOUT_DEPENDENCIES_CMD = ['/usr/bin/rpm', '-e', '--nodeps']
+REMOVE_WITHOUT_DEPENDENCIES_CMD = ['rpm', '-e', '--nodeps']
 
 REPO_UPDATE_CMD = ['/usr/bin/zypper', 'clean']
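
For context: the fix above swaps the absolute path /usr/bin/rpm for a bare rpm, presumably so the binary is resolved via PATH on platforms where rpm does not live at /usr/bin. A minimal stand-alone sketch of the resulting force-remove call, using plain subprocess in place of resource_management's shell.checked_call(..., sudo=True):

import subprocess

REMOVE_WITHOUT_DEPENDENCIES_CMD = ['rpm', '-e', '--nodeps']

def force_remove(package_name):
    # Resolve rpm via PATH, as in the patch; the explicit 'sudo' prefix
    # here stands in for shell.checked_call's sudo=True handling.
    subprocess.check_call(['sudo'] + REMOVE_WITHOUT_DEPENDENCIES_CMD + [package_name])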
 


[42/50] [abbrv] ambari git commit: AMBARI-21494 - BI 4.2 Ranger Needs Properties Added on Ambari Upgrade (jonathanhurley)

Posted by jo...@apache.org.
AMBARI-21494 - BI 4.2 Ranger Needs Properties Added on Ambari Upgrade (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/606c5cab
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/606c5cab
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/606c5cab

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 606c5cabf6ea734e0c2e2928521f56394edb4f77
Parents: 55da426
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Mon Jul 17 09:38:55 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Mon Jul 17 09:38:55 2017 -0400

----------------------------------------------------------------------
 .../RANGER/configuration/ranger-admin-site.xml        | 14 ++++++++++++++
 1 file changed, 14 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/606c5cab/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/RANGER/configuration/ranger-admin-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/RANGER/configuration/ranger-admin-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/RANGER/configuration/ranger-admin-site.xml
index dcc652d..05dafd0 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/RANGER/configuration/ranger-admin-site.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/RANGER/configuration/ranger-admin-site.xml
@@ -568,4 +568,18 @@
     </value-attributes>
   </property>
 
+  <property>
+    <name>ranger.ldap.binddn.credential.alias</name>
+    <value>ranger.ldap.bind.password</value>
+    <description></description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>ranger.ldap.ad.binddn.credential.alias</name>
+    <value>ranger.ldap.ad.bind.password</value>
+    <description></description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
 </configuration>
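
The two properties above only name aliases; the actual bind passwords live in a credential store. As a hedged illustration (the JCEKS path below is hypothetical, not taken from this patch), an alias such as ranger.ldap.bind.password would typically be provisioned with Hadoop's credential CLI, which prompts for the secret value:

import subprocess

# Hypothetical provider path; use the Ranger admin's configured store.
provider = "jceks://file/etc/ranger/admin/rangeradmin.jceks"
subprocess.check_call(
    ["hadoop", "credential", "create", "ranger.ldap.bind.password",
     "-provider", provider])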


[23/50] [abbrv] ambari git commit: AMBARI-21463. Cross-stack upgrade, Oozie restart fails with ext-2.2.zip missing error, stack_tools.py is missing get_stack_name in __all__, disable BigInsights in UI (alejandro)

Posted by jo...@apache.org.
AMBARI-21463. Cross-stack upgrade, Oozie restart fails with ext-2.2.zip missing error, stack_tools.py is missing get_stack_name in __all__, disable BigInsights in UI (alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d2c6d53f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d2c6d53f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d2c6d53f

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: d2c6d53f70bcaa6aee789e6d026cc06990acd16c
Parents: 113b381
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Wed Jul 12 16:53:12 2017 -0700
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Thu Jul 13 16:10:26 2017 -0700

----------------------------------------------------------------------
 .../libraries/functions/stack_tools.py          |  2 +-
 .../OOZIE/4.0.0.2.0/package/scripts/oozie.py    | 26 ++++++++++++++++----
 .../4.0.0.2.0/package/scripts/oozie_server.py   |  4 +--
 .../package/scripts/oozie_server_upgrade.py     | 15 +++++------
 .../4.0.0.2.0/package/scripts/params_linux.py   | 15 ++++++++++-
 .../stacks/BigInsights/4.2.5/metainfo.xml       |  2 +-
 6 files changed, 47 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/d2c6d53f/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
index 830598b..de58021 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
@@ -19,7 +19,7 @@ limitations under the License.
 '''
 
 __all__ = ["get_stack_tool", "get_stack_tool_name", "get_stack_tool_path",
-           "get_stack_tool_package", "STACK_SELECTOR_NAME", "CONF_SELECTOR_NAME"]
+           "get_stack_tool_package", "get_stack_name", "STACK_SELECTOR_NAME", "CONF_SELECTOR_NAME"]
 
 # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
 import ambari_simplejson as json
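
The __all__ hunk above matters only for star-imports: a name omitted from __all__ is silently skipped by "from stack_tools import *". A self-contained demonstration of that semantics (throwaway module, not Ambari code):

import sys, types

mod = types.ModuleType("stack_tools_demo")
exec("__all__ = ['get_stack_tool']\n"
     "def get_stack_tool(): return 'tool'\n"
     "def get_stack_name(): return 'name'\n", mod.__dict__)
sys.modules["stack_tools_demo"] = mod

ns = {}
exec("from stack_tools_demo import *", ns)
print("get_stack_tool" in ns)  # True
print("get_stack_name" in ns)  # False: omitted from __all__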

http://git-wip-us.apache.org/repos/asf/ambari/blob/d2c6d53f/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
index 0c38b0b..142e962 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
@@ -52,7 +52,7 @@ from ambari_commons.inet_utils import download_file
 from resource_management.core import Logger
 
 @OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def oozie(is_server=False):
+def oozie(is_server=False, upgrade_type=None):
   import params
 
   from status_params import oozie_server_win_service_name
@@ -99,7 +99,7 @@ def oozie(is_server=False):
 
 # TODO: see if see can remove this
 @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def oozie(is_server=False):
+def oozie(is_server=False, upgrade_type=None):
   import params
 
   if is_server:
@@ -190,7 +190,7 @@ def oozie(is_server=False):
   oozie_ownership()
   
   if is_server:      
-    oozie_server_specific()
+    oozie_server_specific(upgrade_type)
   
 def oozie_ownership():
   import params
@@ -215,7 +215,20 @@ def oozie_ownership():
     group = params.user_group
   )
 
-def oozie_server_specific():
+def get_oozie_ext_zip_source_path(upgrade_type, params):
+  """
+  Get the Oozie ext zip file path from the source stack.
+  :param upgrade_type:  Upgrade type will be None if not in the middle of a stack upgrade.
+  :param params: Expected to contain fields for ext_js_path, upgrade_direction, source_stack_name, and ext_js_file
+  :return: Source path to use for Oozie extension zip file
+  """
+  # Default to /usr/share/$TARGETSTACK-oozie/ext-2.2.zip
+  source_ext_js_path = params.ext_js_path
+  if upgrade_type is not None and params.upgrade_direction == Direction.UPGRADE:
+    source_ext_js_path = "/usr/share/" + params.source_stack_name.upper() + "-oozie/" + params.ext_js_file
+  return source_ext_js_path
+
+def oozie_server_specific(upgrade_type):
   import params
   
   no_op_test = as_user(format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"), user=params.oozie_user)
@@ -249,7 +262,10 @@ def oozie_server_specific():
   )
 
   configure_cmds = []
-  configure_cmds.append(('cp', params.ext_js_path, params.oozie_libext_dir))
+  # Default to /usr/share/$TARGETSTACK-oozie/ext-2.2.zip
+  source_ext_zip_path = get_oozie_ext_zip_source_path(upgrade_type, params)
+
+  configure_cmds.append(('cp', source_ext_zip_path, params.oozie_libext_dir))
   configure_cmds.append(('chown', format('{oozie_user}:{user_group}'), format('{oozie_libext_dir}/{ext_js_file}')))
   
   Execute( configure_cmds,
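
Restated outside Ambari, the path selection in the new get_oozie_ext_zip_source_path helper behaves as follows (_Params is a hypothetical stand-in for the params module, and the string "upgrade" stands in for Direction.UPGRADE):

def get_oozie_ext_zip_source_path(upgrade_type, params, UPGRADE="upgrade"):
    # Default to the target stack's copy of the extension zip.
    source_ext_js_path = params.ext_js_path
    if upgrade_type is not None and params.upgrade_direction == UPGRADE:
        # Mid-upgrade, read from the source stack's share directory instead.
        source_ext_js_path = ("/usr/share/" + params.source_stack_name.upper()
                              + "-oozie/" + params.ext_js_file)
    return source_ext_js_path

class _Params(object):  # hypothetical stand-in
    ext_js_file = "ext-2.2.zip"
    ext_js_path = "/usr/share/HDP-oozie/ext-2.2.zip"
    upgrade_direction = "upgrade"
    source_stack_name = "BigInsights"

print(get_oozie_ext_zip_source_path("nonrolling", _Params()))
# -> /usr/share/BIGINSIGHTS-oozie/ext-2.2.zip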

http://git-wip-us.apache.org/repos/asf/ambari/blob/d2c6d53f/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py
index 2cf3313..b3a8643 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py
@@ -75,7 +75,7 @@ class OozieServer(Script):
         conf_select.select(params.stack_name, "oozie", params.version)
 
     env.set_params(params)
-    oozie(is_server=True)
+    oozie(is_server=True, upgrade_type=upgrade_type)
 
   def start(self, env, upgrade_type=None):
     import params
@@ -129,7 +129,7 @@ class OozieServerDefault(OozieServer):
       conf_select.select(params.stack_name, "oozie", params.version)
       stack_select.select("oozie-server", params.version)
 
-    OozieUpgrade.prepare_libext_directory()
+    OozieUpgrade.prepare_libext_directory(upgrade_type=upgrade_type)
 
   def disable_security(self, env):
     import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/d2c6d53f/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server_upgrade.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server_upgrade.py
index a1f3336..719fb32 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server_upgrade.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server_upgrade.py
@@ -41,7 +41,7 @@ BACKUP_CONF_ARCHIVE = "oozie-conf-backup.tar"
 class OozieUpgrade(Script):
 
   @staticmethod
-  def prepare_libext_directory():
+  def prepare_libext_directory(upgrade_type=None):
     """
     Performs the following actions on libext:
       - creates <stack-root>/current/oozie/libext and recursively
@@ -86,17 +86,18 @@ class OozieUpgrade(Script):
         raise Fail("There are no files at {0} matching {1}".format(
           hadoop_client_new_lib_dir, hadoop_lzo_pattern))
 
-    # copy ext ZIP to libext dir
-    oozie_ext_zip_file = params.ext_js_path
+    # Copy ext ZIP to libext dir
+    # Default to /usr/share/$TARGETSTACK-oozie/ext-2.2.zip
+    source_ext_zip_path = oozie.get_oozie_ext_zip_source_path(upgrade_type, params)
 
     # something like <stack-root>/current/oozie-server/libext/ext-2.2.zip
     oozie_ext_zip_target_path = os.path.join(params.oozie_libext_dir, params.ext_js_file)
 
-    if not os.path.isfile(oozie_ext_zip_file):
-      raise Fail("Unable to copy {0} because it does not exist".format(oozie_ext_zip_file))
+    if not os.path.isfile(source_ext_zip_path):
+      raise Fail("Unable to copy {0} because it does not exist".format(source_ext_zip_path))
 
-    Logger.info("Copying {0} to {1}".format(oozie_ext_zip_file, params.oozie_libext_dir))
-    Execute(("cp", oozie_ext_zip_file, params.oozie_libext_dir), sudo=True)
+    Logger.info("Copying {0} to {1}".format(source_ext_zip_path, params.oozie_libext_dir))
+    Execute(("cp", source_ext_zip_path, params.oozie_libext_dir), sudo=True)
     Execute(("chown", format("{oozie_user}:{user_group}"), oozie_ext_zip_target_path), sudo=True)
     File(oozie_ext_zip_target_path,
          mode=0644

http://git-wip-us.apache.org/repos/asf/ambari/blob/d2c6d53f/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
index 0c30c78..110d55a 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
@@ -30,11 +30,11 @@ from resource_management.libraries.functions import get_port_from_url
 from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
 from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
 from resource_management.libraries.script.script import Script
-
 from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
 from resource_management.libraries.functions.expect import expect
 from resource_management.libraries.functions.get_architecture import get_architecture
 from resource_management.libraries.functions.stack_features import get_stack_feature_version
+from resource_management.libraries.functions.stack_tools import get_stack_name
 
 from resource_management.core.utils import PasswordString
 from ambari_commons.credential_store_helper import get_password_from_credential_store
@@ -65,6 +65,13 @@ agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_ret
 agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
 
 stack_root = status_params.stack_root
+
+# The source stack will be present during a cross-stack upgrade.
+# E.g., BigInsights-4.2.5 or HDP-2.6
+source_stack = default("/commandParams/source_stack", None)
+# This variable name is important, do not change
+source_stack_name = get_stack_name(source_stack)
+
 stack_version_unformatted =  status_params.stack_version_unformatted
 stack_version_formatted =  status_params.stack_version_formatted
 version_for_stack_feature_checks = get_stack_feature_version(config)
@@ -142,8 +149,14 @@ hadoop_jar_location = "/usr/lib/hadoop/"
 java_share_dir = "/usr/share/java"
 java64_home = config['hostLevelParams']['java_home']
 java_exec = format("{java64_home}/bin/java")
+
+# This variable name is important, do not change
 ext_js_file = "ext-2.2.zip"
+
+# During a cross-stack migration, the source location will be different
+# This variable name is important, do not change
 ext_js_path = format("/usr/share/{stack_name_uppercase}-oozie/{ext_js_file}")
+
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 oozie_heapsize = config['configurations']['oozie-env']['oozie_heapsize']
 oozie_permsize = config['configurations']['oozie-env']['oozie_permsize']

http://git-wip-us.apache.org/repos/asf/ambari/blob/d2c6d53f/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/metainfo.xml
index 3cf364e..789c63f 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/metainfo.xml
@@ -17,7 +17,7 @@
 -->
 <metainfo>
     <versions>
-	  <active>true</active>
+	  <active>false</active>
     </versions>
     <extends>4.2</extends>
     <minJdk>1.7</minJdk>


[28/50] [abbrv] ambari git commit: AMBARI-21470 : Kafka Sink does not exclude excluded metrics of type 'gauge'. (Qin Liu via avijayan)

Posted by jo...@apache.org.
AMBARI-21470 : Kafka Sink does not exclude excluded metrics of type 'gauge'. (Qin Liu via avijayan)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9f0bba69
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9f0bba69
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9f0bba69

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 9f0bba698b9b10006665571bf6ca9f39292a0232
Parents: 13a6fa8
Author: Aravindan Vijayan <av...@hortonworks.com>
Authored: Fri Jul 14 09:46:18 2017 -0700
Committer: Aravindan Vijayan <av...@hortonworks.com>
Committed: Fri Jul 14 09:46:27 2017 -0700

----------------------------------------------------------------------
 .../metrics2/sink/kafka/KafkaTimelineMetricsReporter.java      | 6 ++++--
 .../common-services/KAFKA/0.9.0/configuration/kafka-broker.xml | 2 +-
 2 files changed, 5 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9f0bba69/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java b/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
index 4ea7b73..20021e6 100644
--- a/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
+++ b/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
@@ -389,8 +389,10 @@ public class KafkaTimelineMetricsReporter extends AbstractTimelineMetricsSink
       final String sanitizedName = sanitizeName(name);
 
       try {
-        cacheSanitizedTimelineMetric(currentTimeMillis, sanitizedName, "", Double.parseDouble(String.valueOf(gauge.value())));
-        populateMetricsList(context, MetricType.GAUGE, sanitizedName);
+        if (!isExcludedMetric(sanitizedName)) {
+          cacheSanitizedTimelineMetric(currentTimeMillis, sanitizedName, "", Double.parseDouble(String.valueOf(gauge.value())));
+          populateMetricsList(context, MetricType.GAUGE, sanitizedName);
+        }
       } catch (NumberFormatException ex) {
         LOG.debug(ex.getMessage());
       }

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f0bba69/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/kafka-broker.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/kafka-broker.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/kafka-broker.xml
index 0275358..f15dcad 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/kafka-broker.xml
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/kafka-broker.xml
@@ -138,7 +138,7 @@
   </property>
   <property>
     <name>external.kafka.metrics.exclude.prefix</name>
-    <value>kafka.network.RequestMetrics,kafka.server.DelayedOperationPurgatory,kafka.server.BrokerTopicMetrics.BytesRejectedPerSec</value>
+    <value>kafka.network.RequestMetrics,kafka.server.DelayedOperationPurgatory,kafka.server.BrokerTopicMetrics.BytesRejectedPerSec,kafka.server.KafkaServer.ClusterId</value>
     <description>
       Exclude metrics starting with these prefixes from being collected.
     </description>
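
isExcludedMetric itself is not part of this diff; judging by the external.kafka.metrics.exclude.prefix property it checks the sanitized name against the configured prefix list. A plausible sketch of that check (names assumed, and in Python rather than the reporter's Java):

EXCLUDE_PREFIXES = [
    "kafka.network.RequestMetrics",
    "kafka.server.DelayedOperationPurgatory",
    "kafka.server.BrokerTopicMetrics.BytesRejectedPerSec",
    "kafka.server.KafkaServer.ClusterId",
]

def is_excluded_metric(name, prefixes=EXCLUDE_PREFIXES):
    # Exclude any metric whose name starts with a configured prefix.
    return any(name.startswith(p) for p in prefixes)

print(is_excluded_metric("kafka.server.KafkaServer.ClusterId"))  # True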


[26/50] [abbrv] ambari git commit: AMBARI-21234. Ambari rack awareness for Kafka. (Ambud Sharma via stoader)

Posted by jo...@apache.org.
AMBARI-21234. Ambari rack awareness for Kafka. (Ambud Sharma via stoader)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/84b3c713
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/84b3c713
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/84b3c713

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 84b3c7139486e733a5fbc3de628e51510eb0b261
Parents: bc06736
Author: Ambud Sharma <am...@hortonworks.com>
Authored: Fri Jul 14 14:28:55 2017 +0200
Committer: Toader, Sebastian <st...@hortonworks.com>
Committed: Fri Jul 14 14:28:55 2017 +0200

----------------------------------------------------------------------
 .../0.10.0/configuration/ranger-kafka-audit.xml | 58 ++++++++++++++
 .../common-services/KAFKA/0.10.0/kerberos.json  | 79 ++++++++++++++++++++
 .../common-services/KAFKA/0.10.0/metainfo.xml   | 28 +++++++
 .../KAFKA/0.8.1/package/scripts/kafka.py        | 12 +++
 .../KAFKA/0.8.1/package/scripts/params.py       |  3 +
 .../stacks/HDP/2.5/services/KAFKA/metainfo.xml  |  1 +
 6 files changed, 181 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/84b3c713/ambari-server/src/main/resources/common-services/KAFKA/0.10.0/configuration/ranger-kafka-audit.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0/configuration/ranger-kafka-audit.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0/configuration/ranger-kafka-audit.xml
new file mode 100644
index 0000000..fff9132
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0/configuration/ranger-kafka-audit.xml
@@ -0,0 +1,58 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>xasecure.audit.destination.db</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.jdbc.url</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.user</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.password</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.jdbc.driver</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.credential.provider.file</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.batch.filespool.dir</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/84b3c713/ambari-server/src/main/resources/common-services/KAFKA/0.10.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0/kerberos.json b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0/kerberos.json
new file mode 100644
index 0000000..b4d0018
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0/kerberos.json
@@ -0,0 +1,79 @@
+{
+  "services": [
+    {
+      "name": "KAFKA",
+      "identities": [
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "kafka-broker": {
+              "authorizer.class.name": "kafka.security.auth.SimpleAclAuthorizer",
+              "principal.to.local.class":"kafka.security.auth.KerberosPrincipalToLocal",
+              "super.users": "user:${kafka-env/kafka_user}",
+              "security.inter.broker.protocol": "PLAINTEXTSASL",
+              "zookeeper.set.acl": "true",
+              "listeners": "${kafka-broker/listeners|replace(\\bPLAINTEXT\\b, PLAINTEXTSASL)}"
+          }
+        },
+        {
+          "ranger-kafka-audit": {
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
+          }
+        }
+      ],
+      "auth_to_local_properties" : [
+        "kafka-broker/sasl.kerberos.principal.to.local.rules|comma"
+      ],
+      "components": [
+        {
+          "name": "KAFKA_BROKER",
+          "identities": [
+            {
+              "name": "kafka_broker",
+              "principal": {
+                "value": "${kafka-env/kafka_user}/_HOST@${realm}",
+                "type": "service",
+                "configuration": "kafka-env/kafka_principal_name"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/kafka.service.keytab",
+                "owner": {
+                  "name": "${kafka-env/kafka_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "kafka-env/kafka_keytab"
+              }
+            },
+            {
+              "name": "/KAFKA/KAFKA_BROKER/kafka_broker",
+              "principal": {
+                "configuration": "ranger-kafka-audit/xasecure.audit.jaas.Client.option.principal"
+              },
+              "keytab": {
+                "configuration": "ranger-kafka-audit/xasecure.audit.jaas.Client.option.keyTab"
+              }
+            },
+            {
+              "name": "/HDFS/NAMENODE/hdfs",
+              "when" : {
+                "contains" : ["services", "HDFS"]
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
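
One detail in kerberos.json above: the listeners value uses Ambari's variable-replacement syntax, ${kafka-broker/listeners|replace(\\bPLAINTEXT\\b, PLAINTEXTSASL)}, which rewrites the existing listeners string with a word-boundary substitution, roughly:

import re

listeners = "PLAINTEXT://localhost:6667,SSL://localhost:6668"
print(re.sub(r"\bPLAINTEXT\b", "PLAINTEXTSASL", listeners))
# -> PLAINTEXTSASL://localhost:6667,SSL://localhost:6668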

http://git-wip-us.apache.org/repos/asf/ambari/blob/84b3c713/ambari-server/src/main/resources/common-services/KAFKA/0.10.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0/metainfo.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0/metainfo.xml
new file mode 100644
index 0000000..c1fcde8
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0/metainfo.xml
@@ -0,0 +1,28 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>KAFKA</name>
+      <extends>common-services/KAFKA/0.9.0</extends>
+      <version>0.10.0</version>
+      <restartRequiredAfterRackChange>true</restartRequiredAfterRackChange>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/84b3c713/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/kafka.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/kafka.py b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/kafka.py
index 1327090..e6d7339 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/kafka.py
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/kafka.py
@@ -108,6 +108,18 @@ def kafka(upgrade_type=None):
 
     kafka_data_dir = kafka_server_config['log.dirs']
     kafka_data_dirs = filter(None, kafka_data_dir.split(","))
+
+    rack="/default-rack"
+    i=0
+    if len(params.all_racks) > 0:
+     for host in params.all_hosts:
+      if host == params.hostname:
+        rack=params.all_racks[i]
+        break
+      i=i+1
+
+    kafka_server_config['broker.rack']=rack
+
     Directory(kafka_data_dirs,
               mode=0755,
               cd_access='a',
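
The committed loop walks the parallel all_hosts/all_racks lists with a manual index to find this host's rack. An equivalent, more idiomatic lookup (sketch only, not the committed code):

def resolve_rack(hostname, all_hosts, all_racks, default="/default-rack"):
    # all_hosts[i] corresponds to all_racks[i] in clusterHostInfo.
    host_to_rack = dict(zip(all_hosts, all_racks))
    return host_to_rack.get(hostname, default)

print(resolve_rack("broker1.example.com",
                   ["broker1.example.com", "broker2.example.com"],
                   ["/rack-a", "/rack-b"]))
# -> /rack-a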

http://git-wip-us.apache.org/repos/asf/ambari/blob/84b3c713/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
index c36a10f..8aa4fc2 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
@@ -105,6 +105,9 @@ zookeeper_hosts.sort()
 secure_acls = default("/configurations/kafka-broker/zookeeper.set.acl", False)
 kafka_security_migrator = os.path.join(kafka_home, "bin", "zookeeper-security-migration.sh")
 
+all_hosts = default("/clusterHostInfo/all_hosts", [])
+all_racks = default("/clusterHostInfo/all_racks", [])
+
 #Kafka log4j
 kafka_log_maxfilesize = default('/configurations/kafka-log4j/kafka_log_maxfilesize',256)
 kafka_log_maxbackupindex = default('/configurations/kafka-log4j/kafka_log_maxbackupindex',20)

http://git-wip-us.apache.org/repos/asf/ambari/blob/84b3c713/ambari-server/src/main/resources/stacks/HDP/2.5/services/KAFKA/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/KAFKA/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/services/KAFKA/metainfo.xml
index 12f6c45..14367ea 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/KAFKA/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/KAFKA/metainfo.xml
@@ -21,6 +21,7 @@
     <service>
       <name>KAFKA</name>
       <version>0.10.0.2.5</version>
+      <extends>common-services/KAFKA/0.10.0</extends>
     </service>
   </services>
 </metainfo>


[13/50] [abbrv] ambari git commit: AMBARI-21462. Readd TITAN, R4ML, SYSTEMML, JNBG to BigInsights and fix HBase backup during EU and imports (alejandro)

Posted by jo...@apache.org.
AMBARI-21462. Readd TITAN, R4ML, SYSTEMML, JNBG to BigInsights and fix HBase backup during EU and imports (alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/69e492f2
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/69e492f2
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/69e492f2

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 69e492f288340e797cce62bfd42e677bec958158
Parents: 1f54c6e
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Wed Jul 12 15:14:30 2017 -0700
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Wed Jul 12 16:17:07 2017 -0700

----------------------------------------------------------------------
 .../0.96.0.2.0/package/scripts/hbase_master.py  |  10 +-
 .../0.96.0.2.0/package/scripts/hbase_service.py |  37 ++--
 .../common-services/JNBG/0.2.0/alerts.json      |  32 +++
 .../JNBG/0.2.0/configuration/jnbg-env.xml       | 208 +++++++++++++++++++
 .../common-services/JNBG/0.2.0/kerberos.json    |  59 ++++++
 .../common-services/JNBG/0.2.0/metainfo.xml     | 108 ++++++++++
 .../JNBG/0.2.0/package/files/jkg_install.sh     | 169 +++++++++++++++
 .../JNBG/0.2.0/package/files/jkg_start.sh       |  84 ++++++++
 .../JNBG/0.2.0/package/files/log4j_setup.sh     |  79 +++++++
 .../0.2.0/package/files/pyspark_configure.sh    | 104 ++++++++++
 .../JNBG/0.2.0/package/files/pythonenv_setup.sh | 138 ++++++++++++
 .../JNBG/0.2.0/package/files/toree_configure.sh | 151 ++++++++++++++
 .../JNBG/0.2.0/package/files/toree_install.sh   | 176 ++++++++++++++++
 .../JNBG/0.2.0/package/scripts/jkg_toree.py     | 134 ++++++++++++
 .../0.2.0/package/scripts/jkg_toree_params.py   | 177 ++++++++++++++++
 .../JNBG/0.2.0/package/scripts/jnbg_helpers.py  |  81 ++++++++
 .../JNBG/0.2.0/package/scripts/jnbg_params.py   |  66 ++++++
 .../JNBG/0.2.0/package/scripts/py_client.py     |  63 ++++++
 .../0.2.0/package/scripts/py_client_params.py   |  39 ++++
 .../JNBG/0.2.0/package/scripts/service_check.py |  44 ++++
 .../JNBG/0.2.0/package/scripts/status_params.py |  26 +++
 .../R4ML/0.8.0/configuration/r4ml-env.xml       |  48 +++++
 .../common-services/R4ML/0.8.0/metainfo.xml     |  92 ++++++++
 .../R4ML/0.8.0/package/files/Install.R          |  25 +++
 .../R4ML/0.8.0/package/files/ServiceCheck.R     |  28 +++
 .../R4ML/0.8.0/package/files/localr.repo        |  22 ++
 .../R4ML/0.8.0/package/scripts/__init__.py      |  19 ++
 .../R4ML/0.8.0/package/scripts/params.py        |  80 +++++++
 .../R4ML/0.8.0/package/scripts/r4ml_client.py   | 201 ++++++++++++++++++
 .../R4ML/0.8.0/package/scripts/service_check.py |  45 ++++
 .../SYSTEMML/0.10.0/metainfo.xml                |  77 +++++++
 .../SYSTEMML/0.10.0/package/scripts/__init__.py |  19 ++
 .../SYSTEMML/0.10.0/package/scripts/params.py   |  40 ++++
 .../0.10.0/package/scripts/service_check.py     |  43 ++++
 .../0.10.0/package/scripts/systemml_client.py   |  49 +++++
 .../common-services/TITAN/1.0.0/alerts.json     |  33 +++
 .../1.0.0/configuration/gremlin-server.xml      |  85 ++++++++
 .../TITAN/1.0.0/configuration/hadoop-gryo.xml   |  94 +++++++++
 .../1.0.0/configuration/hadoop-hbase-read.xml   | 102 +++++++++
 .../TITAN/1.0.0/configuration/titan-env.xml     | 157 ++++++++++++++
 .../1.0.0/configuration/titan-hbase-solr.xml    |  69 ++++++
 .../TITAN/1.0.0/configuration/titan-log4j.xml   |  65 ++++++
 .../common-services/TITAN/1.0.0/kerberos.json   |  52 +++++
 .../common-services/TITAN/1.0.0/metainfo.xml    | 124 +++++++++++
 .../package/alerts/alert_check_titan_server.py  |  65 ++++++
 .../package/files/gremlin-server-script.sh      |  86 ++++++++
 .../package/files/tinkergraph-empty.properties  |  18 ++
 .../TITAN/1.0.0/package/files/titanSmoke.groovy |  20 ++
 .../TITAN/1.0.0/package/scripts/params.py       | 202 ++++++++++++++++++
 .../1.0.0/package/scripts/params_server.py      |  37 ++++
 .../1.0.0/package/scripts/service_check.py      |  88 ++++++++
 .../TITAN/1.0.0/package/scripts/titan.py        | 143 +++++++++++++
 .../TITAN/1.0.0/package/scripts/titan_client.py |  61 ++++++
 .../TITAN/1.0.0/package/scripts/titan_server.py |  67 ++++++
 .../1.0.0/package/scripts/titan_service.py      | 150 +++++++++++++
 .../templates/titan_solr_client_jaas.conf.j2    |  23 ++
 .../package/templates/titan_solr_jaas.conf.j2   |  26 +++
 .../BigInsights/4.2.5/role_command_order.json   |  12 +-
 .../4.2.5/services/JNBG/metainfo.xml            |  26 +++
 .../4.2.5/services/R4ML/metainfo.xml            |  37 ++++
 .../4.2.5/services/SYSTEMML/metainfo.xml        |  37 ++++
 .../4.2.5/services/TITAN/metainfo.xml           |  40 ++++
 .../BigInsights/4.2.5/services/stack_advisor.py |  53 +++++
 .../upgrades/nonrolling-upgrade-to-hdp-2.6.xml  |   2 +-
 .../BigInsights/4.2/role_command_order.json     |   3 +-
 .../4.2/services/SYSTEMML/metainfo.xml          |  77 +++++++
 .../SYSTEMML/package/scripts/__init__.py        |  19 ++
 .../services/SYSTEMML/package/scripts/params.py |  40 ++++
 .../SYSTEMML/package/scripts/service_check.py   |  43 ++++
 .../SYSTEMML/package/scripts/systemml_client.py |  49 +++++
 .../services/TITAN/configuration/titan-env.xml  |  46 ++++
 .../TITAN/configuration/titan-hbase-solr.xml    |  66 ++++++
 .../TITAN/configuration/titan-log4j.xml         |  65 ++++++
 .../4.2/services/TITAN/kerberos.json            |  17 ++
 .../BigInsights/4.2/services/TITAN/metainfo.xml |  88 ++++++++
 .../TITAN/package/files/titanSmoke.groovy       |  20 ++
 .../services/TITAN/package/scripts/params.py    | 128 ++++++++++++
 .../TITAN/package/scripts/service_check.py      |  64 ++++++
 .../4.2/services/TITAN/package/scripts/titan.py |  70 +++++++
 .../TITAN/package/scripts/titan_client.py       |  58 ++++++
 .../upgrades/nonrolling-upgrade-to-hdp-2.6.xml  |   2 +-
 81 files changed, 5583 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py
index 30674a8..8151572 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py
@@ -19,10 +19,14 @@ limitations under the License.
 """
 
 import sys
-from resource_management import *
+
+from resource_management.libraries.script.script import Script
+from resource_management.core.resources.service import Service
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
   FILE_TYPE_XML
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.format import format
 from hbase import hbase
 from hbase_service import hbase_service
 from hbase_decommission import hbase_decommission
@@ -31,6 +35,8 @@ from setup_ranger_hbase import setup_ranger_hbase
 from ambari_commons import OSCheck, OSConst
 from ambari_commons.os_family_impl import OsFamilyImpl
 
+if OSCheck.is_windows_family():
+  from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
 
 class HbaseMaster(Script):
   def configure(self, env):
@@ -83,7 +89,7 @@ class HbaseMasterDefault(HbaseMaster):
     env.set_params(params)
     self.configure(env) # for security
     setup_ranger_hbase(upgrade_type=upgrade_type, service_name="hbase-master")
-    hbase_service('master', action = 'start')
+    hbase_service('master', action='start')
     
   def stop(self, env, upgrade_type=None):
     import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py
index 3b8e494..1d618ed 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py
@@ -17,14 +17,17 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
+from datetime import datetime
 
-from resource_management import *
+from resource_management.core.resources.system import Execute
+from resource_management.core.resources.system import File
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.format import format
+from resource_management.core.shell import as_sudo
+from resource_management.libraries.functions.show_logs import show_logs
 from resource_management.core.logger import Logger
 
-def hbase_service(
-  name,
-  action = 'start'): # 'start' or 'stop' or 'status'
-    
+def hbase_service(name, action='start'):
     import params
   
     role = name
@@ -36,18 +39,28 @@ def hbase_service(
     # delete wal log if HBase version has moved down
     if params.to_backup_wal_dir:
       wal_directory = params.wal_directory
-      timestamp = datetime.datetime.now()
+      timestamp = datetime.now()
       timestamp_format = '%Y%m%d%H%M%S'
       wal_directory_backup = '%s_%s' % (wal_directory, timestamp.strftime(timestamp_format))
 
-      rm_cmd = format("hadoop fs -mv {wal_directory} {wal_directory_backup}")
+      check_if_wal_dir_exists = format("hdfs dfs -ls {wal_directory}")
+      wal_dir_exists = False
       try:
-        Execute ( rm_cmd,
-          user = params.hbase_user
-        )
+        Execute(check_if_wal_dir_exists,
+                user=params.hbase_user
+                )
+        wal_dir_exists = True
       except Exception, e:
-        #Should still allow HBase Start/Stop to proceed
-        Logger.error("Failed to backup HBase WAL directory, command: {0} . Exception: {1}".format(rm_cmd, e.message))
+        Logger.error(format("Did not find HBase WAL directory {wal_directory}. It's possible that it was already moved. Exception: {e.message}"))
+
+      if wal_dir_exists:
+        move_wal_dir_cmd = format("hdfs dfs -mv {wal_directory} {wal_directory_backup}")
+        try:
+          Execute(move_wal_dir_cmd,
+            user=params.hbase_user
+          )
+        except Exception, e:
+          Logger.error(format("Failed to backup HBase WAL directory, command: {move_wal_dir_cmd} . Exception: {e.message}"))
 
     if action == 'start':
       daemon_cmd = format("{cmd} start {role}")
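
Stripped of Ambari's Execute wrapper, the new check-then-move flow amounts to the following (sketch; the real code runs both commands as the hbase user and only logs failures so HBase start/stop can proceed):

import datetime
import subprocess

def backup_wal_dir(wal_directory):
    backup = "%s_%s" % (wal_directory,
                        datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
    # Only attempt the move if the WAL directory actually exists.
    if subprocess.call(["hdfs", "dfs", "-ls", wal_directory]) == 0:
        subprocess.check_call(["hdfs", "dfs", "-mv", wal_directory, backup])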

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/alerts.json b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/alerts.json
new file mode 100755
index 0000000..963c687
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/alerts.json
@@ -0,0 +1,32 @@
+{
+  "JNBG": {
+    "service": [],
+    "KERNEL_GATEWAY": [
+      {
+        "name": "jupyter_kernel_gateway",
+        "label": "Jupyter Kernel Gateway Process",
+        "description": "This host-level alert is triggered if the Jupyter Kernel Gateway cannot be determined to be up.",
+        "interval": 1,
+        "scope": "HOST",
+        "source": {
+          "type": "PORT",
+          "uri": "{{jnbg-env/jnbg_port}}",
+          "default_port": 8888,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ]
+  }
+}
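
The PORT alert above reduces to a timed TCP connect with warning/critical thresholds at 1.5s and 5.0s. A sketch of the equivalent probe (an assumption about what Ambari's PORT alert type does, not the agent's actual implementation):

import socket
import time

def probe(host, port=8888, warn=1.5, crit=5.0):
    start = time.time()
    try:
        socket.create_connection((host, port), timeout=crit).close()
    except socket.error as e:
        return ("CRITICAL", "Connection failed: %s to %s:%s" % (e, host, port))
    elapsed = time.time() - start
    state = "OK" if elapsed < warn else "WARNING"
    return (state, "TCP %s - %.3fs response on port %s" % (state, elapsed, port))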

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/configuration/jnbg-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/configuration/jnbg-env.xml b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/configuration/jnbg-env.xml
new file mode 100755
index 0000000..f9da01e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/configuration/jnbg-env.xml
@@ -0,0 +1,208 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="false">
+  <property>
+    <name>kernel_spark_opts</name>
+    <value>--master=yarn --deploy-mode=client --driver-java-options="-Dlog4j.logFile=/var/log/jnbg/spark-driver-USER.log -Dlog4j.configuration=file:/var/lib/jnbg/conf/log4j.properties"</value>
+    <display-name>spark_opts</display-name>
+    <description>
+      SPARK_OPTS used for all kernels (ToreeInstall.spark_opts, PYSPARK_SUBMIT_ARGS).
+      Optionally include -Dlog4j.logLevel and -Dlog4j.fileSize in --driver-java-options
+      to influence logging behavior. Default: -Dlog4j.logLevel=INFO -Dlog4j.fileSize=10MB
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>toree_opts</name>
+    <value></value>
+    <display-name>ToreeInstall.toree_opts</display-name>
+    <description>__TOREE_OPTS__ for Apache Toree kernel</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>notebook_user</name>
+    <value>notebook</value>
+    <display-name>Notebook service user</display-name>
+    <description>User to run JKG and kernel processes</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>notebook_group</name>
+    <value>notebook</value>
+    <display-name>Notebook service user group</display-name>
+    <description>Service user's group</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>spark_home</name>
+    <value>/usr/iop/current/spark2-client</value>
+    <display-name>spark_home</display-name>
+    <description>SPARK_HOME for kernels</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>spark_sql_warehouse_dir</name>
+    <value>/apps/jnbg/spark-warehouse</value>
+    <display-name>spark.sql.warehouse.dir</display-name>
+    <description>Warehouse for Notebook applications</description>
+  </property>
+  <property>
+    <name>jkg_port</name>
+    <value>8888</value>
+    <display-name>KernelGatewayApp.port</display-name>
+    <description>Jupyter Kernel Gateway port</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>jkg_loglevel</name>
+    <value>INFO</value>
+    <display-name>Application.log_level</display-name>
+    <description>Jupyter Kernel Gateway Log level</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>jkg_pid_dir_prefix</name>
+    <value>/var/run/jnbg</value>
+    <display-name>JNBG pid directory prefix</display-name>
+    <description>JNBG pid directory prefix for storing process ID</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>jkg_log_dir</name>
+    <value>/var/log/jnbg</value>
+    <display-name>Kernel Gateway log directory</display-name>
+    <description>Jupyter Kernel Gateway logfile directory</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>max_kernels</name>
+    <value>15</value>
+    <display-name>Maximum number of kernel instances</display-name>
+    <description>
+      Limits the number of kernel instances allowed to run by this gateway.
+      Unbounded by default.
+
+      Note: Number of kernel instances is also affected by the Spark2 property spark.port.maxRetries. Increase spark.port.maxRetries from its default value to a much higher value to enable controlling the number of active kernel instances using max_kernels.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>cull_idle_kernel_period</name>
+    <value>43200</value>
+    <display-name>Idle kernel culling period</display-name>
+    <description>Period in seconds kernel can idle before being culled</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>cull_idle_kernel_interval</name>
+    <value>300</value>
+    <display-name>Idle kernel culling interval</display-name>
+    <description>Check for idle kernels to cull every specified number of seconds</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>python_interpreter_path</name>
+    <value>/usr/bin/python</value>
+    <display-name>Python interpreter path</display-name>
+    <description>
+      PYTHON_EXE for virtualenv
+      Python interpreter must be version 2.7.x
+    </description>
+    <value-attributes>
+      <type>file</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>python_virtualenv_path_prefix</name>
+    <value>/var/lib/jnbg</value>
+    <display-name>Python virtualenv path prefix</display-name>
+    <description>
+      Python virtualenv path prefix
+      $VIRTUAL_ENV=python_virtualenv_path_prefix/python2.7
+    </description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>python_virtualenv_restrictive</name>
+    <value>true</value>
+    <display-name>Python virtualenv restrictive mode</display-name>
+    <description> 
+      Python virtualenv restrictive mode.
+      Check for restrictive mode so that service users cannot modify it.
+      Uncheck so that service users can install packages with "pip install ..."
+    </description>
+    <final>true</final>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>pythonpath</name>
+    <value>/usr/iop/current/spark2-client/python:/usr/iop/current/spark2-client/python/lib/pyspark.zip:/usr/iop/current/spark2-client/python/lib/py4j-0.10.4-src.zip</value>
+    <display-name>PYTHONPATH</display-name>
+    <description>PYTHONPATH for PySpark kernel</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>spark_conf_dir</name>
+    <value>/var/lib/jnbg/conf</value>
+    <display-name>SPARK_CONF_DIR</display-name>
+    <description>Spark configuration directory, currently only contains log4j.properties (see "-Dlog4j.configuration=file:/var/lib/jnbg/conf/log4j.properties" in spark_opts)</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
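
For orientation (not part of this patch): the capacity and culling properties above surface as Jupyter Kernel Gateway command-line flags once the service assembles its start command in jkg_toree_params.py further below. With the default values declared in this file, the relevant portion of that command looks roughly like:

  # illustrative only; flag names match those assembled in jkg_toree_params.py
  jupyter kernelgateway \
    --ip=0.0.0.0 \
    --KernelGatewayApp.max_kernels=15 \
    --KernelGatewayApp.cull_idle_kernel_period=43200 \
    --KernelGatewayApp.cull_idle_kernel_interval=300 \
    --JupyterWebsocketPersonality.list_kernels=True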

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/kerberos.json b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/kerberos.json
new file mode 100755
index 0000000..8777709
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/kerberos.json
@@ -0,0 +1,59 @@
+{
+  "services": [
+    {
+      "name": "JNBG",
+      "identities": [
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "components": [
+        {
+          "name": "KERNEL_GATEWAY",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "jnbg_principal",
+              "principal": {
+                "value": "${jnbg-env/notebook_user}/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "jnbg-env/jnbg.service.kerberos.principal",
+                "local_username" : "${jnbg-env/notebook_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/jnbg.service.keytab",
+                "owner": {
+                  "name": "${jnbg-env/notebook_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "jnbg-env/jnbg.service.kerberos.keytab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "PYTHON_CLIENT",
+          "identities": [
+            {
+              "name": "/JNBG/KERNEL_GATEWAY/jnbg_principal"
+            }
+          ]
+        }
+      ],
+
+      "configurations": [
+        {
+          "jnbg-env": {
+            "jnbg.kerberos.enabled": "true"
+          }
+        }
+      ]
+    }
+  ]
+}
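
For orientation (all concrete values here are hypothetical): the ${...} references in jnbg_principal resolve against cluster configuration when Kerberos is enabled. With notebook_user=notebook, a host named c6401.ambari.apache.org, and realm EXAMPLE.COM, the kinit command later assembled in jkg_toree_params.py would be along the lines of:

  /usr/bin/kinit -kt /etc/security/keytabs/jnbg.service.keytab notebook/c6401.ambari.apache.org@EXAMPLE.COM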

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/metainfo.xml b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/metainfo.xml
new file mode 100755
index 0000000..5afe904
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/metainfo.xml
@@ -0,0 +1,108 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>JNBG</name>
+      <displayName>JNBG</displayName>
+      <comment>Jupyter Notebook Kernel Gateway with Apache Toree</comment>
+      <version>0.2.0</version>
+      <components>
+        <component>
+          <name>KERNEL_GATEWAY</name>
+          <displayName>Jupyter Kernel Gateway</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>false</versionAdvertised>
+          <commandScript>
+            <script>scripts/jkg_toree.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>3000</timeout>
+          </commandScript>
+          <dependencies>
+            <dependency>
+              <name>JNBG/PYTHON_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>SPARK/SPARK2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>YARN/YARN_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+        </component>
+        <component>
+          <name>PYTHON_CLIENT</name>
+          <displayName>Python Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>false</versionAdvertised>
+          <commandScript>
+            <script>scripts/py_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>3000</timeout>
+          </commandScript>
+          <dependencies>
+            <dependency>
+              <name>SPARK/SPARK2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+        </component>
+      </components>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>SPARK2</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>jnbg-env</config-type>
+      </configuration-dependencies>
+      <restartRequiredAfterChange>true</restartRequiredAfterChange>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/jkg_install.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/jkg_install.sh b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/jkg_install.sh
new file mode 100755
index 0000000..2027c9f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/jkg_install.sh
@@ -0,0 +1,169 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+set -x
+
+PY_EXEC=$1
+PY_VENV_PATH_PREFIX=$2
+PY_VENV_OWNER=$3
+KINIT_CMD=$4
+
+checkPipInstall()
+{
+  pip show $1 > /dev/null 2>&1
+}
+
+checkSuccess()
+{
+  if [ $? != 0 ]; then
+    set +x
+    echo "Error encountered at line $1 while attempting to: "
+    if [ -n "$2" ]; then
+      echo $2
+    fi
+    echo Exiting.
+    exit 1
+  fi
+  set -x
+}
+
+# /etc/pip.conf overrides all
+if [ -f /etc/pip.conf ]; then
+  PYPI_URL=$(cat  /etc/pip.conf | grep -i extra-index-url | awk '{print $3}')
+  PYPI_HOST=$(cat  /etc/pip.conf | grep -i extra-index-url | awk '{print $3}' | sed -e 's/^.*\/\///' | sed -e 's/:.*$//')
+  PYPI_PORT=$(cat  /etc/pip.conf | grep -i extra-index-url | awk '{print $3}'  | sed -e 's/^.*:*://' | sed -e 's/\/.*$//')
+else
+  # If no pip.conf then try to determine based on repo URLs in use
+  if [ -f /etc/yum.repos.d/IOP.repo ]; then
+    cat /etc/yum.repos.d/IOP.repo | grep baseurl |  grep -w http
+    httpurl=$?
+    cat /etc/yum.repos.d/IOP.repo | grep baseurl |  grep -w https
+    httpsurl=$?
+    if [ "$httpurl" -eq 0 ]; then
+      PYPI_HOST=$(cat /etc/yum.repos.d/IOP.repo | grep baseurl | sed -e 's/baseurl=http:\/\///' | cut -f1 -d"/")
+      PYPI_PORT=8080
+      PYPI_URL=http://${PYPI_HOST}:${PYPI_PORT}/simple/
+    elif [ "$httpsurl" -eq 0 ]; then
+      PYPI_HOST=$(cat /etc/yum.repos.d/IOP.repo | grep baseurl | sed -e 's/baseurl=https:\/\///' | cut -f1 -d"/")
+      PYPI_PORT=8080
+      PYPI_URL=http://${PYPI_HOST}:${PYPI_PORT}/simple/
+    fi
+  else
+    # fallback default
+    PYPI_HOST=ibm-open-platform.ibm.com
+    PYPI_PORT=8080
+    PYPI_URL=http://ibm-open-platform.ibm.com:8080/simple/
+  fi
+fi
+
+if [[ -z "${PYPI_URL}" || -z "${PYPI_HOST}" || -z "${PYPI_PORT}" ]];then
+  PYPI_HOST=ibm-open-platform.ibm.com
+  PYPI_PORT=8080
+  PYPI_URL=http://ibm-open-platform.ibm.com:8080/simple/
+fi
+
+PLATFORM=`uname -p`
+rhver=7
+
+if [ "$PY_EXEC" = "/opt/rh/python27/root/usr/bin/python" ]; then
+  rhscl=1
+else
+  rhscl=0
+fi
+
+if [ "$PLATFORM" == "x86_64" ]
+then
+  if [ -x /usr/bin/lsb_release ]; then
+    rhver=$(/usr/bin/lsb_release -rs | cut -f1 -d.)
+  fi
+
+  if [ "$rhver" -eq 6 ];then
+    if [ "$rhscl" -eq 1 ] && [ ! -f /opt/rh/python27/enable ]; then
+      echo "Installation failed; Install Python 2.7 using Red Hat Software Collections and retry."
+      exit 1
+    elif [ "$rhscl" -eq 1 ]; then
+      source /opt/rh/python27/enable
+      # uninstall older pip version that accompanies SCL
+      pip uninstall -y pip
+    fi
+  fi
+fi
+
+pyver=`echo $(${PY_EXEC} -V 2>&1 | awk '{ print $2 }') | sed -e 's/\.//g'`
+if [ "$pyver" -lt 270 ]; then
+  echo "Installation failed; Ensure that the specified python_interpreter_path is Python version 2.7."
+  exit 1
+fi
+
+easy_install pip
+checkSuccess $LINENO "-  easy_install pip"
+pip -V
+
+pip install --trusted-host ${PYPI_HOST} --no-cache-dir --index-url http://${PYPI_HOST}:${PYPI_PORT}/simple virtualenv --upgrade
+checkPipInstall virtualenv
+checkSuccess $LINENO "-  pip install virtualenv"
+
+if [ -d "${PY_VENV_PATH_PREFIX}/python2.7" ]; then
+  # Warning only to tolerate pre-existing virtual env. from failed installs
+  echo "Installation warning: ${PY_VENV_PATH_PREFIX}/python2.7 exists."
+  echo "This might indicate remnants from a prior or failed installation."
+  echo "Check specified property value for python_virtualenv_path_prefix."
+fi
+
+if [ ! -x "${PY_EXEC}" ]; then
+  echo "Installation failed: ${PY_EXEC} does not appear to be a valid python executable; Use a different python_interpreter_path."
+  exit 1
+fi
+
+virtualenv -p ${PY_EXEC} ${PY_VENV_PATH_PREFIX}/python2.7
+checkSuccess $LINENO "-  create virtualenv using ${PY_EXEC}"
+
+source ${PY_VENV_PATH_PREFIX}/python2.7/bin/activate
+pip -V
+
+if [ "$rhver" -eq 6 ]; then
+  if [ "$rhscl" -eq 1 ]; then
+    pip -V
+    # uninstall older pip version that accompanies virtualenv with SCL
+    pip uninstall -y pip
+    easy_install pip
+    checkPipInstall pip
+    checkSuccess $LINENO "- easy_install pip"
+  fi
+fi
+
+pip -V
+# Use --index-url and not --extra-index-url as we are trying to install
+# specific package versions
+pip install --trusted-host ${PYPI_HOST} --no-cache-dir --index-url http://${PYPI_HOST}:${PYPI_PORT}/simple setuptools --upgrade
+checkPipInstall setuptools
+checkSuccess $LINENO "- pip install setuptools"
+
+# Using --upgrade enables updating missing dependencies after failed installs
+pip install --trusted-host ${PYPI_HOST} --no-cache-dir --index-url http://${PYPI_HOST}:${PYPI_PORT}/simple/ jupyter_kernel_gateway --upgrade
+checkPipInstall jupyter_kernel_gateway
+checkSuccess $LINENO "- pip install jupyter_kernel_gateway"
+
+# Set ownership of the created virtualenv if configured via python_virtualenv_restrictive
+if [ "${PY_VENV_OWNER}" != "root" ]; then
+  echo ====== Virtualenv owner = $PY_VENV_OWNER =========
+  chown -R ${PY_VENV_OWNER}: ${PY_VENV_PATH_PREFIX}/python2.7
+fi
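
The /etc/pip.conf branch above assumes a line of the form "extra-index-url = http://host:port/simple/". A standalone sketch (hypothetical helper, not part of the patch) to sanity-check what the install scripts would derive on a given host:

  # prints the URL, host and port that the parsing logic above would extract
  grep -i extra-index-url /etc/pip.conf | awk '{print $3}' |
  while read url; do
    host=$(echo "$url" | sed -e 's/^.*\/\///' -e 's/:.*$//')
    port=$(echo "$url" | sed -e 's/^.*:*://' -e 's/\/.*$//')
    echo "PYPI_URL=$url PYPI_HOST=$host PYPI_PORT=$port"
  done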

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/jkg_start.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/jkg_start.sh b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/jkg_start.sh
new file mode 100755
index 0000000..fdc9e59
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/jkg_start.sh
@@ -0,0 +1,84 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+set -x
+
+START_CMD=$1
+SPARK_HOME=$2
+PY_EXEC=$3
+PY_VENV_PATH_PREFIX=$4
+KINIT_CMD=$5
+LOG=$6
+PIDFILE=$7
+
+PLATFORM=`uname -p`
+rhver=7
+
+if [ "$PY_EXEC" = "/opt/rh/python27/root/usr/bin/python" ]; then
+  rhscl=1
+else
+  rhscl=0
+fi
+
+if [ "$PLATFORM" == "x86_64" ]
+then
+  if [ -x /usr/bin/lsb_release ]; then
+    rhver=$(/usr/bin/lsb_release -rs | cut -f1 -d.)
+  fi
+
+  if [ "$rhver" -eq 6 ];then
+    if [ "$rhscl" -eq 1 ] && [ ! -f /opt/rh/python27/enable ]; then
+      echo "Detected invalid installation state: Install Python 2.7 using Red Hat Software Collections and try reinstalling the service."
+      exit 1
+    elif [ "$rhscl" -eq 1 ]; then
+      source /opt/rh/python27/enable
+      # uninstall older pip version that accompanies SCL
+      pip uninstall -y pip
+    fi
+  fi
+fi
+
+pyver=`echo $(${PY_EXEC} -V 2>&1 | awk '{ print $2 }') | sed -e 's/\.//g'`
+if [ "$pyver" -lt 270 ]; then
+  echo "Detected invalid installation state: Ensure that the specified python_interpreter_path is Python version 2.7."
+  exit 1
+fi
+
+if [ ! -d "${PY_VENV_PATH_PREFIX}/python2.7" ]; then
+  echo "Did not find necessary virtual environment to execute service startup. This state in unexpected and inconsistent when the service is in the INSTALLED state. Delete the service and reinstall."
+  exit 1
+fi
+source ${PY_VENV_PATH_PREFIX}/python2.7/bin/activate
+
+# Required for supporting Python 2 kernel
+export PYTHONPATH=${SPARK_HOME}/python/lib/pyspark.zip:${SPARK_HOME}/python:${SPARK_HOME}/python/lib/py4j-0.10.4-src.zip
+
+export SPARK_CONF_DIR=$SPARK_HOME/conf
+source $SPARK_CONF_DIR/spark-env.sh
+set +x
+eval "$START_CMD >> $LOG 2>&1 &"
+if [ $? -eq 0 ]; then
+  echo $! > $PIDFILE
+  exit 0
+fi
+exit 1
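
The start sequence above uses the usual background-and-record-PID shell pattern: the eval'd command is detached with &, the shell's $! is written to the PID file, and later status checks key off that file. Note that $? immediately after backgrounding is almost always 0, which is why jkg_toree.py below performs its own delayed liveness probe after start. A self-contained illustration of the pattern (paths hypothetical):

  # run a long-lived command in the background, capture its pid, verify liveness
  eval "sleep 30 >> /tmp/demo.log 2>&1 &"
  echo $! > /tmp/demo.pid
  kill -0 "$(cat /tmp/demo.pid)" && echo "started with pid $(cat /tmp/demo.pid)"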

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/log4j_setup.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/log4j_setup.sh b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/log4j_setup.sh
new file mode 100755
index 0000000..921045d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/log4j_setup.sh
@@ -0,0 +1,79 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+set -x
+
+SPARK_CONFIG_DIR=$1
+
+log4j_properties_file="${SPARK_CONFIG_DIR}/log4j.properties"
+
+cat <<'EOF' > "${log4j_properties_file}"
+
+# default log file, overridden by Java System property -Dlog4j.logFile=...
+log4j.logFile=/var/log/jnbg/spark-driver-${user.name}.log
+
+# default (root) log level, overridable by Java System property -Dlog4j.logLevel=...
+log4j.logLevel=INFO
+
+# default log file size limit, overridable by Java System property -Dlog4j.fileSize=... (KB, MB, GB)
+log4j.fileSize=10MB
+
+# default max number of log file backups, overridable by Java System property -Dlog4j.backupFiles=...
+log4j.backupFiles=10
+
+# log to file using rolling log strategy with one backup file
+# NOTE: Spark REPL overrides rootCategory, set log4j.logLevel above
+log4j.rootCategory=${log4j.logLevel}, logfile
+log4j.appender.logfile=org.apache.log4j.RollingFileAppender
+log4j.appender.logfile.File=${log4j.logFile}
+log4j.appender.logfile.MaxFileSize=${log4j.fileSize}
+log4j.appender.logfile.MaxBackupIndex=${log4j.backupFiles}
+log4j.appender.logfile.encoding=UTF-8
+log4j.appender.logfile.layout=org.apache.log4j.PatternLayout
+log4j.appender.logfile.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n
+
+# Reduce Toree related "noise"
+log4j.logger.org.apache.toree.kernel.protocol.v5.stream.KernelOutputStream=ERROR
+
+# Modified Spark 2.1 default settings:
+
+# Spark overrides rootCategory level with the level set for the Scala & PySpark REPLs (default=WARN)
+# This is intended to reduce log verbosity while working with a Spark shell or PySpark shell.
+# However, notebook kernels internally use the spark-shell and pyspark shell implementation, but
+# since notebooks are logging to a log file, we want potentially more verbose logs.
+# We need to set the spark-shell and pyspark shell log level to the same level as the rootCategory.
+# See: org.apache.spark.internal.Logging#initializeLogging(isInterpreter=true)
+log4j.logger.org.apache.spark.repl.Main=${log4j.rootCategory}
+log4j.logger.org.apache.spark.api.python.PythonGatewayServer=${log4j.rootCategory}
+
+# Settings to quiet third party logs that are too verbose
+log4j.logger.org.spark_project.jetty=WARN
+log4j.logger.org.spark_project.jetty.util.component.AbstractLifeCycle=ERROR
+log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO
+log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO
+log4j.logger.org.apache.parquet=ERROR
+log4j.logger.parquet=ERROR
+
+# SPARK-9183: Settings to avoid annoying messages when looking up nonexistent UDFs in SparkSQL with Hive support
+log4j.logger.org.apache.hadoop.hive.metastore.RetryingHMSHandler=FATAL
+log4j.logger.org.apache.hadoop.hive.ql.exec.FunctionRegistry=ERROR
+
+EOF
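
Every log4j.* default written above is declared overridable through a Java system property, so individual kernels can adjust logging without editing the file. For example (values illustrative), adding the following to kernel_spark_opts switches the driver to DEBUG logging with a larger rolling-file cap:

  --driver-java-options "-Dlog4j.configuration=file:/var/lib/jnbg/conf/log4j.properties -Dlog4j.logLevel=DEBUG -Dlog4j.fileSize=100MB -Dlog4j.backupFiles=20"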

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/pyspark_configure.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/pyspark_configure.sh b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/pyspark_configure.sh
new file mode 100755
index 0000000..59cd28d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/pyspark_configure.sh
@@ -0,0 +1,104 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+set -x
+
+PY_EXEC=$1
+PY_VENV_PATH_PREFIX=$2
+PY_VENV_OWNER=$3
+KINIT_CMD=$4
+SPARK_HOME=$5
+PYTHONPATH=$6
+SPARK_OPTS=$7
+
+if [ ! -d "${PY_VENV_PATH_PREFIX}/python2.7" ]; then
+  echo "Unexpected state of installation. No Python client installation detected while trying to install PySpark kernel."
+  exit 0
+fi
+
+source ${PY_VENV_PATH_PREFIX}/python2.7/bin/activate
+
+if [ -z "${VIRTUAL_ENV}" ]; then
+  echo "Unexpected condition detected; Unable to find virtualenv environment."
+  exit 1
+fi
+
+# assume --sys-prefix used for Toree kernel installs
+kernel_dir=${VIRTUAL_ENV}/share/jupyter/kernels/spark_2.1_python2
+kernel_run_file=$kernel_dir/bin/run.sh
+kernel_json_file=$kernel_dir/kernel.json
+
+mkdir -p $kernel_dir/bin
+rm -f $kernel_json_file $kernel_run_file
+
+cat <<'EOF' >> $kernel_run_file
+#!/usr/bin/env bash
+echo
+echo "Starting Python 2 kernel with Spark 2.1 for user ${KERNEL_USERNAME}"
+echo
+
+CONF_ARGS="--name '${KERNEL_USERNAME:-Notebook} Python' \
+           --conf spark.sql.catalogImplementation=in-memory"
+
+PYSPARK_SUBMIT_ARGS="${CONF_ARGS} ${PYSPARK_SUBMIT_ARGS}"
+
+# replace generic log file name with user-specific log file name based on authenticated end-user
+PYSPARK_SUBMIT_ARGS="${PYSPARK_SUBMIT_ARGS//spark-driver-USER.log/spark-driver-${KERNEL_USERNAME:-all}.log}"
+
+echo "PYSPARK_SUBMIT_ARGS=\"${PYSPARK_SUBMIT_ARGS}\""
+
+EOF
+
+# For kerberized clusters
+if [ -n "${KINIT_CMD}" ]; then
+  sed -i "$ a ${KINIT_CMD}\n" $kernel_run_file
+fi
+
+sed -i "$ a ${PY_VENV_PATH_PREFIX}/python2.7/bin/python2 -m ipykernel -f \${2}" $kernel_run_file
+
+chmod 755 $kernel_run_file
+
+# Escape double-quotes in the user specified SPARK_OPTS value
+SPARK_OPTS="${SPARK_OPTS//\"/\\\"}"
+
+cat <<EOF >> $kernel_json_file
+{
+  "language": "python",
+  "display_name": "Spark 2.1 - Python 2",
+  "env": {
+    "SPARK_HOME": "${SPARK_HOME}",
+    "PYTHONPATH": "${PYTHONPATH}",
+    "PYTHONSTARTUP": "${SPARK_HOME}/python/pyspark/shell.py",
+    "PYSPARK_SUBMIT_ARGS": "${SPARK_OPTS} pyspark-shell"
+  },
+  "argv": [
+    "$kernel_run_file",
+    "-f",
+    "{connection_file}"
+  ]
+}
+EOF
+
+# Set ownership of the created virtualenv if configured via python_virtualenv_restrictive
+if [ "${PY_VENV_OWNER}" != "root" ]; then
+  echo ====== Virtualenv owner = $PY_VENV_OWNER =========
+  chown -R ${PY_VENV_OWNER}: ${PY_VENV_PATH_PREFIX}/python2.7
+fi
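
The generated run.sh personalizes the driver log per authenticated notebook user through bash pattern substitution; this assumes kernel_spark_opts embeds the placeholder file name spark-driver-USER.log. A standalone sketch of that expansion:

  PYSPARK_SUBMIT_ARGS='--conf spark.driver.extraJavaOptions=-Dlog4j.logFile=/var/log/jnbg/spark-driver-USER.log'
  KERNEL_USERNAME=alice
  echo "${PYSPARK_SUBMIT_ARGS//spark-driver-USER.log/spark-driver-${KERNEL_USERNAME:-all}.log}"
  # -> ...spark-driver-alice.log; falls back to spark-driver-all.log when KERNEL_USERNAME is unset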

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/pythonenv_setup.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/pythonenv_setup.sh b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/pythonenv_setup.sh
new file mode 100755
index 0000000..5b2b7d9
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/pythonenv_setup.sh
@@ -0,0 +1,138 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+set -x
+
+PY_EXEC=$1
+PY_VENV_PATH_PREFIX=$2
+PY_VENV_OWNER=$3
+
+checkPipInstall()
+{
+  pip show $1 > /dev/null 2>&1
+}
+
+checkSuccess()
+{
+  if [ $? != 0 ]
+  then
+    set +x
+    echo "Error encountered at line $1 while attempting to: "
+    if [ -n "$2" ]
+    then
+      echo $2
+    fi
+    echo Exiting.
+    exit 1
+  fi
+  set -x
+}
+
+if [ -d "${PY_VENV_PATH_PREFIX}/python2.7" ]; then
+  echo "Python client installation detected. Nothing to do."
+  exit 0
+fi
+
+# /etc/pip.conf overrides all
+if [ -f /etc/pip.conf ]; then
+  PYPI_URL=$(cat  /etc/pip.conf | grep -i extra-index-url | awk '{print $3}')
+  PYPI_HOST=$(cat  /etc/pip.conf | grep -i extra-index-url | awk '{print $3}' | sed -e 's/^.*\/\///' | sed -e 's/:.*$//')
+  PYPI_PORT=$(cat  /etc/pip.conf | grep -i extra-index-url | awk '{print $3}'  | sed -e 's/^.*:*://' | sed -e 's/\/.*$//')
+else
+  # If no pip.conf then try to determine based on repo URLs in use
+  if [ -f /etc/yum.repos.d/IOP.repo ]; then
+    cat /etc/yum.repos.d/IOP.repo | grep baseurl |  grep -w http
+    httpurl=$?
+    cat /etc/yum.repos.d/IOP.repo | grep baseurl |  grep -w https
+    httpsurl=$?
+    if [ "$httpurl" -eq 0 ]; then
+      PYPI_HOST=$(cat /etc/yum.repos.d/IOP.repo | grep baseurl | sed -e 's/baseurl=http:\/\///' | cut -f1 -d"/")
+      PYPI_PORT=8080
+      PYPI_URL=http://${PYPI_HOST}:${PYPI_PORT}/simple/
+    elif [ "$httpsurl" -eq 0 ]; then
+      PYPI_HOST=$(cat /etc/yum.repos.d/IOP.repo | grep baseurl | sed -e 's/baseurl=https:\/\///' | cut -f1 -d"/")
+      PYPI_PORT=8080
+      PYPI_URL=http://${PYPI_HOST}:${PYPI_PORT}/simple/
+    fi
+  else
+    # fallback default
+    PYPI_HOST=ibm-open-platform.ibm.com
+    PYPI_PORT=8080
+    PYPI_URL=http://ibm-open-platform.ibm.com:8080/simple/
+  fi
+fi
+
+if [[ -z "${PYPI_URL}" || -z "${PYPI_HOST}" || -z "${PYPI_PORT}" ]];then
+  PYPI_HOST=ibm-open-platform.ibm.com
+  PYPI_PORT=8080
+  PYPI_URL=http://ibm-open-platform.ibm.com:8080/simple/
+fi
+
+PLATFORM=`uname -p`
+rhver=7
+
+if [ "$PY_EXEC" = "/opt/rh/python27/root/usr/bin/python" ]; then
+  rhscl=1
+else
+  rhscl=0
+fi
+
+if [ "$PLATFORM" == "x86_64" ]
+then
+  if [ -x /usr/bin/lsb_release ]; then
+    rhver=$(/usr/bin/lsb_release -rs | cut -f1 -d.)
+  fi
+
+  if [ "$rhver" -eq 6 ];then
+    if [ "$rhscl" -eq 1 ] && [ ! -f /opt/rh/python27/enable ]; then
+      echo "Installation failed; Install Python 2.7 using Red Hat Software Collections and retry."
+      exit 1
+    elif [ "$rhscl" -eq 1 ]; then
+      source /opt/rh/python27/enable
+      # uninstall older pip version that accompanies SCL
+      pip uninstall -y pip
+    fi
+  fi
+fi
+
+pyver=`echo $(${PY_EXEC} -V 2>&1 | awk '{ print $2 }') | sed -e 's/\.//g'`
+if [ "$pyver" -lt 270 ]; then
+  echo "Installation failed; Ensure that the specified python_interpreter_path is Python version 2.7."
+  exit 1
+fi
+
+easy_install pip
+checkSuccess $LINENO "-  easy_install pip"
+pip -V
+
+pip install --trusted-host ${PYPI_HOST} --no-cache-dir --index-url http://${PYPI_HOST}:${PYPI_PORT}/simple virtualenv --upgrade
+checkPipInstall virtualenv
+checkSuccess $LINENO "-  pip install virtualenv"
+
+virtualenv -p ${PY_EXEC} ${PY_VENV_PATH_PREFIX}/python2.7
+checkSuccess $LINENO "-  create virtualenv using ${PY_EXEC}"
+
+# Set ownership of the created virtualenv if configured via python_virtualenv_restrictive
+if [ "${PY_VENV_OWNER}" != "root" ]; then
+  echo ====== Virtualenv owner = $PY_VENV_OWNER =========
+  chown -R ${PY_VENV_OWNER}: ${PY_VENV_PATH_PREFIX}/python2.7
+fi
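
Once this script has run, the resulting environment can be verified by hand (assuming the default python_virtualenv_path_prefix of /var/lib/jnbg):

  source /var/lib/jnbg/python2.7/bin/activate
  python -V    # expect 2.7.x, per the version gate above
  pip -V       # pip resolved from inside the virtualenv
  deactivate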

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/toree_configure.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/toree_configure.sh b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/toree_configure.sh
new file mode 100755
index 0000000..8f4cbb3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/toree_configure.sh
@@ -0,0 +1,151 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+set -x
+
+NBX_USER=$1
+PY_EXEC=$2
+PY_VENV_PATH_PREFIX=$3
+PY_VENV_OWNER=$4
+KINIT_CMD=$5
+SPARK_HOME=$6
+TOREE_INTERPRETERS=$7
+TOREE_OPTS=${8:-""}
+SPARK_OPTS=$9
+
+checkSuccess()
+{
+  if [ $? != 0 ]
+  then
+    set +x
+    echo "Error encountered at line $1 while attempting to: "
+    if [ -n "$2" ]
+    then
+      echo $2
+    fi
+    echo Exiting.
+    exit 1
+  fi
+  set -x
+}
+
+# /etc/pip.conf overrides all
+if [ -f /etc/pip.conf ]; then
+  PYPI_URL=$(cat  /etc/pip.conf | grep -i extra-index-url | awk '{print $3}')
+  PYPI_HOST=$(cat  /etc/pip.conf | grep -i extra-index-url | awk '{print $3}' | sed -e 's/^.*\/\///' | sed -e 's/:.*$//')
+  PYPI_PORT=$(cat  /etc/pip.conf | grep -i extra-index-url | awk '{print $3}'  | sed -e 's/^.*:*://' | sed -e 's/\/.*$//')
+else
+  # If no pip.conf then try to determine based on repo URLs in use
+  if [ -f /etc/yum.repos.d/IOP.repo ]; then
+    cat /etc/yum.repos.d/IOP.repo | grep baseurl |  grep -w http
+    httpurl=$?
+    cat /etc/yum.repos.d/IOP.repo | grep baseurl |  grep -w https
+    httpsurl=$?
+    if [ "$httpurl" -eq 0 ]; then
+      PYPI_HOST=$(cat /etc/yum.repos.d/IOP.repo | grep baseurl | sed -e 's/baseurl=http:\/\///' | cut -f1 -d"/")
+      PYPI_PORT=8080
+      PYPI_URL=http://${PYPI_HOST}:${PYPI_PORT}/simple/
+    elif [ "$httpsurl" -eq 0 ]; then
+      PYPI_HOST=$(cat /etc/yum.repos.d/IOP.repo | grep baseurl | sed -e 's/baseurl=https:\/\///' | cut -f1 -d"/")
+      PYPI_PORT=8080
+      PYPI_URL=http://${PYPI_HOST}:${PYPI_PORT}/simple/
+    fi
+  else
+    # fallback default
+    PYPI_HOST=ibm-open-platform.ibm.com
+    PYPI_PORT=8080
+    PYPI_URL=http://ibm-open-platform.ibm.com:8080/simple/
+  fi
+fi
+
+if [[ -z "${PYPI_URL}" || -z "${PYPI_HOST}" || -z "${PYPI_PORT}" ]];then
+  PYPI_HOST=ibm-open-platform.ibm.com
+  PYPI_PORT=8080
+  PYPI_URL=http://ibm-open-platform.ibm.com:8080/simple/
+fi
+
+PLATFORM=`uname -p`
+rhver=7
+
+if [ "$PY_EXEC" = "/opt/rh/python27/root/usr/bin/python" ]; then
+  rhscl=1
+else
+  rhscl=0
+fi
+
+if [ "$PLATFORM" == "x86_64" ]
+then
+  if [ -x /usr/bin/lsb_release ]; then
+    rhver=$(/usr/bin/lsb_release -rs | cut -f1 -d.)
+  fi
+
+  if [ "$rhver" -eq 6 ];then
+    if [ "$rhscl" -eq 1 ] && [ ! -f /opt/rh/python27/enable ]; then
+      echo "Configuration failed; Expected Python 2.7 from Red Hat Software Collections was not found."
+      exit 1
+    elif [ "$rhscl" -eq 1 ]; then
+      source /opt/rh/python27/enable
+    fi
+  fi
+fi
+
+pyver=`echo $(${PY_EXEC} -V 2>&1 | awk '{ print $2 }') | sed -e 's/\.//g'`
+if [ "$pyver" -lt 270 ]; then
+  echo "Configuration failed; Ensure that the specified python_interpreter_path is Python version 2.7."
+  exit 1
+fi
+
+if [ ! -d "${PY_VENV_PATH_PREFIX}/python2.7" ]; then
+  echo "Configuration failed as the virtualenv ${PY_VENV_PATH_PREFIX}/python2.7 was not found; Ensure that the installation was usccessful."
+  exit 1
+fi
+source ${PY_VENV_PATH_PREFIX}/python2.7/bin/activate
+pip -V
+
+if [ -z "${TOREE_OPTS}" ]; then
+  jupyter toree install --sys-prefix --spark_home=${SPARK_HOME} --kernel_name='Spark 2.1' --interpreters=${TOREE_INTERPRETERS} "--spark_opts=${SPARK_OPTS}"
+  checkSuccess $LINENO "-  jupyter toree install"
+else
+  jupyter toree install --sys-prefix --spark_home=${SPARK_HOME} --kernel_name='Spark 2.1' --interpreters=${TOREE_INTERPRETERS} "--toree_opts=${TOREE_OPTS}" "--spark_opts=${SPARK_OPTS}"
+  checkSuccess $LINENO "-  jupyter toree install"
+fi
+
+# Note the value of --kernel_name and --interpreters from the toree install command determines the kernel directory
+# e.g. --kernel_name='Spark 2.1' --interpreters='Scala' --> .../jupyter/kernels/spark_2.1_scala/
+kernel_dir=${PY_VENV_PATH_PREFIX}/python2.7/share/jupyter/kernels/spark_2.1_scala
+kernel_run_file=$kernel_dir/bin/run.sh
+
+# Include the end-user name for spark-submit application name (KERNEL_USERNAME env var set by nb2kg)
+sed -i "s/--name \"'Apache Toree'\"/--name \"'\${KERNEL_USERNAME:-Notebook} Scala'\"/" $kernel_run_file
+
+# Replace log file path in SPARK_OPTS
+sed -i "/eval exec/i SPARK_OPTS=\"\${SPARK_OPTS//spark-driver-USER.log/spark-driver-\${KERNEL_USERNAME:-all}.log}\"\n" $kernel_run_file
+
+# For kerberized clusters
+if [ -n "${KINIT_CMD}" ]; then
+  sed -i "/eval exec/i ${KINIT_CMD}\n" $kernel_run_file
+fi
+
+# Set ownership of the created virtualenv if configured via python_virtualenv_restrictive
+if [ "${PY_VENV_OWNER}" != "root" ]; then
+  echo ====== Virtualenv owner = $PY_VENV_OWNER =========
+  chown -R ${PY_VENV_OWNER}: ${PY_VENV_PATH_PREFIX}/python2.7
+fi
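
As the comment above notes, Toree derives the kernel directory from --kernel_name and --interpreters by lowercasing and replacing spaces with underscores. A quick sketch of the assumed mapping:

  kernel_name="Spark 2.1"; interpreter="Scala"
  dir=$(echo "${kernel_name}_${interpreter}" | tr '[:upper:]' '[:lower:]' | tr ' ' '_')
  echo "$dir"    # -> spark_2.1_scala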

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/toree_install.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/toree_install.sh b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/toree_install.sh
new file mode 100755
index 0000000..7967105
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/toree_install.sh
@@ -0,0 +1,176 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+set -x
+
+PY_EXEC=$1
+PY_VENV_PATH_PREFIX=$2
+PY_VENV_OWNER=$3
+KINIT_CMD=$4
+SPARK_HOME=$5
+SPARK_OPTS=$6
+
+checkPipInstall()
+{
+  pip show $1 > /dev/null 2>&1
+}
+
+checkSuccess()
+{
+  if [ $? != 0 ]
+  then
+    set +x
+    echo "Error encountered at line $1 while attempting to: "
+    if [ -n "$2" ]
+    then
+      echo $2
+    fi
+    echo Exiting.
+    exit 1
+  fi
+  set -x
+}
+
+# /etc/pip.conf overrides all
+if [ -f /etc/pip.conf ]; then
+  PYPI_URL=$(cat  /etc/pip.conf | grep -i extra-index-url | awk '{print $3}')
+  PYPI_HOST=$(cat  /etc/pip.conf | grep -i extra-index-url | awk '{print $3}' | sed -e 's/^.*\/\///' | sed -e 's/:.*$//')
+  PYPI_PORT=$(cat  /etc/pip.conf | grep -i extra-index-url | awk '{print $3}'  | sed -e 's/^.*:*://' | sed -e 's/\/.*$//')
+else
+  # If no pip.conf then try to determine based on repo URLs in use
+  if [ -f /etc/yum.repos.d/IOP.repo ]; then
+    cat /etc/yum.repos.d/IOP.repo | grep baseurl |  grep -w http
+    httpurl=$?
+    cat /etc/yum.repos.d/IOP.repo | grep baseurl |  grep -w https
+    httpsurl=$?
+    if [ "$httpurl" -eq 0 ]; then
+      PYPI_HOST=$(cat /etc/yum.repos.d/IOP.repo | grep baseurl | sed -e 's/baseurl=http:\/\///' | cut -f1 -d"/")
+      PYPI_PORT=8080
+      PYPI_URL=http://${PYPI_HOST}:${PYPI_PORT}/simple/
+    elif [ "$httpsurl" -eq 0 ]; then
+      PYPI_HOST=$(cat /etc/yum.repos.d/IOP.repo | grep baseurl | sed -e 's/baseurl=https:\/\///' | cut -f1 -d"/")
+      PYPI_PORT=8080
+      PYPI_URL=http://${PYPI_HOST}:${PYPI_PORT}/simple/
+    fi
+  else
+    # fallback default
+    PYPI_HOST=ibm-open-platform.ibm.com
+    PYPI_PORT=8080
+    PYPI_URL=http://ibm-open-platform.ibm.com:8080/simple/
+  fi
+fi
+
+if [[ -z "${PYPI_URL}" || -z "${PYPI_HOST}" || -z "${PYPI_PORT}" ]];then
+  PYPI_HOST=ibm-open-platform.ibm.com
+  PYPI_PORT=8080
+  PYPI_URL=http://ibm-open-platform.ibm.com:8080/simple/
+fi
+
+PLATFORM=`uname -p`
+rhver=7
+
+if [ "$PY_EXEC" = "/opt/rh/python27/root/usr/bin/python" ]; then
+  rhscl=1
+else
+  rhscl=0
+fi
+
+if [ "$PLATFORM" == "x86_64" ]
+then
+  if [ -x /usr/bin/lsb_release ]; then
+    rhver=$(/usr/bin/lsb_release -rs | cut -f1 -d.)
+  fi
+
+  if [ "$rhver" -eq 6 ];then
+    if [ "$rhscl" -eq 1 ] && [ ! -f /opt/rh/python27/enable ]; then
+      echo "Installation failed; Install Python 2.7 using Red Hat Software Collections and retry."
+      exit 1
+    elif [ "$rhscl" -eq 1 ]; then
+      source /opt/rh/python27/enable
+      # uninstall older pip version that accompanies SCL
+      pip uninstall -y pip
+    fi
+  fi
+fi
+
+pyver=`echo $(${PY_EXEC} -V 2>&1 | awk '{ print $2 }') | sed -e 's/\.//g'`
+if [ "$pyver" -lt 270 ]; then
+  echo "Installation failed; Ensure that the specified python_interpreter_path is Python version 2.7."
+  exit 1
+fi
+
+if [ ! -d "${PY_VENV_PATH_PREFIX}/python2.7" ]; then
+  easy_install pip
+  checkSuccess $LINENO "-  easy_install pip"
+
+  pip install --trusted-host ${PYPI_HOST} --no-cache-dir --index-url http://${PYPI_HOST}:${PYPI_PORT}/simple virtualenv --upgrade
+  checkPipInstall virtualenv
+  checkSuccess $LINENO "-  pip install virtualenv"
+
+  virtualenv -p ${PY_EXEC} ${PY_VENV_PATH_PREFIX}/python2.7
+  checkSuccess $LINENO "-  create virtualenv using ${PY_EXEC}"
+fi
+source ${PY_VENV_PATH_PREFIX}/python2.7/bin/activate
+pip -V
+
+if [ "$rhver" -eq 6 ]; then
+  if [ "$rhscl" -eq 1 ]; then
+    pip -V
+    # uninstall older pip version that accompanies virtualenv with SCL
+    pip uninstall -y pip
+    easy_install pip
+    checkPipInstall pip
+    checkSuccess $LINENO "- easy_install pip"
+  fi
+fi
+
+# Use --index-url and not --extra-index-url as we are trying to install
+# specific package versions
+pip install --trusted-host ${PYPI_HOST} --no-cache-dir --index-url http://${PYPI_HOST}:${PYPI_PORT}/simple/ setuptools --upgrade
+checkPipInstall setuptools
+checkSuccess $LINENO "- pip install setuptools"
+
+# Using --upgrade enables updating missing dependencies after failed installs
+pip install --trusted-host ${PYPI_HOST} --no-cache-dir --index-url http://${PYPI_HOST}:${PYPI_PORT}/simple/ toree --upgrade
+checkPipInstall toree
+checkSuccess $LINENO "- pip install toree"
+
+# Note the value of --kernel_name and --interpreters from the toree install command determines the kernel directory
+# e.g. --kernel_name='Spark 2.1' --interpreters='Scala' --> .../jupyter/kernels/spark_2.1_scala/
+kernel_dir=${PY_VENV_PATH_PREFIX}/python2.7/share/jupyter/kernels/spark_2.1_scala
+kernel_run_file=$kernel_dir/bin/run.sh
+
+# Include the end-user name for spark-submit application name (KERNEL_USERNAME env var set by nb2kg)
+sed -i "s/--name \"'Apache Toree'\"/--name \"'\${KERNEL_USERNAME:-Notebook} Scala'\"/" $kernel_run_file
+
+# Replace log file path in SPARK_OPTS
+sed -i "/eval exec/i SPARK_OPTS=\"\${SPARK_OPTS//spark-driver-USER.log/spark-driver-\${KERNEL_USERNAME:-all}.log}\"\n" $kernel_run_file
+
+# For kerberized clusters
+if [ -n "${KINIT_CMD}" ]; then
+  sed -i "/eval exec/i ${KINIT_CMD}\n" $kernel_run_file
+fi
+
+# Set ownership of the created virtualenv if configured via python_virtualenv_restrictive
+if [ "${PY_VENV_OWNER}" != "root" ]; then
+  echo ====== Virtualenv owner = $PY_VENV_OWNER =========
+  chown -R ${PY_VENV_OWNER}: ${PY_VENV_PATH_PREFIX}/python2.7
+fi
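
After a successful run, the installed kernelspec can be confirmed from inside the virtualenv (prefix shown with its default value):

  source /var/lib/jnbg/python2.7/bin/activate
  jupyter kernelspec list    # should list spark_2.1_scala under .../share/jupyter/kernels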

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/jkg_toree.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/jkg_toree.py b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/jkg_toree.py
new file mode 100755
index 0000000..34bcfe1
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/jkg_toree.py
@@ -0,0 +1,134 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os, errno
+from resource_management.core.exceptions import Fail
+from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Execute, File, Directory
+from resource_management.core.source import StaticFile, InlineTemplate, Template
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.get_user_call_output import get_user_call_output
+from resource_management.libraries.functions.check_process_status import check_process_status
+import jnbg_helpers as helpers
+
+class GatewayKernels(Script):
+  def install(self, env):
+    import jkg_toree_params as params
+    self.install_packages(env)
+
+    # Create user and group if they don't exist
+    helpers.create_linux_user(params.user, params.group)
+
+    # Create directories used by the service and service user
+    Directory([params.home_dir, params.jkg_pid_dir, params.log_dir, params.spark_config_dir],
+              mode=0755,
+              create_parents=True,
+              owner=params.user,
+              group=params.group,
+              recursive_ownership=True
+             )
+
+    if os.path.exists(params.py_venv_pathprefix):
+      Logger.warning("Virtualenv path prefix {0} to be used for JNBG service might already exist. "
+                     "This is unexpected if the service or service component is being installed on the node for the first time. "
+                     "It could indicate remnants from a prior installation.".format(params.py_venv_pathprefix))
+
+    # Setup bash scripts for execution
+    for sh_script in params.sh_scripts:
+      File(params.sh_scripts_dir + os.sep + sh_script,
+           content=StaticFile(sh_script),
+           mode=0750
+          )
+    for sh_script in params.sh_scripts_user:
+      File(params.sh_scripts_dir + os.sep + sh_script,
+           content=StaticFile(sh_script),
+           mode=0755
+          )
+
+    # Run install commands for JKG defined in params
+    for command in params.jkg_commands: Execute(command, logoutput=True)
+
+    # Run install commands for Toree defined in params
+    for command in params.toree_commands: Execute(command, logoutput=True)
+
+    # Run setup commands for log4j
+    for command in params.log4j_setup_commands: Execute(command, logoutput=True)
+
+    # Note that configure is done during startup
+
+  def stop(self, env):
+    import status_params as params
+    import jkg_toree_params as jkgparams
+    env.set_params(params)
+
+    helpers.stop_process(params.jkg_pid_file, jkgparams.user, jkgparams.log_dir)
+
+  def start(self, env):
+    import os, sys, time
+    import jkg_toree_params as params
+    env.set_params(params)
+    self.configure(env)
+    delay_checks = 8
+
+    # Need HDFS started for the next step
+    helpers.create_hdfs_dirs(params.user, params.group, params.dirs)
+
+    Execute(params.start_command, user=params.user, logoutput=True)
+    check_process_status(params.jkg_pid_file)
+
+    time.sleep(delay_checks)
+
+    with open(params.jkg_pid_file, 'r') as fp:
+      try:
+        os.kill(int(fp.read().strip()), 0)
+      except OSError as ose:
+        if ose.errno != errno.EPERM:
+          raise Fail("Error starting Jupyter Kernel Gateway. Check {0} for the possible cause.".format(params.log_dir + "/jupyter_kernel_gateway.log"))
+        else:
+          # non-root install might have to resort to a status check, but
+          # with the side-effect that any error might only be reflected during
+          # the status check after a minute rather than immediately
+          check_process_status(params.jkg_pid_file)
+
+  def status(self, env):
+    import status_params as params
+    env.set_params(params)
+    check_process_status(params.jkg_pid_file)
+
+  def configure(self, env):
+    import jkg_toree_params as params
+    env.set_params(params)
+
+    # Create directories used by the service and service user
+    # if they were updated
+    Directory([params.home_dir, params.jkg_pid_dir, params.log_dir],
+              mode=0755,
+              create_parents=True,
+              owner=params.user,
+              group=params.group,
+              recursive_ownership=True)
+
+    # Run commands to configure Toree and PySpark
+    for command in params.toree_configure_commands: Execute(command, logoutput=True)
+    for command in params.pyspark_configure_commands: Execute(command, logoutput=True)
+
+if __name__ == "__main__":
+  GatewayKernels().execute()
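
The post-start probe in start() sends signal 0 to the recorded PID: ESRCH means the gateway died (a hard failure), while EPERM means a process exists but is owned by another user, in which case the code falls back to a regular status check. The shell analogue, handy for checking a node manually (pid-file path assumes default prefixes):

  pid=$(cat /var/run/jnbg/jupyter_kernel_gateway.pid)
  if err=$(kill -0 "$pid" 2>&1); then
    echo "kernel gateway ($pid) is alive"
  elif [[ $err == *"Operation not permitted"* ]]; then
    echo "process exists but is owned by another user (the EPERM branch)"
  else
    echo "no such process; check the gateway log under the configured jkg_log_dir"
  fi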

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/jkg_toree_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/jkg_toree_params.py b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/jkg_toree_params.py
new file mode 100755
index 0000000..13a8aba
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/jkg_toree_params.py
@@ -0,0 +1,177 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions import get_kinit_path
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+import jnbg_helpers as helpers
+
+# Server configurations
+config = Script.get_config()
+stack_root = Script.get_stack_root()
+cluster_configs = config['clusterHostInfo']
+
+# Notebook service configs
+user = config['configurations']['jnbg-env']['notebook_user']
+group = config['configurations']['jnbg-env']['notebook_group']
+log_dir = config['configurations']['jnbg-env']['jkg_log_dir']
+jkg_pid_dir = config['configurations']['jnbg-env']['jkg_pid_dir_prefix']
+jkg_host = str(cluster_configs['kernel_gateway_hosts'][0])
+jkg_port = str(config['configurations']['jnbg-env']['jkg_port'])
+jkg_loglevel = str(config['configurations']['jnbg-env']['jkg_loglevel'])
+jkg_max_kernels = config['configurations']['jnbg-env']['max_kernels']
+jkg_cull_period = config['configurations']['jnbg-env']['cull_idle_kernel_period']
+jkg_cull_interval = config['configurations']['jnbg-env']['cull_idle_kernel_interval']
+py_executable = config['configurations']['jnbg-env']['python_interpreter_path']
+py_venv_pathprefix = config['configurations']['jnbg-env']['python_virtualenv_path_prefix']
+py_venv_restrictive = config['configurations']['jnbg-env']['python_virtualenv_restrictive']
+spark_sql_warehouse_dir = config['configurations']['jnbg-env']['spark_sql_warehouse_dir']
+pythonpath = config['configurations']['jnbg-env']['pythonpath']
+spark_home = format("{stack_root}/current/spark2-client")
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+#ui_ssl_enabled = config['configurations']['jnbg-env']['jnbg.ssl']
+ui_ssl_enabled = False
+spark_opts = str(config['configurations']['jnbg-env']['kernel_spark_opts'])
+modified_spark_opts = format("{spark_opts} --conf spark.sql.warehouse.dir={spark_sql_warehouse_dir}")
+modified_spark_opts = "'{0}'".format(modified_spark_opts)
+toree_opts = str(config['configurations']['jnbg-env']['toree_opts'])
+toree_opts = "'{0}'".format(toree_opts)
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+jkg_log_formatter_cmd = format("%(asctime)s,%(msecs)03d %(levelname)s %(name)s: %(message)s")
+jkg_log_formatter_cmd = "'{0}'".format(jkg_log_formatter_cmd)
+venv_owner="root" if py_venv_restrictive else user
+spark_config_dir = config['configurations']['jnbg-env']['spark_conf_dir']
+interpreters = "Scala"
+
+jnbg_kinit_cmd = ""
+if security_enabled:
+  _hostname_lowercase = config['hostname'].lower()
+  jnbg_kerberos_keytab = config['configurations']['jnbg-env']['jnbg.service.kerberos.keytab']
+  jnbg_kerberos_principal = config['configurations']['jnbg-env']['jnbg.service.kerberos.principal']
+  jnbg_kerberos_principal = jnbg_kerberos_principal.replace('_HOST',_hostname_lowercase)
+  jnbg_kinit_cmd = format("{kinit_path_local} -kt {jnbg_kerberos_keytab} {jnbg_kerberos_principal}; ")
+
+jnbg_kinit_arg = "'{0}'".format(jnbg_kinit_cmd)
+
+ambarisudo = AMBARI_SUDO_BINARY
+home_dir = format("/home/{user}")
+hdfs_home_dir = format("/user/{user}")
+jkg_pid_file = format("{jkg_pid_dir}/jupyter_kernel_gateway.pid")
+dirs = [(hdfs_home_dir, "0775"), (spark_sql_warehouse_dir, "01770")]
+package_dir = helpers.package_dir()
+sh_scripts_dir = format("{package_dir}files/")
+sh_scripts = ['jkg_install.sh',
+              'toree_install.sh',
+              'log4j_setup.sh',
+              'toree_configure.sh',
+              'pyspark_configure.sh',
+              'pythonenv_setup.sh']
+sh_scripts_user = ['jkg_start.sh']
+
+# Sequence of commands to be executed for JKG installation
+jkg_commands = []
+cmd_file_name = "jkg_install.sh"
+cmd_file_path = format("{package_dir}files/{cmd_file_name}")
+
+jkg_commands.append(ambarisudo + ' ' +
+                    cmd_file_path + ' ' +
+                    py_executable + ' ' +
+                    py_venv_pathprefix + ' ' +
+                    venv_owner + ' ' +
+                    jnbg_kinit_arg)
+
+# Sequence of commands executed for Toree installation
+toree_commands = []
+cmd_file_name = "toree_install.sh"
+cmd_file_path = format("{package_dir}files/{cmd_file_name}")
+
+toree_commands.append(ambarisudo + ' ' +
+                      cmd_file_path + ' ' +
+                      py_executable + ' ' +
+                      py_venv_pathprefix + ' ' +
+                      venv_owner + ' ' +
+                      jnbg_kinit_arg + ' ' +
+                      spark_home + ' ' +
+                      modified_spark_opts)
+
+# Sequence of commands executed for Toree configuration
+toree_configure_commands = []
+cmd_file_name = "toree_configure.sh"
+cmd_file_path = format("{package_dir}files/{cmd_file_name}")
+
+toree_configure_commands.append(ambarisudo + ' ' +
+                                cmd_file_path + ' ' +
+                                user + ' ' +
+                                py_executable + ' ' +
+                                py_venv_pathprefix + ' ' +
+                                venv_owner + ' ' +
+                                jnbg_kinit_arg + ' ' +
+                                spark_home + ' ' +
+                                interpreters + ' ' +
+                                toree_opts + ' ' +
+                                modified_spark_opts)
+
+# Sequence of commands executed for PySpark kernel configuration
+pyspark_configure_commands = []
+cmd_file_name = "pyspark_configure.sh"
+cmd_file_path = format("{package_dir}files/{cmd_file_name}")
+
+pyspark_configure_commands.append(ambarisudo + ' ' +
+                                  cmd_file_path + ' ' +
+                                  py_executable + ' ' +
+                                  py_venv_pathprefix + ' ' +
+                                  venv_owner + ' ' +
+                                  jnbg_kinit_arg + ' ' +
+                                  spark_home + ' ' +
+                                  pythonpath + ' ' +
+                                  modified_spark_opts)
+
+log4j_setup_commands = []
+cmd_file_name = "log4j_setup.sh"
+cmd_file_path = format("{package_dir}files/{cmd_file_name}")
+
+log4j_setup_commands.append(ambarisudo + ' ' +
+                            cmd_file_path + ' ' +
+                            spark_config_dir)
+
+# JKG startup command
+start_args = ['"jupyter kernelgateway' +
+              ' --ip=' + '0.0.0.0' +
+              ' --port=' + jkg_port +
+              ' --port_retries=' + '0' +
+              ' --log-level=' + jkg_loglevel +
+              ' --KernelGatewayApp.max_kernels=' + jkg_max_kernels,
+              ' --KernelGatewayApp.cull_idle_kernel_period=' + jkg_cull_period,
+              ' --KernelGatewayApp.cull_idle_kernel_interval=' + jkg_cull_interval,
+              ' --KernelSpecManager.ensure_native_kernel=' + 'False',
+              ' --KernelGatewayApp.log_format=' + jkg_log_formatter_cmd,
+              ' --JupyterWebsocketPersonality.list_kernels=' + 'True "',
+              spark_home,
+              py_executable,
+              py_venv_pathprefix,
+              jnbg_kinit_arg,
+              log_dir + "/jupyter_kernel_gateway.log",
+              jkg_pid_file]
+
+cmd_file_name = "jkg_start.sh"
+cmd_file_path = format("{package_dir}files/{cmd_file_name}")
+start_command = cmd_file_path + ' ' + ' '.join(start_args)

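A note for reviewers on how the pieces above combine: jkg_start.sh receives one big double-quoted kernelgateway command followed by the positional arguments. A minimal sketch, with invented stand-in values for the params (none of these are the stack defaults), of what the join produces:

# Sketch only: stand-in values for the parameters referenced above.
jkg_port = "8888"
jkg_loglevel = "INFO"
jkg_max_kernels = "10"
cmd_file_path = "/path/to/package/files/jkg_start.sh"   # hypothetical

start_args = ['"jupyter kernelgateway' +
              ' --ip=0.0.0.0' +
              ' --port=' + jkg_port +
              ' --log-level=' + jkg_loglevel +
              ' --KernelGatewayApp.max_kernels=' + jkg_max_kernels,
              ' --JupyterWebsocketPersonality.list_kernels=' + 'True "',
              "/usr/iop/current/spark2-client",   # spark_home (assumed)
              "/var/run/jnbg/jkg.pid"]            # jkg_pid_file (assumed)

print(cmd_file_path + ' ' + ' '.join(start_args))

Because every element after the first already begins with a space, the join yields double spaces between options; the shell collapses them, but they do show up in logged command lines.
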
http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/jnbg_helpers.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/jnbg_helpers.py b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/jnbg_helpers.py
new file mode 100755
index 0000000..4d126e3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/jnbg_helpers.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os, pwd, grp
+from resource_management.core.resources.system import Execute, File
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.get_user_call_output import get_user_call_output
+from resource_management.libraries.functions.show_logs import show_logs
+#from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+
+def package_dir():
+  return os.path.realpath(__file__).split('/package')[0] + '/package/'
+
+def create_linux_user(user, group):
+  sudo = AMBARI_SUDO_BINARY
+
+  try: pwd.getpwnam(user)
+  except KeyError: Execute(format("{sudo} useradd ") + user, logoutput=True)
+  try: grp.getgrnam(group)
+  except KeyError: Execute(format("{sudo} groupadd ") + group, logoutput=True)
+
+def create_hdfs_dirs(user, group, dirs):
+  import jnbg_params as params
+  for dir, perms in dirs:
+    params.HdfsResource(dir,
+                        type = "directory",
+                        action = "create_on_execute",
+                        owner = user,
+                        group = group,
+                        mode = int(perms, 8)
+                       )
+  params.HdfsResource(None, action="execute")
+ 
+def stop_process(pid_file, user, log_dir):
+  """
+  Kill the process by pid file, then check the process is running or not.
+  If the process is still running after the kill command, try to kill
+  with -9 option (hard kill)
+  """
+
+  sudo = AMBARI_SUDO_BINARY
+  pid = get_user_call_output(format("cat {pid_file}"), user=user, is_checked_call=False)[1]
+  process_id_exists_command = format("ls {pid_file} >/dev/null 2>&1 && ps -p {pid} >/dev/null 2>&1")
+
+  kill_cmd = format("{sudo} kill {pid}")
+  Execute(kill_cmd, not_if=format("! ({process_id_exists_command})"))
+
+  wait_time = 5
+  hard_kill_cmd = format("{sudo} kill -9 {pid}")
+  Execute(hard_kill_cmd,
+          not_if=format("! ({process_id_exists_command}) || ( sleep {wait_time} && ! ({process_id_exists_command}) )"),
+          ignore_failures = True)
+
+  try:
+    Execute(format("! ({process_id_exists_command})"),
+            tries=20,
+            try_sleep=3)
+  except:
+    show_logs(log_dir, user)
+    raise
+
+  File(pid_file, action="delete")

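The doubled-up not_if guards above are dense; the strategy is simply terminate, wait, escalate, verify. A self-contained sketch of the same idea in plain Python (no Ambari Execute resource, no sudo handling), assuming the caller has already read the pid:

import os, signal, time

def stop_pid(pid, wait_time=5):
    """Graceful kill, then hard kill if the process survives wait_time seconds."""
    try:
        os.kill(pid, signal.SIGTERM)      # same intent as "{sudo} kill {pid}"
    except OSError:
        return                            # process already gone
    time.sleep(wait_time)
    try:
        os.kill(pid, 0)                   # signal 0 only probes for existence
    except OSError:
        return                            # it exited during the grace period
    os.kill(pid, signal.SIGKILL)          # same intent as "{sudo} kill -9 {pid}"
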
http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/jnbg_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/jnbg_params.py b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/jnbg_params.py
new file mode 100755
index 0000000..82660ab
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/jnbg_params.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+
+import functools
+
+# for create_hdfs_dirs (jnbg_helpers), via the HdfsResource partial defined below
+
+# server configurations
+config = Script.get_config()
+
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+hostname = config["hostname"]
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+dfs_type = default("/commandParams/dfs_type", "")
+
+# create partial functions with common arguments for every HdfsResource call
+# to create hdfs directory we need to import this and call HdfsResource in code
+
+HdfsResource = functools.partial(
+  HdfsResource,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+  security_enabled = security_enabled,
+  user = hdfs_user,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
+  dfs_type = dfs_type
+)

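The functools.partial trick above is worth calling out: it pre-binds the dozen keyword arguments shared by every HdfsResource call, so call sites such as create_hdfs_dirs() in jnbg_helpers.py pass only what varies. In miniature (toy function and values, not the real resource):

import functools

def hdfs_resource(path, type=None, action=None, owner=None, group=None,
                  user=None, keytab=None):
    print("HdfsResource(%s): type=%s action=%s owner=%s group=%s user=%s" %
          (path, type, action, owner, group, user))

# Pre-bind the cluster-wide arguments once...
HdfsResource = functools.partial(hdfs_resource,
                                 user="hdfs",
                                 keytab="/etc/security/keytabs/hdfs.headless.keytab")

# ...and call sites stay short, exactly as in create_hdfs_dirs():
HdfsResource("/user/notebook", type="directory",
             action="create_on_execute", owner="notebook", group="hadoop")
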
http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/py_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/py_client.py b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/py_client.py
new file mode 100755
index 0000000..094edde
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/py_client.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+from resource_management.core.resources.system import Execute, File, Directory
+from resource_management.core.source import StaticFile, InlineTemplate, Template
+from resource_management.core.exceptions import ClientComponentHasNoStatus
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.format import format
+import jnbg_helpers as helpers
+
+class PyClient(Script):
+  def install(self, env):
+    import py_client_params as params
+    from jkg_toree_params import user, group, sh_scripts_dir, sh_scripts, sh_scripts_user
+
+    # Setup bash scripts for execution
+    for sh_script in sh_scripts:
+      File(sh_scripts_dir + os.sep + sh_script,
+           content=StaticFile(sh_script),
+           mode=0750
+          )
+    for sh_script in sh_scripts_user:
+      File(sh_scripts_dir + os.sep + sh_script,
+           content=StaticFile(sh_script),
+           mode=0755
+          )
+
+    self.install_packages(env)
+    self.configure(env)
+
+    # Create user and group if they don't exist
+    helpers.create_linux_user(user, group)
+
+    # Run install commands for Python client defined in params
+    for command in params.commands: Execute(command, logoutput=True)
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+  def configure(self, env):
+    import py_client_params as params
+    env.set_params(params)
+
+if __name__ == "__main__":
+  PyClient().execute()


[27/50] [abbrv] ambari git commit: AMBARI-19038. Support migration of LDAP users & groups to PAM (rlevas)

Posted by jo...@apache.org.
AMBARI-19038. Support migration of LDAP users & groups to PAM (rlevas)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/13a6fa84
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/13a6fa84
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/13a6fa84

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 13a6fa84542e9f69f85c171007b73636f5130a09
Parents: 84b3c71
Author: Robert Levas <rl...@hortonworks.com>
Authored: Fri Jul 14 09:57:35 2017 -0400
Committer: Robert Levas <rl...@hortonworks.com>
Committed: Fri Jul 14 09:57:35 2017 -0400

----------------------------------------------------------------------
 .../controllers/groups/GroupsEditCtrl.js        |   3 +
 ambari-server/pom.xml                           |   2 +-
 ambari-server/sbin/ambari-server                |   6 +-
 .../LdapToPamMigrationHelper.java               |  73 ++++++++++++
 .../server/security/authorization/Users.java    |   4 +
 ambari-server/src/main/python/ambari-server.py  |  10 +-
 .../main/python/ambari_server/setupActions.py   |   1 +
 .../main/python/ambari_server/setupSecurity.py  | 119 ++++++++++++++++---
 8 files changed, 195 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/13a6fa84/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/groups/GroupsEditCtrl.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/groups/GroupsEditCtrl.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/groups/GroupsEditCtrl.js
index c61e71c..e8c18ad 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/groups/GroupsEditCtrl.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/groups/GroupsEditCtrl.js
@@ -68,6 +68,7 @@ angular.module('ambariAdminConsole')
 
   function loadMembers(){
     $scope.group.getMembers().then(function(members) {
+      $scope.group.groupTypeName = $t(GroupConstants.TYPES[$scope.group.group_type].LABEL_KEY);
       $scope.groupMembers = members;
       $scope.group.editingUsers = angular.copy($scope.groupMembers);
     });
@@ -81,6 +82,8 @@ angular.module('ambariAdminConsole')
     loadMembers();
   });
 
+  $scope.group.getGroupType();
+
   $scope.deleteGroup = function(group) {
     ConfirmationModal.show(
       $t('common.delete', {

http://git-wip-us.apache.org/repos/asf/ambari/blob/13a6fa84/ambari-server/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-server/pom.xml b/ambari-server/pom.xml
index b1179a4..b3e1e9f 100644
--- a/ambari-server/pom.xml
+++ b/ambari-server/pom.xml
@@ -1520,7 +1520,7 @@
     <dependency>
       <groupId>net.java.dev.jna</groupId>
       <artifactId>jna</artifactId>
-      <version>4.1.0</version>
+      <version>4.3.0</version>
     </dependency>
     <dependency>
       <groupId>io.dropwizard.metrics</groupId>

http://git-wip-us.apache.org/repos/asf/ambari/blob/13a6fa84/ambari-server/sbin/ambari-server
----------------------------------------------------------------------
diff --git a/ambari-server/sbin/ambari-server b/ambari-server/sbin/ambari-server
index cc1c923..d51cbfa 100755
--- a/ambari-server/sbin/ambari-server
+++ b/ambari-server/sbin/ambari-server
@@ -130,6 +130,10 @@ case "${1:-}" in
         echo -e "Setting up PAM properties..."
         $PYTHON "$AMBARI_PYTHON_EXECUTABLE" $@
         ;;
+  migrate-ldap-pam)
+        echo -e "Migration LDAP to PAM"
+        $PYTHON "$AMBARI_PYTHON_EXECUTABLE" $@
+        ;;
   setup-ldap)
         echo -e "Setting up LDAP properties..."
         $PYTHON "$AMBARI_PYTHON_EXECUTABLE" $@
@@ -196,7 +200,7 @@ case "${1:-}" in
         ;;
   *)
         echo "Usage: $AMBARI_EXECUTABLE
-        {start|stop|reset|restart|upgrade|status|upgradestack|setup|setup-jce|setup-ldap|sync-ldap|set-current|setup-security|refresh-stack-hash|backup|restore|update-host-names|check-database|enable-stack|setup-sso|db-purge-history|install-mpack|uninstall-mpack|upgrade-mpack|setup-kerberos} [options]
+        {start|stop|reset|restart|upgrade|status|upgradestack|setup|setup-jce|setup-ldap|sync-ldap|set-current|setup-security|refresh-stack-hash|backup|restore|update-host-names|check-database|enable-stack|setup-sso|db-purge-history|install-mpack|uninstall-mpack|upgrade-mpack|setup-kerberos|setup-pam|migrate-ldap-pam} [options]
         Use $AMBARI_PYTHON_EXECUTABLE <action> --help to get details on options available.
         Or, simply invoke ambari-server.py --help to print the options."
         exit 1

http://git-wip-us.apache.org/repos/asf/ambari/blob/13a6fa84/ambari-server/src/main/java/org/apache/ambari/server/security/authentication/LdapToPamMigrationHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/security/authentication/LdapToPamMigrationHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/security/authentication/LdapToPamMigrationHelper.java
new file mode 100644
index 0000000..8a3a012
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/security/authentication/LdapToPamMigrationHelper.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.security.authentication;
+
+import java.sql.SQLException;
+
+import org.apache.ambari.server.audit.AuditLoggerModule;
+import org.apache.ambari.server.controller.ControllerModule;
+import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.orm.DBAccessor.DbType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.inject.Guice;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+
+public class LdapToPamMigrationHelper {
+  private static final Logger LOG = LoggerFactory.getLogger(LdapToPamMigrationHelper.class);
+
+  @Inject
+  private DBAccessor dbAccessor;
+
+  /**
+   * Migrate LDAP user & groups to PAM
+   *
+   * @throws SQLException if an error occurs while executing the needed SQL statements
+   */
+  private void migrateLdapUsersGroups() throws SQLException {
+    if (dbAccessor.getDbType() != DbType.ORACLE) { // Tested on MySQL and PostgreSQL
+      dbAccessor.executeQuery("UPDATE users SET user_type='PAM',ldap_user=0 WHERE ldap_user=1 and user_name not in (select user_name from (select user_name from users where user_type = 'PAM') as a)");
+      dbAccessor.executeQuery("UPDATE groups SET group_type='PAM',ldap_group=0 WHERE ldap_group=1 and group_name not in (select group_name from (select group_name from groups where group_type = 'PAM') as a)");
+    } else { // Tested on Oracle
+      dbAccessor.executeQuery("UPDATE users SET user_type='PAM',ldap_user=0 WHERE ldap_user=1 and user_name not in (select user_name from users where user_type = 'PAM')");
+      dbAccessor.executeQuery("UPDATE groups SET group_type='PAM',ldap_group=0 WHERE ldap_group=1 and group_name not in (select group_name from groups where group_type = 'PAM')");
+    }
+  }
+
+  /**
+   * Support changes needed to migrate LDAP users & groups to PAM
+   *
+   * @param args Simple key value json map
+   */
+  public static void main(String[] args) {
+
+    try {
+      Injector injector = Guice.createInjector(new ControllerModule(), new AuditLoggerModule());
+      LdapToPamMigrationHelper migrationHelper = injector.getInstance(LdapToPamMigrationHelper.class);
+
+      migrationHelper.migrateLdapUsersGroups();
+
+    } catch (Throwable t) {
+      LOG.error("Caught exception on migration. Exiting...", t);
+      System.exit(1);
+    }
+
+  }
+}
\ No newline at end of file

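The two branches differ only in SQL dialect: MySQL raises error 1093 if an UPDATE selects from the table it is modifying, hence the derived-table wrapper ("select ... from (select ...) as a"); Oracle accepts the direct subquery. The dedup rule itself, sketched with sqlite3 purely for illustration:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript("""
  CREATE TABLE users (user_name TEXT, user_type TEXT, ldap_user INTEGER);
  INSERT INTO users VALUES ('alice', 'LDAP', 1), ('bob', 'LDAP', 1), ('bob', 'PAM', 0);
""")

# Flip LDAP users to PAM unless a PAM entry with the same name already exists,
# mirroring the NOT IN condition in migrateLdapUsersGroups().
conn.execute("""
  UPDATE users SET user_type = 'PAM', ldap_user = 0
  WHERE ldap_user = 1
    AND user_name NOT IN (SELECT user_name FROM users WHERE user_type = 'PAM')
""")

print(conn.execute("SELECT user_name, user_type FROM users ORDER BY user_name").fetchall())
# alice becomes PAM; bob's LDAP row is skipped because a PAM 'bob' already exists
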
http://git-wip-us.apache.org/repos/asf/ambari/blob/13a6fa84/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java b/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java
index 83edccc..4667ff6 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java
@@ -162,6 +162,10 @@ public class Users {
     if (userEntity != null) {
       userEntities.add(userEntity);
     }
+    userEntity = userDAO.findUserByNameAndType(userName, UserType.PAM);
+    if (userEntity != null) {
+      userEntities.add(userEntity);
+    }
     return (userEntities.isEmpty() || userEntities.size() > 1) ? null : new User(userEntities.get(0));
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/13a6fa84/ambari-server/src/main/python/ambari-server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari-server.py b/ambari-server/src/main/python/ambari-server.py
index 71459fb..b25cd16 100755
--- a/ambari-server/src/main/python/ambari-server.py
+++ b/ambari-server/src/main/python/ambari-server.py
@@ -52,8 +52,8 @@ from ambari_server.setupActions import BACKUP_ACTION, LDAP_SETUP_ACTION, LDAP_SY
   SETUP_ACTION, SETUP_SECURITY_ACTION,START_ACTION, STATUS_ACTION, STOP_ACTION, RESTART_ACTION, UPGRADE_ACTION, \
   UPGRADE_STACK_ACTION, SETUP_JCE_ACTION, SET_CURRENT_ACTION, START_ACTION, STATUS_ACTION, STOP_ACTION, UPGRADE_ACTION, \
   UPGRADE_STACK_ACTION, SETUP_JCE_ACTION, SET_CURRENT_ACTION, ENABLE_STACK_ACTION, SETUP_SSO_ACTION, \
-  DB_PURGE_ACTION, INSTALL_MPACK_ACTION, UNINSTALL_MPACK_ACTION, UPGRADE_MPACK_ACTION, PAM_SETUP_ACTION, KERBEROS_SETUP_ACTION
-from ambari_server.setupSecurity import setup_ldap, sync_ldap, setup_master_key, setup_ambari_krb5_jaas, setup_pam
+  DB_PURGE_ACTION, INSTALL_MPACK_ACTION, UNINSTALL_MPACK_ACTION, UPGRADE_MPACK_ACTION, PAM_SETUP_ACTION, MIGRATE_LDAP_PAM_ACTION, KERBEROS_SETUP_ACTION
+from ambari_server.setupSecurity import setup_ldap, sync_ldap, setup_master_key, setup_ambari_krb5_jaas, setup_pam, migrate_ldap_pam
 from ambari_server.userInput import get_validated_string_input
 from ambari_server.kerberos_setup import setup_kerberos
 
@@ -577,6 +577,9 @@ def init_parser_options(parser):
   parser.add_option('--ldap-sync-admin-password', default=None, help="Password for LDAP sync", dest="ldap_sync_admin_password")
   parser.add_option('--ldap-sync-username-collisions-behavior', default=None, help="Handling behavior for username collisions [convert/skip] for LDAP sync", dest="ldap_sync_username_collisions_behavior")
 
+  parser.add_option('--pam-config-file', default=None, help="Path to the PAM configuration file", dest="pam_config_file")
+  parser.add_option('--pam-auto-create-groups', default=None, help="Automatically create groups for authenticated users [true/false]", dest="pam_auto_create_groups")
+
   parser.add_option('--truststore-type', default=None, help="Type of TrustStore (jks|jceks|pkcs12)", dest="trust_store_type")
   parser.add_option('--truststore-path', default=None, help="Path of TrustStore", dest="trust_store_path")
   parser.add_option('--truststore-password', default=None, help="Password for TrustStore", dest="trust_store_password")
@@ -766,7 +769,8 @@ def create_user_action_map(args, options):
         INSTALL_MPACK_ACTION: UserAction(install_mpack, options),
         UNINSTALL_MPACK_ACTION: UserAction(uninstall_mpack, options),
         UPGRADE_MPACK_ACTION: UserAction(upgrade_mpack, options),
-        PAM_SETUP_ACTION: UserAction(setup_pam),
+        PAM_SETUP_ACTION: UserAction(setup_pam, options),
+        MIGRATE_LDAP_PAM_ACTION: UserAction(migrate_ldap_pam, options),
         KERBEROS_SETUP_ACTION: UserAction(setup_kerberos, options)
       }
   return action_map

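For context, create_user_action_map ties each CLI verb to a UserAction; the change above just threads the parsed options into setup_pam and registers the new migrate-ldap-pam verb. A reduced sketch of the dispatch pattern (the real UserAction in ambari-server.py carries more plumbing):

class UserAction:
    def __init__(self, fn, options=None):
        self.fn = fn
        self.options = options
    def execute(self):
        return self.fn(self.options) if self.options is not None else self.fn()

def setup_pam(options):
    print("setup-pam called with %s" % options)

def migrate_ldap_pam(options):
    print("migrate-ldap-pam called with %s" % options)

action_map = {
    "setup-pam": UserAction(setup_pam, {"pam_config_file": "/etc/pam.d/ambari"}),
    "migrate-ldap-pam": UserAction(migrate_ldap_pam, {}),
}
action_map["migrate-ldap-pam"].execute()
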
http://git-wip-us.apache.org/repos/asf/ambari/blob/13a6fa84/ambari-server/src/main/python/ambari_server/setupActions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/setupActions.py b/ambari-server/src/main/python/ambari_server/setupActions.py
index 142a4d7..926db98 100644
--- a/ambari-server/src/main/python/ambari_server/setupActions.py
+++ b/ambari-server/src/main/python/ambari_server/setupActions.py
@@ -48,4 +48,5 @@ INSTALL_MPACK_ACTION = "install-mpack"
 UNINSTALL_MPACK_ACTION = "uninstall-mpack"
 UPGRADE_MPACK_ACTION = "upgrade-mpack"
 PAM_SETUP_ACTION = "setup-pam"
+MIGRATE_LDAP_PAM_ACTION = "migrate-ldap-pam"
 KERBEROS_SETUP_ACTION = "setup-kerberos"

http://git-wip-us.apache.org/repos/asf/ambari/blob/13a6fa84/ambari-server/src/main/python/ambari_server/setupSecurity.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/setupSecurity.py b/ambari-server/src/main/python/ambari_server/setupSecurity.py
index 17d1025..f175d7c 100644
--- a/ambari-server/src/main/python/ambari_server/setupSecurity.py
+++ b/ambari-server/src/main/python/ambari_server/setupSecurity.py
@@ -37,9 +37,9 @@ from ambari_commons.os_check import OSConst
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from ambari_commons.os_utils import is_root, set_file_permissions, \
   run_os_command, search_file, is_valid_filepath, change_owner, get_ambari_repo_file_full_name, get_file_owner
-from ambari_server.serverConfiguration import configDefaults, \
+from ambari_server.serverConfiguration import configDefaults, parse_properties_file, \
   encrypt_password, find_jdk, find_properties_file, get_alias_string, get_ambari_properties, get_conf_dir, \
-  get_credential_store_location, get_is_persisted, get_is_secure, get_master_key_location, write_property, \
+  get_credential_store_location, get_is_persisted, get_is_secure, get_master_key_location, get_db_type, write_property, \
   get_original_master_key, get_value_from_properties, get_java_exe_path, is_alias_string, read_ambari_user, \
   read_passwd_for_alias, remove_password_file, save_passwd_for_alias, store_password_file, update_properties_2, \
   BLIND_PASSWORD, BOOTSTRAP_DIR_PROPERTY, IS_LDAP_CONFIGURED, JDBC_PASSWORD_FILENAME, JDBC_PASSWORD_PROPERTY, \
@@ -54,6 +54,8 @@ from ambari_server.serverUtils import is_server_runing, get_ambari_server_api_ba
 from ambari_server.setupActions import SETUP_ACTION, LDAP_SETUP_ACTION
 from ambari_server.userInput import get_validated_string_input, get_prompt_default, read_password, get_YN_input, quit_if_has_answer
 from ambari_server.serverClassPath import ServerClassPath
+from ambari_server.dbConfiguration import DBMSConfigFactory, check_jdbc_drivers, \
+  get_jdbc_driver_path, ensure_jdbc_driver_is_installed, LINUX_DBMS_KEYS_LIST
 
 logger = logging.getLogger(__name__)
 
@@ -64,6 +66,9 @@ REGEX_TRUE_FALSE = "^(true|false)?$"
 REGEX_SKIP_CONVERT = "^(skip|convert)?$"
 REGEX_REFERRAL = "^(follow|ignore)?$"
 REGEX_ANYTHING = ".*"
+LDAP_TO_PAM_MIGRATION_HELPER_CMD = "{0} -cp {1} " + \
+                                   "org.apache.ambari.server.security.authentication.LdapToPamMigrationHelper" + \
+                                   " >> " + configDefaults.SERVER_OUT_FILE + " 2>&1"
 
 CLIENT_SECURITY_KEY = "client.security"
 
@@ -621,8 +626,12 @@ def setup_ldap(options):
   properties = get_ambari_properties()
 
   if get_value_from_properties(properties,CLIENT_SECURITY_KEY,"") == 'pam':
-    err = "PAM is configured. Can not setup LDAP."
-    raise FatalException(1, err)
+    query = "PAM is currently configured, do you wish to use LDAP instead [y/n] (n)? "
+    if get_YN_input(query, False):
+      pass
+    else:
+      err = "PAM is configured. Can not setup LDAP."
+      raise FatalException(1, err)
 
   isSecure = get_is_secure(properties)
 
@@ -824,38 +833,112 @@ def ensure_can_start_under_current_user(ambari_user):
   return current_user
 
 class PamPropTemplate:
-  def __init__(self, properties, i_prop_name, i_prop_val_pattern, i_prompt_regex, i_allow_empty_prompt, i_prop_name_default=None):
+  def __init__(self, properties, i_option, i_prop_name, i_prop_val_pattern, i_prompt_regex, i_allow_empty_prompt, i_prop_name_default=None):
     self.prop_name = i_prop_name
+    self.option = i_option
     self.pam_prop_name = get_value_from_properties(properties, i_prop_name, i_prop_name_default)
     self.pam_prop_val_prompt = i_prop_val_pattern.format(get_prompt_default(self.pam_prop_name))
     self.prompt_regex = i_prompt_regex
     self.allow_empty_prompt = i_allow_empty_prompt
 
-def setup_pam():
+def init_pam_properties_list_reqd(properties, options):
+  properties = [
+    PamPropTemplate(properties, options.pam_config_file, PAM_CONFIG_FILE, "PAM configuration file* {0}: ", REGEX_ANYTHING, False, "/etc/pam.d/ambari"),
+    PamPropTemplate(properties, options.pam_auto_create_groups, AUTO_GROUP_CREATION, "Do you want to allow automatic group creation* [true/false] {0}: ", REGEX_TRUE_FALSE, False, "false"),
+  ]
+  return properties
+
+def setup_pam(options):
   if not is_root():
-    err = 'Ambari-server setup-pam should be run with ' \
-          'root-level privileges'
+    err = 'Ambari-server setup-pam should be run with root-level privileges'
     raise FatalException(4, err)
 
   properties = get_ambari_properties()
 
   if get_value_from_properties(properties,CLIENT_SECURITY_KEY,"") == 'ldap':
-    err = "LDAP is configured. Can not setup PAM."
-    raise FatalException(1, err)
+    query = "LDAP is currently configured, do you wish to use PAM instead [y/n] (n)? "
+    if get_YN_input(query, False):
+      pass
+    else:
+      err = "LDAP is configured. Can not setup PAM."
+      raise FatalException(1, err)
+
+  pam_property_list_reqd = init_pam_properties_list_reqd(properties, options)
 
   pam_property_value_map = {}
   pam_property_value_map[CLIENT_SECURITY_KEY] = 'pam'
 
-  pamConfig = get_validated_string_input("Enter PAM configuration file: ", PAM_CONFIG_FILE, REGEX_ANYTHING,
-                                         "Invalid characters in the input!", False, False)
-
-  pam_property_value_map[PAM_CONFIG_FILE] = pamConfig
+  for pam_prop in pam_property_list_reqd:
+    input = get_validated_string_input(pam_prop.pam_prop_val_prompt, pam_prop.pam_prop_name, pam_prop.prompt_regex,
+                                       "Invalid characters in the input!", False, pam_prop.allow_empty_prompt,
+                                       answer = pam_prop.option)
+    if input is not None and input != "":
+      pam_property_value_map[pam_prop.prop_name] = input
 
-  if get_YN_input("Do you want to allow automatic group creation [y/n] (y)? ", True):
-    pam_property_value_map[AUTO_GROUP_CREATION] = 'true'
-  else:
-    pam_property_value_map[AUTO_GROUP_CREATION] = 'false'
+  # Verify that the PAM config file exists, else show warning...
+  pam_config_file = pam_property_value_map[PAM_CONFIG_FILE]
+  if not os.path.exists(pam_config_file):
+    print_warning_msg("The PAM configuration file, {0} does not exist.  " \
+                      "Please create it before restarting Ambari.".format(pam_config_file))
 
   update_properties_2(properties, pam_property_value_map)
   print 'Saving...done'
   return 0
+
+#
+# Migration of LDAP users & groups to PAM
+#
+def migrate_ldap_pam(args):
+  properties = get_ambari_properties()
+
+  if get_value_from_properties(properties,CLIENT_SECURITY_KEY,"") != 'pam':
+    err = "PAM is not configured. Please configure PAM authentication first."
+    raise FatalException(1, err)
+
+  db_title = get_db_type(properties).title
+  confirm = get_YN_input("Ambari Server configured for %s. Confirm "
+                        "you have made a backup of the Ambari Server database [y/n] (y)? " % db_title, True)
+
+  if not confirm:
+    print_error_msg("Database backup is not confirmed")
+    return 1
+
+  jdk_path = get_java_exe_path()
+  if jdk_path is None:
+    print_error_msg("No JDK found, please run the \"setup\" "
+                    "command to install a JDK automatically or install any "
+                    "JDK manually to " + configDefaults.JDK_INSTALL_DIR)
+    return 1
+
+  # At this point, the args does not have the ambari database information.
+  # Augment the args with the correct ambari database information
+  parse_properties_file(args)
+
+  ensure_jdbc_driver_is_installed(args, properties)
+
+  print 'Migrating LDAP Users & Groups to PAM'
+
+  serverClassPath = ServerClassPath(properties, args)
+  class_path = serverClassPath.get_full_ambari_classpath_escaped_for_shell()
+
+  command = LDAP_TO_PAM_MIGRATION_HELPER_CMD.format(jdk_path, class_path)
+
+  ambari_user = read_ambari_user()
+  current_user = ensure_can_start_under_current_user(ambari_user)
+  environ = generate_env(args, ambari_user, current_user)
+
+  (retcode, stdout, stderr) = run_os_command(command, env=environ)
+  print_info_msg("Return code from LDAP to PAM migration command, retcode = " + str(retcode))
+  if stdout:
+    print "Console output from LDAP to PAM migration command:"
+    print stdout
+    print
+  if stderr:
+    print "Error output from LDAP to PAM migration command:"
+    print stderr
+    print
+  if retcode > 0:
+    print_error_msg("Error executing LDAP to PAM migration, please check the server logs.")
+  else:
+    print_info_msg('LDAP to PAM migration completed')
+  return retcode

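migrate_ldap_pam ultimately shells out to the JVM helper via LDAP_TO_PAM_MIGRATION_HELPER_CMD. A stripped-down sketch of that last mile (all paths are placeholders; Ambari resolves the real ones from its properties file and ServerClassPath):

import subprocess

jdk_path = "/usr/lib/jvm/java/bin/java"                             # placeholder
class_path = "'/etc/ambari-server/conf:/usr/lib/ambari-server/*'"   # placeholder
server_out = "/var/log/ambari-server/ambari-server.out"             # placeholder

cmd = "{0} -cp {1} org.apache.ambari.server.security.authentication.LdapToPamMigrationHelper >> {2} 2>&1".format(
    jdk_path, class_path, server_out)

retcode = subprocess.call(cmd, shell=True)
print("Return code from LDAP to PAM migration command, retcode = " + str(retcode))
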

[31/50] [abbrv] ambari git commit: AMBARI-21460. Add new kafka client properties to the ambari managed atlas config (smohanty)

Posted by jo...@apache.org.
AMBARI-21460. Add new kafka client properties to the ambari managed atlas config (smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4ddbd624
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4ddbd624
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4ddbd624

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 4ddbd6246b5718f30912dec0f30939ac33b052dc
Parents: 48bc763
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Fri Jul 14 15:42:52 2017 -0700
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Fri Jul 14 15:43:01 2017 -0700

----------------------------------------------------------------------
 .../4.0/stack-advisor/stack_advisor_25.py          |  3 ++-
 .../stacks/HDP/2.5/upgrades/config-upgrade.xml     |  4 ++++
 .../HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml    |  6 +++++-
 .../stacks/HDP/2.5/upgrades/upgrade-2.6.xml        |  1 +
 .../ATLAS/configuration/application-properties.xml | 17 +++++++++++++++++
 .../stacks/HDP/2.6/upgrades/config-upgrade.xml     |  4 ++++
 .../HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml    |  4 ++++
 .../stacks/HDP/2.6/upgrades/upgrade-2.6.xml        |  1 +
 8 files changed, 38 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/4ddbd624/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_25.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_25.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_25.py
index eb7d370..7e77382 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_25.py
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_25.py
@@ -734,7 +734,7 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
         self.checkAndStopLlapQueue(services, configurations, LLAP_QUEUE_NAME)
 
     putYarnSiteProperty = self.putProperty(configurations, "yarn-site", services)
-    stack_root = self.getStackRoot(services)
+    stack_root = self.getStackRoot(services)
 
     timeline_plugin_classes_values = []
     timeline_plugin_classpath_values = []

http://git-wip-us.apache.org/repos/asf/ambari/blob/4ddbd624/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
index 7fedc3a..6166318 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
@@ -123,6 +123,10 @@
             <type>atlas-env</type>
             <replace key="content" find="-Xloggc:$ATLAS_LOG_DIRgc-worker.log" replace-with="-Xloggc:$ATLAS_LOG_DIR/gc-worker.log"/>
           </definition>
+          <definition xsi:type="configure" id="hdp_2_6_atlas_kafka_auto_commit_enable_property_delete" summary="Updating Atlas Kafka configurations.">
+            <type>application-properties</type>
+            <transfer operation="delete" delete-key="atlas.kafka.auto.commit.enable"/>
+          </definition>
         </changes>
       </component>
       <component name="SPARK_CLIENT">

http://git-wip-us.apache.org/repos/asf/ambari/blob/4ddbd624/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
index 3cab083..ce10e8b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
@@ -453,7 +453,7 @@
           <summary>Updating the Atlas Log4J properties to include parameterizations</summary>
         </task>
       </execute-stage>
-      
+
       <execute-stage service="ATLAS" component="ATLAS_SERVER" title="Updating Atlas TLS Exclude Protocols">
         <task xsi:type="configure" id="hdp_2_5_4_0_atlas_exclude_tls_protocol">
           <summary>Updating Atlas TLS Exclude Protocols to exclude TLS v1.2</summary>
@@ -472,6 +472,10 @@
         </task>
       </execute-stage>
 
+      <execute-stage service="ATLAS" component="ATLAS_SERVER" title="Updating Atlas Kafka configurations.">
+        <task xsi:type="configure" id="hdp_2_6_atlas_kafka_auto_commit_enable_property_delete"/>
+      </execute-stage>
+
       <!--KAFKA-->
       <execute-stage service="KAFKA" component="KAFKA_BROKER" title="Parameterizing Kafka Log4J Properties">
         <task xsi:type="configure" id="kafka_log4j_parameterize">

http://git-wip-us.apache.org/repos/asf/ambari/blob/4ddbd624/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
index 22c9a8d..840b17d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
@@ -1101,6 +1101,7 @@
           <task xsi:type="configure" id="hdp_2_5_4_0_atlas_exclude_tls_protocol"/>
           <task xsi:type="configure" id="increase_atlas_zookeeper_timeouts"/>
           <task xsi:type="configure" id="atlas_env_gc_worker"/>
+          <task xsi:type="configure" id="hdp_2_6_atlas_kafka_auto_commit_enable_property_delete"/>
         </pre-upgrade>
         <pre-downgrade />
         <upgrade>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4ddbd624/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/configuration/application-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/configuration/application-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/configuration/application-properties.xml
index 91de1b0..c271dc3 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/configuration/application-properties.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/configuration/application-properties.xml
@@ -92,4 +92,21 @@
     </value-attributes>
     <on-ambari-upgrade add="true"/>
   </property>
+  <property>
+    <name>atlas.kafka.session.timeout.ms</name>
+    <value>30000</value>
+    <description>New Kafka consumer API</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.kafka.enable.auto.commit</name>
+    <value>false</value>
+    <description>New Kafka consumer API</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.kafka.auto.commit.enable</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
 </configuration>

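Net effect of this file plus the config-upgrade task: the pre-2.6 consumer property atlas.kafka.auto.commit.enable is dropped and the new consumer API names take over. Expressed as a plain dict transform (a sketch, not Ambari's actual mechanism; the real work is done by the XML definitions above):

def upgrade_atlas_kafka_props(props):
    # Removed by the "transfer operation=delete" upgrade task.
    props.pop("atlas.kafka.auto.commit.enable", None)
    # The new properties are marked on-ambari-upgrade add="false" above;
    # setdefault() models them as stack defaults rather than forced overwrites.
    props.setdefault("atlas.kafka.enable.auto.commit", "false")
    props.setdefault("atlas.kafka.session.timeout.ms", "30000")
    return props

print(upgrade_atlas_kafka_props({"atlas.kafka.auto.commit.enable": "true"}))
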
http://git-wip-us.apache.org/repos/asf/ambari/blob/4ddbd624/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
index 6dd2129..c2c1532 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
@@ -171,6 +171,10 @@
             <type>atlas-env</type>
             <replace key="content" find="-Xloggc:$ATLAS_LOG_DIRgc-worker.log" replace-with="-Xloggc:$ATLAS_LOG_DIR/gc-worker.log"/>
           </definition>
+          <definition xsi:type="configure" id="hdp_2_6_atlas_kafka_auto_commit_enable_property_delete" summary="Updating Atlas Kafka configurations.">
+            <type>application-properties</type>
+            <transfer operation="delete" delete-key="atlas.kafka.auto.commit.enable"/>
+          </definition>
         </changes>
       </component>
     </service>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4ddbd624/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
index e262971..df609cd 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
@@ -358,6 +358,10 @@
         <task xsi:type="configure" id="atlas_env_gc_worker"/>
       </execute-stage>
 
+      <execute-stage service="ATLAS" component="ATLAS_SERVER" title="Updating Atlas Kafka configurations.">
+        <task xsi:type="configure" id="hdp_2_6_atlas_kafka_auto_commit_enable_property_delete"/>
+      </execute-stage>
+
       <!-- KMS -->
       <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Apply config changes for Ranger Kms plugin">
         <task xsi:type="configure" id="hdp_2_6_maint_ranger_kms_plugin_cluster_name"/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4ddbd624/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
index 6b01ce9..b376fa7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
@@ -1034,6 +1034,7 @@
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_6_maint_ranger_atlas_plugin_cluster_name"/>
           <task xsi:type="configure" id="atlas_env_gc_worker"/>
+          <task xsi:type="configure" id="hdp_2_6_atlas_kafka_auto_commit_enable_property_delete"/>
         </pre-upgrade>
         <pre-downgrade/> <!--  no-op to prevent config changes on downgrade -->
         <upgrade>


[37/50] [abbrv] ambari git commit: AMBARI-21345 Add host doesn't fully add a node when include/exclude files are used (dsen)

Posted by jo...@apache.org.
AMBARI-21345 Add host doesn't fully add a node when include/exclude files are used (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7764e387
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7764e387
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7764e387

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 7764e387782962030de93f22f27d037040589595
Parents: 805dbe4
Author: Dmytro Sen <ds...@apache.org>
Authored: Mon Jul 17 13:55:53 2017 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Mon Jul 17 13:55:53 2017 +0300

----------------------------------------------------------------------
 .../AmbariCustomCommandExecutionHelper.java     |  14 ++-
 .../AmbariManagementControllerImpl.java         | 122 ++++++++++++++-----
 .../internal/HostResourceProvider.java          |   1 +
 .../HDFS/2.1.0.2.0/configuration/hdfs-site.xml  |   6 +
 .../HDFS/2.1.0.2.0/package/scripts/hdfs.py      |   8 ++
 .../2.1.0.2.0/package/scripts/hdfs_namenode.py  |  26 +++-
 .../2.1.0.2.0/package/scripts/params_linux.py   |  11 +-
 .../2.1.0.2.0/package/scripts/params_windows.py |   7 ++
 .../package/templates/include_hosts_list.j2     |  21 ++++
 .../YARN/2.1.0.2.0/configuration/yarn-site.xml  |   6 +
 .../2.1.0.2.0/package/scripts/params_linux.py   |  12 +-
 .../2.1.0.2.0/package/scripts/params_windows.py |  10 +-
 .../package/scripts/resourcemanager.py          |  18 ++-
 .../package/templates/include_hosts_list.j2     |  21 ++++
 .../services/HDFS/configuration/hdfs-site.xml   |   6 +
 .../HDFS/package/scripts/hdfs_namenode.py       |  12 +-
 .../0.8/services/HDFS/package/scripts/params.py |  11 +-
 .../package/templates/include_hosts_list.j2     |  21 ++++
 .../services/YARN/configuration/yarn-site.xml   |   6 +
 .../0.8/services/YARN/package/scripts/params.py |  10 +-
 .../YARN/package/scripts/resourcemanager.py     |   9 +-
 .../package/templates/include_hosts_list.j2     |  21 ++++
 .../services/YARN/configuration/yarn-site.xml   |   6 +
 .../services/YARN/package/scripts/params.py     |   9 +-
 .../YARN/package/scripts/resourcemanager.py     |   9 +-
 .../package/templates/exclude_hosts_list.j2     |  21 ++++
 .../package/templates/include_hosts_list.j2     |  21 ++++
 .../services/YARN/configuration/yarn-site.xml   |   6 +
 .../services/YARN/package/scripts/params.py     |  10 +-
 .../YARN/package/scripts/resourcemanager.py     |   9 +-
 .../package/templates/include_hosts_list.j2     |  21 ++++
 .../AmbariManagementControllerTest.java         |   8 +-
 .../python/stacks/2.0.6/HDFS/test_namenode.py   |   2 +-
 .../stacks/2.0.6/configs/altfs_plus_hdfs.json   |   2 +-
 .../python/stacks/2.0.6/configs/default.json    |   2 +-
 .../2.0.6/configs/default_ams_embedded.json     |   2 +-
 .../2.0.6/configs/default_hive_nn_ha.json       |   2 +-
 .../2.0.6/configs/default_hive_nn_ha_2.json     |   2 +-
 .../2.0.6/configs/default_hive_non_hdfs.json    |   2 +-
 .../2.0.6/configs/default_no_install.json       |   2 +-
 .../2.0.6/configs/default_oozie_mysql.json      |   2 +-
 .../default_update_exclude_file_only.json       |   2 +-
 .../2.0.6/configs/default_with_bucket.json      |   2 +-
 .../python/stacks/2.0.6/configs/flume_22.json   |   2 +-
 .../python/stacks/2.0.6/configs/flume_only.json |   2 +-
 .../stacks/2.0.6/configs/hbase_no_phx.json      |   2 +-
 .../stacks/2.0.6/configs/hbase_with_phx.json    |   2 +-
 .../2.0.6/configs/oozie_existing_sqla.json      |   2 +-
 .../python/stacks/2.0.6/configs/secured.json    |   2 +-
 .../test/python/stacks/2.3/configs/ats_1_5.json |   2 +-
 .../python/stacks/2.5/configs/hsi_default.json  |   2 +-
 .../2.5/configs/hsi_default_for_restart.json    |   2 +-
 .../test/python/stacks/2.5/configs/hsi_ha.json  |   2 +-
 .../services/YARN/configuration/yarn-site.xml   |   6 +
 .../YARN/package/scripts/params_linux.py        |   9 +-
 .../YARN/package/scripts/params_windows.py      |  10 +-
 .../YARN/package/scripts/resourcemanager.py     |  18 ++-
 .../package/templates/include_hosts_list.j2     |  21 ++++
 58 files changed, 520 insertions(+), 85 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
index 9b19ade..e321559 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
@@ -139,7 +139,7 @@ public class AmbariCustomCommandExecutionHelper {
   public final static String DECOM_EXCLUDED_HOSTS = "excluded_hosts";
   public final static String DECOM_SLAVE_COMPONENT = "slave_type";
   public final static String HBASE_MARK_DRAINING_ONLY = "mark_draining_only";
-  public final static String UPDATE_EXCLUDE_FILE_ONLY = "update_exclude_file_only";
+  public final static String UPDATE_FILES_ONLY = "update_files_only";
 
   private final static String ALIGN_MAINTENANCE_STATE = "align_maintenance_state";
 
@@ -870,9 +870,9 @@ public class AmbariCustomCommandExecutionHelper {
               @Override
               public boolean shouldHostBeRemoved(final String hostname)
               throws AmbariException {
-                //Get UPDATE_EXCLUDE_FILE_ONLY parameter as string
+                //Get UPDATE_FILES_ONLY parameter as string
                 String upd_excl_file_only_str = actionExecutionContext.getParameters()
-                .get(UPDATE_EXCLUDE_FILE_ONLY);
+                .get(UPDATE_FILES_ONLY);
 
                 String decom_incl_hosts_str = actionExecutionContext.getParameters()
                 .get(DECOM_INCLUDED_HOSTS);
@@ -946,15 +946,17 @@ public class AmbariCustomCommandExecutionHelper {
         listOfExcludedHosts.add(sch.getHostName());
         if (alignMtnState) {
           sch.setMaintenanceState(MaintenanceState.ON);
+          LOG.info("marking Maintenance=ON on " + sch.getHostName());
         }
-        LOG.info("Decommissioning " + slaveCompType + " and marking Maintenance=ON on " + sch.getHostName());
+        LOG.info("Decommissioning " + slaveCompType + " on " + sch.getHostName());
       }
       if (filteredIncludedHosts.contains(sch.getHostName())) {
         sch.setComponentAdminState(HostComponentAdminState.INSERVICE);
         if (alignMtnState) {
           sch.setMaintenanceState(MaintenanceState.OFF);
+          LOG.info("marking Maintenance=OFF on " + sch.getHostName());
         }
-        LOG.info("Recommissioning " + slaveCompType + " and marking Maintenance=OFF on " + sch.getHostName());
+        LOG.info("Recommissioning " + slaveCompType + " on " + sch.getHostName());
       }
     }
 
@@ -1008,7 +1010,7 @@ public class AmbariCustomCommandExecutionHelper {
       }
 
       if (!serviceName.equals(Service.Type.HBASE.name()) || hostName.equals(primaryCandidate)) {
-        commandParams.put(UPDATE_EXCLUDE_FILE_ONLY, "false");
+        commandParams.put(UPDATE_FILES_ONLY, "false");
         addCustomCommandAction(commandContext, commandFilter, stage, commandParams, commandDetail.toString());
       }
     }

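The reverse lookup from a slave component back to its decommission-capable master recurs several times in this patch (here and twice more in AmbariManagementControllerImpl below). In sketch form; the mapping contents are assumptions based on Ambari's usual master/slave pairs, not quoted from the source:

master_to_slave = {            # masterToSlaveMappingForDecom, approximately
    "NAMENODE": "DATANODE",
    "RESOURCEMANAGER": "NODEMANAGER",
    "HBASE_MASTER": "HBASE_REGIONSERVER",
}

def master_for_slave(slave_component):
    for master, slave in master_to_slave.items():
        if slave == slave_component:
            return master
    return None

print(master_for_slave("DATANODE"))   # NAMENODE
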
http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 82171f6..a34422d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -42,6 +42,7 @@ import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.UNLIMITED
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.USER_GROUPS;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.USER_LIST;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.VERSION;
+import static org.apache.ambari.server.controller.AmbariCustomCommandExecutionHelper.masterToSlaveMappingForDecom;
 
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -3337,17 +3338,49 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
       }
     }
 
+    Map<String, String> serviceMasterForDecommissionMap = new HashMap<>();
     for (Map<State, List<ServiceComponentHost>> stateScHostMap :
         changedScHosts.values()) {
       for (Entry<State, List<ServiceComponentHost>> entry :
           stateScHostMap.entrySet()) {
         State newState = entry.getKey();
         for (ServiceComponentHost sch : entry.getValue()) {
+          String componentName = sch.getServiceComponentName();
+          //Create map for include/exclude files refresh
+          if (masterToSlaveMappingForDecom.containsValue(componentName) &&
+            sch.getState() == State.INIT && newState == State.INSTALLED) {
+            String serviceName = sch.getServiceName();
+            String masterComponentName = null;
+            for (Entry<String, String> entrySet : masterToSlaveMappingForDecom.entrySet()) {
+              if (entrySet.getValue().equals(componentName)) {
+                masterComponentName = entrySet.getKey();
+              }
+            }
+            try {
+              Service s = cluster.getService(serviceName);
+              //Filter services whose masters are not started
+              if (s.getServiceComponent(masterComponentName).getDesiredState() == State.STARTED) {
+                serviceMasterForDecommissionMap.put(serviceName, masterComponentName);
+              } else {
+                LOG.info(String.format("Not adding service %s to the include/exclude files refresh map because its master is not started", serviceName));
+              }
+            } catch (AmbariException e) {
+              LOG.error("Exception during INIT masters cleanup : ", e);
+            }
+          }
+
+          //actually set the new state
           sch.setDesiredState(newState);
         }
       }
     }
 
+    try {
+      createAndExecuteRefreshIncludeExcludeFilesActionForMasters(serviceMasterForDecommissionMap, cluster.getClusterName());
+    } catch (AmbariException e) {
+      LOG.error("Exception during refresh include exclude files action : ", e);
+    }
+
     if (ignoredScHosts != null) {
       for (ServiceComponentHost scHost : ignoredScHosts) {
         scHost.setDesiredState(scHost.getState());
@@ -3602,18 +3635,40 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
       }
     }
 
+    Map<String, Map<String, String>> clusterServiceMasterForDecommissionMap = new HashMap<>();
+
     for (Entry<ServiceComponent, Set<ServiceComponentHost>> entry : safeToRemoveSCHs.entrySet()) {
       for (ServiceComponentHost componentHost : entry.getValue()) {
         try {
           deleteHostComponent(entry.getKey(), componentHost);
           deleteStatusMetaData.addDeletedKey(componentHost.getHostName() + "/" + componentHost.getServiceComponentName());
-
+          //create cluster-master-service map to update all include/exclude files in one action
+          String componentName = componentHost.getServiceComponentName();
+          if (masterToSlaveMappingForDecom.containsValue(componentName)) {
+            String masterComponentName = null;
+            for (Entry<String, String> entrySet : masterToSlaveMappingForDecom.entrySet()) {
+              if (entrySet.getValue().equals(componentName)) {
+                masterComponentName = entrySet.getKey();
+              }
+            }
+            if (clusterServiceMasterForDecommissionMap.containsKey(componentHost.getClusterName())) {
+              clusterServiceMasterForDecommissionMap.get(componentHost.getClusterName()).put(componentHost.getServiceName(), masterComponentName);
+            } else {
+              Map<String, String> tempMap = new HashMap<>();
+              tempMap.put(componentHost.getServiceName(), masterComponentName);
+              clusterServiceMasterForDecommissionMap.put(componentHost.getClusterName(), tempMap);
+            }
+          }
         } catch (Exception ex) {
           deleteStatusMetaData.addException(componentHost.getHostName() + "/" + componentHost.getServiceComponentName(), ex);
         }
       }
     }
 
+    for (String cluster : clusterServiceMasterForDecommissionMap.keySet()) {
+      createAndExecuteRefreshIncludeExcludeFilesActionForMasters(clusterServiceMasterForDecommissionMap.get(cluster), cluster);
+    }
+
     //Do not break behavior for existing clients where delete request contains only 1 host component.
     //Response for these requests will have empty body with appropriate error code.
     if (deleteStatusMetaData.getDeletedKeys().size() + deleteStatusMetaData.getExceptionForKeys().size() == 1) {
@@ -3636,7 +3691,6 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
   }
 
   private void deleteHostComponent(ServiceComponent serviceComponent, ServiceComponentHost componentHost) throws AmbariException {
-    String included_hostname = componentHost.getHostName();
     String serviceName = serviceComponent.getServiceName();
     String master_component_name = null;
     String slave_component_name = componentHost.getServiceComponentName();
@@ -3644,37 +3698,15 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     State slaveState = componentHost.getState();
     //Delete hostcomponents
     serviceComponent.deleteServiceComponentHosts(componentHost.getHostName());
-    // If deleted hostcomponents support decomission and were decommited and stopped
-    if (AmbariCustomCommandExecutionHelper.masterToSlaveMappingForDecom.containsValue(slave_component_name)
+    // If deleted hostcomponents support decommission and were decommissioned and stopped, or are in an unknown state
+    if (masterToSlaveMappingForDecom.containsValue(slave_component_name)
             && desiredAdminState.equals(HostComponentAdminState.DECOMMISSIONED)
-            && slaveState.equals(State.INSTALLED)) {
-
-      for (Entry<String, String> entrySet : AmbariCustomCommandExecutionHelper.masterToSlaveMappingForDecom.entrySet()) {
+            && (slaveState.equals(State.INSTALLED) || slaveState.equals(State.UNKNOWN))) {
+      for (Entry<String, String> entrySet : masterToSlaveMappingForDecom.entrySet()) {
         if (entrySet.getValue().equals(slave_component_name)) {
           master_component_name = entrySet.getKey();
         }
       }
-      //Clear exclud file or draining list except HBASE
-      if (!serviceName.equals(Service.Type.HBASE.toString())) {
-        HashMap<String, String> requestProperties = new HashMap<>();
-        requestProperties.put("context", "Remove host " +
-                included_hostname + " from exclude file");
-        requestProperties.put("exclusive", "true");
-        HashMap<String, String> params = new HashMap<>();
-        params.put("included_hosts", included_hostname);
-        params.put("slave_type", slave_component_name);
-        params.put(AmbariCustomCommandExecutionHelper.UPDATE_EXCLUDE_FILE_ONLY, "true");
-
-        //Create filter for RECOMISSION command
-        RequestResourceFilter resourceFilter
-                = new RequestResourceFilter(serviceName, master_component_name, null);
-        //Create request for RECOMISSION command
-        ExecuteActionRequest actionRequest = new ExecuteActionRequest(
-                serviceComponent.getClusterName(), AmbariCustomCommandExecutionHelper.DECOMMISSION_COMMAND_NAME, null,
-                Collections.singletonList(resourceFilter), null, params, true);
-        //Send request
-        createAction(actionRequest, requestProperties);
-      }
 
       //Mark master component as needed to restart for remove host info from components UI
       Cluster cluster = clusters.getCluster(serviceComponent.getClusterName());
@@ -3689,6 +3721,40 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     }
   }
 
+  /**
+   * Creates and triggers an action to update include and exclude files for the master components, depending on the current cluster topology and component states.
+   * @param serviceMasterMap map from service name to the master component whose include/exclude files should be refreshed
+   * @param clusterName name of the cluster to run the refresh action on
+   * @throws AmbariException
+   */
+  private void createAndExecuteRefreshIncludeExcludeFilesActionForMasters(Map<String, String> serviceMasterMap, String clusterName) throws AmbariException {
+    //Clear include/exclude files or draining list for every service except HBASE
+    serviceMasterMap.remove(Service.Type.HBASE.toString());
+    //exit if empty
+    if (serviceMasterMap.isEmpty()) {
+      return;
+    }
+    LOG.debug("Refresh include/exclude files action will be executed for " + serviceMasterMap);
+    HashMap<String, String> requestProperties = new HashMap<>();
+    requestProperties.put("context", "Update Include and Exclude Files for " + serviceMasterMap.keySet().toString());
+    requestProperties.put("exclusive", "true");
+    HashMap<String, String> params = new HashMap<>();
+    params.put(AmbariCustomCommandExecutionHelper.UPDATE_FILES_ONLY, "false");
+
+    //Create filter for command
+    List<RequestResourceFilter> resourceFilters = new ArrayList<>(serviceMasterMap.size());
+    for (String serviceName : serviceMasterMap.keySet()) {
+      resourceFilters.add(new RequestResourceFilter(serviceName, serviceMasterMap.get(serviceName), null));
+    }
+
+    //Create request for command
+    ExecuteActionRequest actionRequest = new ExecuteActionRequest(
+      clusterName, AmbariCustomCommandExecutionHelper.DECOMMISSION_COMMAND_NAME, null,
+      resourceFilters, null, params, true);
+    //Send action
+    createAction(actionRequest, requestProperties);
+  }
+
   @Override
   public void deleteUsers(Set<UserRequest> requests)
     throws AmbariException {

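For readers following the controller change: the per-host RECOMISSION request that deleteHostComponent used to fire is replaced by one batched refresh per cluster. The reverse lookup from a decommission-capable slave to its master, and the HBASE exclusion, reduce to roughly the following Python sketch (the mapping entries are illustrative stand-ins, not the full masterToSlaveMappingForDecom):

    # Sketch of the slave -> master reverse lookup batched above; the entries
    # are illustrative, not the complete Ambari mapping.
    MASTER_TO_SLAVE = {
        "NAMENODE": "DATANODE",
        "RESOURCEMANAGER": "NODEMANAGER",
        "HBASE_MASTER": "HBASE_REGIONSERVER",
    }

    def master_for_slave(slave_component):
        # Reverse lookup: which master owns this slave component type?
        for master, slave in MASTER_TO_SLAVE.items():
            if slave == slave_component:
                return master
        return None

    def build_refresh_map(deleted_slaves_by_service):
        # deleted_slaves_by_service: {service name: deleted slave component name}
        refresh_map = {service: master_for_slave(slave)
                       for service, slave in deleted_slaves_by_service.items()
                       if master_for_slave(slave) is not None}
        # HBASE draining is not driven by include/exclude files, so it is
        # dropped before the single DECOMMISSION action is created.
        refresh_map.pop("HBASE", None)
        return refresh_map

    print(build_refresh_map({"HDFS": "DATANODE", "HBASE": "HBASE_REGIONSERVER"}))
    # {'HDFS': 'NAMENODE'}
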
http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java
index 71e105e..3fb6f07 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java
@@ -980,6 +980,7 @@ public class HostResourceProvider extends AbstractControllerResourceProvider {
             throw new AmbariException(reason.toString());
           }
         } else {
+//          TODO: why can't a host with all components stopped be deleted? The functionality is implemented and only this validation blocks the request.
           if (!componentsToRemove.isEmpty()) {
             StringBuilder reason = new StringBuilder("Cannot remove host ")
                 .append(hostName)

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
index aad2db0..4eab367 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
@@ -99,6 +99,12 @@
       excluded.</description>
     <on-ambari-upgrade add="true"/>
   </property>
+  <property>
+    <name>manage.include.files</name>
+    <value>false</value>
+    <description>If true, Ambari will manage the include file when dfs.hosts is configured.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
   <!--
     <property>
       <name>dfs.hosts</name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
index f15c5d6..6f702d3 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
@@ -165,6 +165,14 @@ def hdfs(component=None):
          owner=params.hdfs_user,
          mode="f",
          )
+
+    if params.hdfs_include_file:
+      File(params.include_file_path,
+         content=Template("include_hosts_list.j2"),
+         owner=params.hdfs_user,
+         mode="f",
+         )
+      pass
   if params.service_map.has_key(component):
     service_name = params.service_map[component]
     ServiceConfig(service_name,

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
index 28036cf..e8a591c 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
@@ -110,6 +110,14 @@ def namenode(action=None, hdfs_binary=None, do_format=True, upgrade_type=None,
          group=params.user_group
     )
 
+    if params.hdfs_include_file:
+      File(params.include_file_path,
+         content=Template("include_hosts_list.j2"),
+         owner=params.hdfs_user,
+         group=params.user_group
+      )
+      pass
+
     if do_format and not params.hdfs_namenode_format_disabled:
       format_namenode()
       pass
@@ -435,7 +443,15 @@ def decommission():
        group=user_group
   )
 
-  if not params.update_exclude_file_only:
+  if params.hdfs_include_file:
+    File(params.include_file_path,
+         content=Template("include_hosts_list.j2"),
+         owner=params.hdfs_user,
+         group=params.user_group
+         )
+    pass
+
+  if not params.update_files_only:
     Execute(nn_kinit_cmd,
             user=hdfs_user
     )
@@ -462,6 +478,14 @@ def decommission():
        owner=hdfs_user
   )
 
+  if params.hdfs_include_file:
+    File(params.include_file_path,
+         content=Template("include_hosts_list.j2"),
+         owner=params.hdfs_user,
+         group=params.user_group
+         )
+    pass
+
   if params.dfs_ha_enabled:
     # due to a bug in hdfs, refreshNodes will not run on both namenodes so we
     # need to execute each command scoped to a particular namenode

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
index f98aafa..1d19175 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
@@ -161,7 +161,13 @@ falcon_user = config['configurations']['falcon-env']['falcon_user']
 #exclude file
 hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
 exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
-update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+include_file_path = default("/configurations/hdfs-site/dfs.hosts", None)
+hdfs_include_file = None
+manage_include_files = default("/configurations/hdfs-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+  hdfs_include_file = list(set(slave_hosts) - set(hdfs_exclude_file))
+update_files_only = default("/commandParams/update_files_only",False)
 command_phase = default("/commandParams/phase","")
 
 klist_path_local = get_klist_path(default('/configurations/kerberos-env/executable_search_paths', None))
@@ -170,7 +176,6 @@ kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executab
 hostname = config["hostname"]
 public_hostname = config["public_hostname"]
 rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
 oozie_servers = default("/clusterHostInfo/oozie_server", [])
 hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
 hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
@@ -557,4 +562,4 @@ if enable_ranger_hdfs:
 # need this to capture cluster name from where ranger hdfs plugin is enabled
 cluster_name = config['clusterName']
 
-# ranger hdfs plugin section end
\ No newline at end of file
+# ranger hdfs plugin section end

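Taken together, the params changes mean the include list is only computed when both dfs.hosts and the new manage.include.files flag are set. A self-contained sketch of that gating, with default() stubbed in place of resource_management's config lookup and made-up host names:

    # Minimal stand-in for resource_management's config/default() lookup.
    config = {
        "configurations": {"hdfs-site": {"dfs.hosts": "/etc/hadoop/conf/dfs.include",
                                         "manage.include.files": True}},
        "clusterHostInfo": {"slave_hosts": ["host1", "host2", "host3"],
                            "decom_dn_hosts": ["host2"]},
    }

    def default(path, fallback):
        node = config
        for key in path.strip("/").split("/"):
            if not isinstance(node, dict) or key not in node:
                return fallback
            node = node[key]
        return node

    include_file_path = default("/configurations/hdfs-site/dfs.hosts", None)
    manage_include_files = default("/configurations/hdfs-site/manage.include.files", False)
    slave_hosts = default("/clusterHostInfo/slave_hosts", [])
    hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])

    hdfs_include_file = None
    if include_file_path and manage_include_files:
        # Include list = current slaves minus decommissioned hosts.
        hdfs_include_file = list(set(slave_hosts) - set(hdfs_exclude_file))
    print(hdfs_include_file)  # ['host1', 'host3'] in some order

Note that list(set(...) - set(...)) does not preserve order, so the host order in the rendered include file can vary between runs.
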
http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_windows.py
index 4a43626..3e712b3 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_windows.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_windows.py
@@ -40,6 +40,13 @@ dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']
 #decomission
 hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
 exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
+include_file_path = default("/configurations/hdfs-site/dfs.hosts", None)
+hdfs_include_file = None
+manage_include_files = default("/configurations/hdfs-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+  slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+  hdfs_include_file = list(set(slave_hosts) - set(hdfs_exclude_file))
+update_files_only = default("/commandParams/update_files_only",False)
 # HDFS High Availability properties
 dfs_ha_enabled = False
 dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.internal.nameservices", None)

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/templates/include_hosts_list.j2
new file mode 100644
index 0000000..115a8a4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in hdfs_include_file %}
+{{host}}
+{% endfor %}
\ No newline at end of file

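The template itself is plain Jinja2, so it can be exercised outside Ambari; the Template() resource normally injects the script params, but here the variable is passed by hand (requires the jinja2 package):

    from jinja2 import Template

    tmpl = Template("{% for host in hdfs_include_file %}{{ host }}\n{% endfor %}")
    print(tmpl.render(hdfs_include_file=["host1", "host3"]), end="")
    # host1
    # host3
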
http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-site.xml b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-site.xml
index d0d0ede..a65b801 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-site.xml
@@ -408,6 +408,12 @@
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
+    <name>manage.include.files</name>
+    <value>false</value>
+    <description>If true, Ambari will manage the include file when yarn.resourcemanager.nodes.include-path is configured.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
     <name>yarn.http.policy</name>
     <value>HTTP_ONLY</value>
     <description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
index f28274b..5f400a3 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
@@ -234,11 +234,17 @@ user_group = config['configurations']['cluster-env']['user_group']
 exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
 exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
 
+nm_hosts = default("/clusterHostInfo/nm_hosts", [])
+#include file
+include_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.include-path", None)
+include_hosts = None
+manage_include_files = default("/configurations/yarn-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+  include_hosts = list(set(nm_hosts) - set(exclude_hosts))
+
 ats_host = set(default("/clusterHostInfo/app_timeline_server_hosts", []))
 has_ats = not len(ats_host) == 0
 
-nm_hosts = default("/clusterHostInfo/nm_hosts", [])
-
 # don't using len(nm_hosts) here, because check can take too much time on large clusters
 number_of_nm = 1
 
@@ -338,7 +344,7 @@ HdfsResource = functools.partial(
   immutable_paths = get_not_managed_resources(),
   dfs_type = dfs_type
  )
-update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
+update_files_only = default("/commandParams/update_files_only",False)
 
 mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_windows.py
index 0f8ce73..652ffd9 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_windows.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_windows.py
@@ -56,4 +56,12 @@ hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
 
 exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
 exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
-update_exclude_file_only = config['commandParams']['update_exclude_file_only']
+
+nm_hosts = default("/clusterHostInfo/nm_hosts", [])
+#include file
+include_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.include-path", None)
+include_hosts = None
+manage_include_files = default("/configurations/yarn-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+  include_hosts = list(set(nm_hosts) - set(exclude_hosts))
+update_files_only = default("/commandParams/update_files_only",False)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
index cf43f77..ecd8147 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
@@ -91,7 +91,14 @@ class ResourcemanagerWindows(Resourcemanager):
          mode="f"
     )
 
-    if params.update_exclude_file_only == False:
+    if params.include_hosts:
+      File(params.include_file_path,
+         content=Template("include_hosts_list.j2"),
+         owner=yarn_user,
+         mode="f"
+    )
+
+    if params.update_files_only == False:
       Execute(yarn_refresh_cmd, user=yarn_user)
 
 
@@ -159,7 +166,14 @@ class ResourcemanagerDefault(Resourcemanager):
          group=user_group
     )
 
-    if params.update_exclude_file_only == False:
+    if params.include_hosts:
+      File(params.include_file_path,
+           content=Template("include_hosts_list.j2"),
+           owner=yarn_user,
+           group=user_group
+           )
+
+    if params.update_files_only == False:
       Execute(yarn_refresh_cmd,
             environment= {'PATH' : params.execute_path },
             user=yarn_user)

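Across all of the resourcemanager.py variants touched below, the decommission flow is now: rewrite the exclude file, rewrite the include file when include_hosts was computed, and run the refresh command unless update_files_only is set. A minimal standalone sketch of that control flow (plain file I/O and a printed command stand in for the File/Execute resources; the refresh command is typically yarn rmadmin -refreshNodes):

    from types import SimpleNamespace

    def write_hosts_file(path, hosts):
        # Stand-in for the File(..., content=Template(...)) resource.
        with open(path, "w") as f:
            f.write("\n".join(hosts) + "\n")

    def decommission(params):
        write_hosts_file(params.exclude_file_path, params.exclude_hosts)
        if params.include_hosts:
            # New in this patch: keep the include file in sync too.
            write_hosts_file(params.include_file_path, params.include_hosts)
        if not params.update_files_only:
            # update_files_only=true rewrites the files but skips the refresh.
            print("would run: yarn rmadmin -refreshNodes")

    decommission(SimpleNamespace(
        exclude_file_path="/tmp/yarn.exclude", exclude_hosts=["host2"],
        include_file_path="/tmp/yarn.include", include_hosts=["host1", "host3"],
        update_files_only=False))
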
http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/templates/include_hosts_list.j2
new file mode 100644
index 0000000..42e33c0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in include_hosts %}
+{{host}}
+{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hdfs-site.xml
index 87684df..66d25cf 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hdfs-site.xml
@@ -72,6 +72,12 @@
       excluded.</description>
     <on-ambari-upgrade add="true"/>
   </property>
+  <property>
+    <name>manage.include.files</name>
+    <value>false</value>
+    <description>If true, Ambari will manage the include file when dfs.hosts is configured.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
   <!--
     <property>
       <name>dfs.hosts</name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_namenode.py
index 6de7735..19751f6 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_namenode.py
@@ -143,8 +143,16 @@ def decommission():
        owner=hdfs_user,
        group=user_group
   )
-  
-  if not params.update_exclude_file_only:
+
+  if params.hdfs_include_file:
+    File(params.include_file_path,
+         content=Template("include_hosts_list.j2"),
+         owner=params.hdfs_user,
+         group=params.user_group
+         )
+    pass
+
+  if not params.update_files_only:
     Execute(nn_kinit_cmd,
             user=hdfs_user
     )

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
index 19e223c..9cf163a 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
@@ -55,13 +55,18 @@ falcon_user = config['configurations']['falcon-env']['falcon_user']
 #exclude file
 hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
 exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
-update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+include_file_path = default("/configurations/hdfs-site/dfs.hosts", None)
+hdfs_include_file = None
+manage_include_files = default("/configurations/hdfs-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+  hdfs_include_file = list(set(slave_hosts) - set(hdfs_exclude_file))
+update_files_only = default("/commandParams/update_files_only",False)
 
 kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
 #hosts
 hostname = config["hostname"]
 rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
 oozie_servers = default("/clusterHostInfo/oozie_server", [])
 hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
 hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
@@ -241,4 +246,4 @@ ttnode_heapsize = "1024m"
 dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
 mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
 mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-script_https_protocol = Script.get_force_https_protocol_name()
\ No newline at end of file
+script_https_protocol = Script.get_force_https_protocol_name()

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/templates/include_hosts_list.j2
new file mode 100644
index 0000000..115a8a4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in hdfs_include_file %}
+{{host}}
+{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml
index 8e9b8b1..3cb5add 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml
@@ -334,6 +334,12 @@
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
+    <name>manage.include.files</name>
+    <value>false</value>
+    <description>If true, Ambari will manage the include file when yarn.resourcemanager.nodes.include-path is configured.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
     <name>yarn.timeline-service.enabled</name>
     <value>true</value>
     <description>Indicate to clients whether timeline service is enabled or not.

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/params.py
index 33496cfe..87b5992 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/params.py
@@ -118,6 +118,14 @@ user_group = config['configurations']['cluster-env']['user_group']
 exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
 exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
 
+nm_hosts = default("/clusterHostInfo/nm_hosts", [])
+#include file
+include_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.include-path", None)
+include_hosts = None
+manage_include_files = default("/configurations/yarn-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+  include_hosts = list(set(nm_hosts) - set(exclude_hosts))
+
 hostname = config['hostname']
 
 ats_host = set(default("/clusterHostInfo/app_timeline_server_hosts", []))
@@ -162,7 +170,7 @@ HdfsDirectory = functools.partial(
   kinit_path_local = kinit_path_local,
   bin_dir = hadoop_bin_dir
 )
-update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
+update_files_only = default("/commandParams/update_files_only",False)
 
 mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/resourcemanager.py
index 4d40d68..8bd76bf 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/resourcemanager.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/resourcemanager.py
@@ -89,7 +89,14 @@ class Resourcemanager(Script):
          group=user_group
     )
 
-    if params.update_exclude_file_only == False:
+    if params.include_hosts:
+      File(params.include_file_path,
+           content=Template("include_hosts_list.j2"),
+           owner=yarn_user,
+           group=user_group
+           )
+
+    if params.update_files_only == False:
       Execute(yarn_refresh_cmd,
             environment= {'PATH' : params.execute_path },
             user=yarn_user)

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/templates/include_hosts_list.j2
new file mode 100644
index 0000000..42e33c0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in include_hosts %}
+{{host}}
+{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/yarn-site.xml
index 93a35cd..f2da835 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/yarn-site.xml
@@ -334,6 +334,12 @@
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
+    <name>manage.include.files</name>
+    <value>false</value>
+    <description>If true, Ambari will manage the include file when yarn.resourcemanager.nodes.include-path is configured.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
     <name>yarn.timeline-service.enabled</name>
     <value>true</value>
     <description>Indicate to clients whether timeline service is enabled or not.

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/params.py
index 5a7e508..71f30f0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/params.py
@@ -98,6 +98,13 @@ yarn_container_bin = "/usr/lib/hadoop-yarn/bin"
 #exclude file
 exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
 exclude_file_path = config['configurations']['yarn-site']['yarn.resourcemanager.nodes.exclude-path']
+nm_hosts = default("/clusterHostInfo/nm_hosts", [])
+#include file
+include_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.include-path", None)
+include_hosts = None
+manage_include_files = default("/configurations/yarn-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+  include_hosts = list(set(nm_hosts) - set(exclude_hosts))
 
 hostname = config['hostname']
 
@@ -128,7 +135,7 @@ HdfsDirectory = functools.partial(
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local
 )
-update_exclude_file_only = config['commandParams']['update_exclude_file_only']
+update_files_only = default("/commandParams/update_files_only",False)
 
 hadoop_bin = "/usr/lib/hadoop/sbin"
 mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/resourcemanager.py
index a286ae3..f92938b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/resourcemanager.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/resourcemanager.py
@@ -79,7 +79,14 @@ class Resourcemanager(Script):
          group=user_group
     )
 
-    if params.update_exclude_file_only == False:
+    if params.include_hosts:
+      File(params.include_file_path,
+           content=Template("include_hosts_list.j2"),
+           owner=yarn_user,
+           group=user_group
+           )
+
+    if params.update_files_only == False:
       Execute(yarn_refresh_cmd,
             user=yarn_user)
       pass

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/exclude_hosts_list.j2
new file mode 100644
index 0000000..c7ce416
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/exclude_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in exclude_hosts %}
+{{host}}
+{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/include_hosts_list.j2
new file mode 100644
index 0000000..42e33c0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in include_hosts %}
+{{host}}
+{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-site.xml
index 93a35cd..f2da835 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-site.xml
@@ -334,6 +334,12 @@
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
+    <name>manage.include.files</name>
+    <value>false</value>
+    <description>If true, Ambari will manage the include file when yarn.resourcemanager.nodes.include-path is configured.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
     <name>yarn.timeline-service.enabled</name>
     <value>true</value>
     <description>Indicate to clients whether timeline service is enabled or not.

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/params.py
index 5a7e508..e39c4d1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/params.py
@@ -98,7 +98,13 @@ yarn_container_bin = "/usr/lib/hadoop-yarn/bin"
 #exclude file
 exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
 exclude_file_path = config['configurations']['yarn-site']['yarn.resourcemanager.nodes.exclude-path']
-
+nm_hosts = default("/clusterHostInfo/nm_hosts", [])
+#include file
+include_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.include-path", None)
+include_hosts = None
+manage_include_files = default("/configurations/yarn-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+  include_hosts = list(set(nm_hosts) - set(exclude_hosts))
 hostname = config['hostname']
 
 if security_enabled:
@@ -128,7 +134,7 @@ HdfsDirectory = functools.partial(
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local
 )
-update_exclude_file_only = config['commandParams']['update_exclude_file_only']
+update_files_only = default("/commandParams/update_files_only",False)
 
 hadoop_bin = "/usr/lib/hadoop/sbin"
 mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/resourcemanager.py
index a286ae3..f92938b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/resourcemanager.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/resourcemanager.py
@@ -79,7 +79,14 @@ class Resourcemanager(Script):
          group=user_group
     )
 
-    if params.update_exclude_file_only == False:
+    if params.include_hosts:
+      File(params.include_file_path,
+           content=Template("include_hosts_list.j2"),
+           owner=yarn_user,
+           group=user_group
+           )
+
+    if params.update_files_only == False:
       Execute(yarn_refresh_cmd,
             user=yarn_user)
       pass

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/templates/include_hosts_list.j2
new file mode 100644
index 0000000..42e33c0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in include_hosts %}
+{{host}}
+{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index 43e2ed4..81b3580 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -6345,8 +6345,8 @@ public class AmbariManagementControllerTest {
     execCmd = storedTasks.get(0).getExecutionCommandWrapper
         ().getExecutionCommand();
     Map<String, String> cmdParams = execCmd.getCommandParams();
-    Assert.assertTrue(cmdParams.containsKey("update_exclude_file_only"));
-    Assert.assertTrue(cmdParams.get("update_exclude_file_only").equals("false"));
+    Assert.assertTrue(cmdParams.containsKey("update_files_only"));
+    Assert.assertTrue(cmdParams.get("update_files_only").equals("false"));
     Assert.assertNotNull(storedTasks);
     Assert.assertEquals(1, storedTasks.size());
     Assert.assertEquals(HostComponentAdminState.DECOMMISSIONED, scHost.getComponentAdminState());
@@ -6401,8 +6401,8 @@ public class AmbariManagementControllerTest {
       Assert.assertTrue(hrc.getCommandDetail().contains(host1));
       Assert.assertTrue(hrc.getCommandDetail().contains(host2));
       cmdParams = hrc.getExecutionCommandWrapper().getExecutionCommand().getCommandParams();
-      if(!cmdParams.containsKey("update_exclude_file_only")
-          || !cmdParams.get("update_exclude_file_only").equals("true")) {
+      if(!cmdParams.containsKey("update_files_only")
+          || !cmdParams.get("update_files_only").equals("true")) {
         countRefresh++;
       }
     }

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
index acd40b0..862a17e 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
@@ -1041,7 +1041,7 @@ class TestNamenode(RMFTestCase):
                               bin_dir = '/usr/bin')
     self.assertNoMoreResources()
 
-  def test_decommission_update_exclude_file_only(self):
+  def test_decommission_update_files_only(self):
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
                        classname = "NameNode",
                        command = "decommission",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json b/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json
index ea00a37..f928073 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json
@@ -33,7 +33,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
index 87a9034..f337f41 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
@@ -39,7 +39,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
index 71423c8..f3e8dc3 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
@@ -32,7 +32,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json
index 009ff6d..7b0f78d 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json
@@ -35,7 +35,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json
index 2b078c3..01f0efc 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json
@@ -35,7 +35,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json
index 571b737..0cbd322 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json
@@ -33,7 +33,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
index 7fdb449..cfcf5e1 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
@@ -35,7 +35,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json
index 7378b68..7db73ab 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json
@@ -36,7 +36,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json
index 2ea07e4..f50a207 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json
@@ -33,7 +33,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "true",
+        "update_files_only" : "true",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/test/python/stacks/2.0.6/configs/default_with_bucket.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_with_bucket.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_with_bucket.json
index 5080d30..c1eb868 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_with_bucket.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_with_bucket.json
@@ -38,7 +38,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json b/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json
index 6ec9ec9..c99d10b 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json
@@ -32,7 +32,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "output_file":"HDFS_CLIENT-configs.tar.gz"

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/test/python/stacks/2.0.6/configs/flume_only.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/flume_only.json b/ambari-server/src/test/python/stacks/2.0.6/configs/flume_only.json
index 1550715..1a4d676 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/flume_only.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/flume_only.json
@@ -31,7 +31,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json
index f572413..52a1fde 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json
@@ -35,7 +35,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json
index 5147603..06682bc 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json
@@ -35,7 +35,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json b/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json
index 8d12b98..4ffa29f 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json
@@ -34,7 +34,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
index 5327865..76a110e 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
@@ -38,7 +38,7 @@
         "script": "scripts/yarn_client.py",
         "excluded_hosts": "host1",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false"
+        "update_files_only" : "false"
     },
     "taskId": 186, 
     "public_hostname": "c6401.ambari.apache.org",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/test/python/stacks/2.3/configs/ats_1_5.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/configs/ats_1_5.json b/ambari-server/src/test/python/stacks/2.3/configs/ats_1_5.json
index b4342ad..475a6f9 100644
--- a/ambari-server/src/test/python/stacks/2.3/configs/ats_1_5.json
+++ b/ambari-server/src/test/python/stacks/2.3/configs/ats_1_5.json
@@ -31,7 +31,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/test/python/stacks/2.5/configs/hsi_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/configs/hsi_default.json b/ambari-server/src/test/python/stacks/2.5/configs/hsi_default.json
index 9dcb451..7622212 100644
--- a/ambari-server/src/test/python/stacks/2.5/configs/hsi_default.json
+++ b/ambari-server/src/test/python/stacks/2.5/configs/hsi_default.json
@@ -38,7 +38,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/test/python/stacks/2.5/configs/hsi_default_for_restart.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/configs/hsi_default_for_restart.json b/ambari-server/src/test/python/stacks/2.5/configs/hsi_default_for_restart.json
index f6de1c4..c2320ba 100644
--- a/ambari-server/src/test/python/stacks/2.5/configs/hsi_default_for_restart.json
+++ b/ambari-server/src/test/python/stacks/2.5/configs/hsi_default_for_restart.json
@@ -39,7 +39,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/ambari-server/src/test/python/stacks/2.5/configs/hsi_ha.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/configs/hsi_ha.json b/ambari-server/src/test/python/stacks/2.5/configs/hsi_ha.json
index 3fd9f72..7b79d84 100644
--- a/ambari-server/src/test/python/stacks/2.5/configs/hsi_ha.json
+++ b/ambari-server/src/test/python/stacks/2.5/configs/hsi_ha.json
@@ -38,7 +38,7 @@
         "script": "scripts/service_check.py",
         "excluded_hosts": "host1,host2",
         "mark_draining_only" : "false",
-        "update_exclude_file_only" : "false",
+        "update_files_only" : "false",
         "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
         "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
         "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/configuration/yarn-site.xml b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/configuration/yarn-site.xml
index 59ff82b..f3ea462 100755
--- a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/configuration/yarn-site.xml
+++ b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/configuration/yarn-site.xml
@@ -388,6 +388,12 @@
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
+    <name>manage.include.files</name>
+    <value>false</value>
+    <description>If true, Ambari will manage the include file when yarn.resourcemanager.nodes.include-path is configured.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
     <name>yarn.http.policy</name>
     <value>HTTP_ONLY</value>
     <description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_linux.py b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_linux.py
index 4d42861..da54b7c 100755
--- a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_linux.py
+++ b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_linux.py
@@ -233,6 +233,13 @@ has_ats = not len(ats_host) == 0
 
 nm_hosts = default("/clusterHostInfo/nm_hosts", [])
 
+# include file
+include_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.include-path", None)
+include_hosts = None
+manage_include_files = default("/configurations/yarn-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+  include_hosts = list(set(nm_hosts) - set(exclude_hosts))
+
 # don't use len(nm_hosts) here, because the check can take too much time on large clusters
 number_of_nm = 1
 
@@ -315,7 +322,7 @@ HdfsResource = functools.partial(
   immutable_paths = get_not_managed_resources(),
   dfs_type = dfs_type
  )
-update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
+update_files_only = default("/commandParams/update_files_only",False)
 
 mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
 


[14/50] [abbrv] ambari git commit: AMBARI-21464 - Ranger is Missing from BigInsights to HDP Upgrade Packs (jonathanhurley)

Posted by jo...@apache.org.
AMBARI-21464 - Ranger is Missing from BigInsights to HDP Upgrade Packs (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0cb9194f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0cb9194f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0cb9194f

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 0cb9194f568534f7dde7d881fc31f06a619759f9
Parents: 69e492f
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Wed Jul 12 21:32:10 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Wed Jul 12 21:32:10 2017 -0400

----------------------------------------------------------------------
 .../4.2.5/upgrades/config-upgrade.xml           |  68 +++++++
 .../upgrades/nonrolling-upgrade-to-hdp-2.6.xml  | 164 ++++++++++++++++
 .../BigInsights/4.2/upgrades/config-upgrade.xml |  94 +++++++++
 .../upgrades/nonrolling-upgrade-to-hdp-2.6.xml  | 190 +++++++++++++++++++
 4 files changed, 516 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/0cb9194f/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
index b51a744..e33b8fb 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
@@ -63,6 +63,74 @@
       </component>
     </service>
 
+    <service name="RANGER">
+      <component name="RANGER_ADMIN">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_6_0_0_remove_bind_anonymous">
+            <type>ranger-env</type>
+            <transfer operation="delete" delete-key="bind_anonymous" />
+          </definition>
+          <definition xsi:type="configure" id="admin_log4j_parameterize" summary="Parameterizing Ranger Log4J Properties">
+            <type>admin-log4j</type>
+            <set key="ranger_xa_log_maxfilesize" value="256"/>
+            <set key="ranger_xa_log_maxbackupindex" value="20"/>
+            <replace key="content" find="log4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.xa_log_appender.MaxFileSize={{ranger_xa_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.xa_log_appender.MaxBackupIndex={{ranger_xa_log_maxbackupindex}}"/>
+          </definition>
+        </changes>
+      </component>
+      <component name="RANGER_USERSYNC">
+        <changes>
+          <definition xsi:type="configure" id="usersync_log4j_parameterize" summary="Parameterizing Ranger Usersync Log4J Properties">
+            <type>usersync-log4j</type>
+            <set key="ranger_usersync_log_maxfilesize" value="256"/>
+            <set key="ranger_usersync_log_maxbackupindex" value="20"/>
+            <replace key="content" find="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.logFile.MaxFileSize = {{ranger_usersync_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.logFile.MaxBackupIndex = {{ranger_usersync_log_maxbackupindex}}"/>
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_6_0_0_disable_delta_sync_during_upgrade">
+            <type>ranger-ugsync-site</type>
+            <set key="ranger.usersync.ldap.deltasync" value="false"
+              if-type="ranger-ugsync-site" if-key="ranger.usersync.source.impl.class" if-value="org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder"/>
+          </definition>
+        </changes>
+      </component>
+      <component name="RANGER_TAGSYNC">
+        <changes>
+          <definition xsi:type="configure" id="tagsync_log4j_parameterize" summary="Parameterizing Ranger Tagsync Log4J Properties">
+            <type>tagsync-log4j</type>
+            <set key="ranger_tagsync_log_maxfilesize" value="256"/>
+            <set key="ranger_tagsync_log_number_of_backup_files" value="20"/>
+            <replace key="content" find="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.logFile.MaxFileSize = {{ranger_tagsync_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.logFile.MaxBackupIndex = {{ranger_tagsync_log_number_of_backup_files}}"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
+    <service name="RANGER_KMS">
+    <component name="RANGER_KMS_SERVER">
+      <changes>
+        <definition xsi:type="configure" id="kms_log4j_parameterize" summary="Parameterizing Ranger KMS Log4J Properties">
+          <type>kms-log4j</type>
+          <set key="ranger_kms_log_maxfilesize" value="256"/>
+          <set key="ranger_kms_log_maxbackupindex" value="20"/>
+          <set key="ranger_kms_audit_log_maxfilesize" value="256"/>
+          <set key="ranger_kms_audit_log_maxbackupindex" value="20"/>
+          <replace key="content" find="log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kms.MaxFileSize = {{ranger_kms_log_maxfilesize}}MB"/>
+          <replace key="content" find="log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kms.MaxBackupIndex = {{ranger_kms_log_maxbackupindex}}"/>
+          <replace key="content" find="log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kms-audit.MaxFileSize = {{ranger_kms_audit_log_maxfilesize}}MB"/>
+          <replace key="content" find="log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kms-audit.MaxBackupIndex = {{ranger_kms_audit_log_maxbackupindex}}"/>
+        </definition>
+        <definition xsi:type="configure" id="hdp_2_6_0_0_remove_ranger_kms_duplicate_ssl">
+          <type>ranger-kms-site</type>
+          <transfer operation="delete" delete-key="ranger.https.attrib.keystore.file" if-type="ranger-kms-site" if-key="ranger.service.https.attrib.keystore.file" if-key-state="present"/>
+          <transfer operation="delete" delete-key="ranger.service.https.attrib.clientAuth" if-type="ranger-kms-site" if-key="ranger.service.https.attrib.client.auth" if-key-state="present"/>
+        </definition>
+      </changes>
+    </component>
+    </service>
+
     <service name="HIVE">
       <component name="HIVE_SERVER">
         <changes>
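
Each <replace key="content" .../> in the log4j definitions above is a plain substring substitution on the content property of the named config type; running two replaces against the same appender line inserts both parameterized settings under it. A rough Python model of that behavior (apply_replace is a hypothetical helper for illustration, not Ambari's actual config-upgrade code):

    # Hypothetical model of <replace key="content" find=... replace-with=...>:
    # a literal substring substitution on the config type's "content" text.
    def apply_replace(content, find, replace_with):
        return content.replace(find, replace_with)

    appender = "log4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender"
    content = appender
    content = apply_replace(content, appender,
                            appender + "\nlog4j.appender.xa_log_appender.MaxFileSize={{ranger_xa_log_maxfilesize}}MB")
    content = apply_replace(content, appender,
                            appender + "\nlog4j.appender.xa_log_appender.MaxBackupIndex={{ranger_xa_log_maxbackupindex}}")
    # Both settings now follow the appender line (the second replace lands first).
    print(content)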

http://git-wip-us.apache.org/repos/asf/ambari/blob/0cb9194f/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
index 2c82cb3..5f1e06c 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
@@ -25,6 +25,7 @@
   <prerequisite-checks>
     <check>org.apache.ambari.server.checks.ServicesYarnWorkPreservingCheck</check>
     <check>org.apache.ambari.server.checks.JavaVersionCheck</check>
+    <check>org.apache.ambari.server.checks.RangerSSLConfigCheck</check>
     <configuration>
       <!-- Configuration properties for all pre-reqs including required pre-reqs -->
       <check-properties name="org.apache.ambari.server.checks.HiveDynamicServiceDiscoveryCheck">
@@ -117,6 +118,18 @@
           <function>prepare_express_upgrade</function>
         </task>
       </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Backup Ranger Database">
+        <task xsi:type="manual">
+          <message>Before continuing, please back up the Ranger Admin database on the following host(s): {{hosts.all}}.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Backup Ranger KMS Database">
+        <task xsi:type="manual">
+          <message>Before continuing, please back up the Ranger KMS database on the following host(s): {{hosts.all}}.</message>
+        </task>
+      </execute-stage>
     </group>
 
     <group xsi:type="stop" name="STOP_LOW_LEVEL_SERVICE_COMPONENTS" title="Stop Components for Core Services">
@@ -140,6 +153,16 @@
         <component>NFS_GATEWAY</component>
       </service>
 
+      <service name="RANGER">
+        <component>RANGER_USERSYNC</component>
+        <component>RANGER_ADMIN</component>
+        <component>RANGER_TAGSYNC</component>
+      </service>
+
+      <service name="RANGER_KMS">
+        <component>RANGER_KMS_SERVER</component>
+      </service>
+
       <service name="ZOOKEEPER">
         <component>ZOOKEEPER_SERVER</component>
       </service>
@@ -187,6 +210,44 @@
         <task xsi:type="configure" id="biginsights_4_2_hbase_env_config" />
       </execute-stage>
 
+      <!--RANGER-->
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger Admin">
+        <task xsi:type="configure" id="hdp_2_6_0_0_remove_bind_anonymous"/>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Parameterizing Ranger Admin Log4J Properties">
+        <task xsi:type="configure" id="admin_log4j_parameterize">
+          <summary>Updating the Ranger admin Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_USERSYNC" title="Parameterizing Ranger Usersync Log4J Properties">
+        <task xsi:type="configure" id="usersync_log4j_parameterize">
+          <summary>Updating the Ranger usersync Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_TAGSYNC" title="Parameterizing Ranger Tagsync Log4J Properties">
+        <task xsi:type="configure" id="tagsync_log4j_parameterize">
+          <summary>Updating the Ranger tagsync Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_USERSYNC" title="Apply config changes for Ranger Usersync">
+        <task xsi:type="configure" id="hdp_2_6_0_0_disable_delta_sync_during_upgrade"/>
+      </execute-stage>
+
+      <!--RANGER-KMS-->
+      <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Parameterizing Ranger Kms Log4J Properties">
+        <task xsi:type="configure" id="kms_log4j_parameterize">
+          <summary>Updating the KMS Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Apply config changes for Ranger KMS">
+        <task xsi:type="configure" id="hdp_2_6_0_0_remove_ranger_kms_duplicate_ssl"/>
+      </execute-stage>
+
       <!-- HIVE -->
       <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server">
         <task xsi:type="configure" id="biginsights_4_2_0_0_hive_env_configure"/>
@@ -254,6 +315,18 @@
       </service>
     </group>
 
+    <group xsi:type="restart" name="RANGER" title="Ranger">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <supports-auto-skip-failure>false</supports-auto-skip-failure>
+      <parallel-scheduler/>
+      <service name="RANGER">
+        <component>RANGER_ADMIN</component>
+        <component>RANGER_USERSYNC</component>
+        <component>RANGER_TAGSYNC</component>
+      </service>
+    </group>
+
     <group xsi:type="restart" name="HDFS" title="HDFS">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -292,6 +365,16 @@
       </execute-stage>
     </group>
 
+    <group xsi:type="restart" name="RANGER_KMS" title="Ranger KMS">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <supports-auto-skip-failure>false</supports-auto-skip-failure>
+      <parallel-scheduler/>
+      <service name="RANGER_KMS">
+        <component>RANGER_KMS_SERVER</component>
+      </service>
+    </group>
+
     <group xsi:type="restart" name="KAFKA" title="Kafka">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -374,6 +457,8 @@
       <skippable>true</skippable>
       <priority>
         <service>ZOOKEEPER</service>
+        <service>RANGER</service>
+        <service>RANGER_KMS</service>
         <service>HDFS</service>
         <service>KAFKA</service>
         <service>YARN</service>
@@ -567,6 +652,61 @@
       </component>
     </service>
 
+    <service name="RANGER">
+      <component name="RANGER_ADMIN">
+        <pre-upgrade>
+
+          <task xsi:type="execute" hosts="all">
+            <script>scripts/ranger_admin.py</script>
+            <function>set_pre_start</function>
+          </task>
+
+          <task xsi:type="execute" hosts="any">
+            <summary>Upgrading Ranger database schema</summary>
+            <script>scripts/ranger_admin.py</script>
+            <function>setup_ranger_database</function>
+          </task>
+
+          <task xsi:type="configure_function" hosts="all" />
+
+          <task xsi:type="execute" hosts="any">
+            <summary>Applying Ranger java patches</summary>
+            <script>scripts/ranger_admin.py</script>
+            <function>setup_ranger_java_patches</function>
+          </task>
+        </pre-upgrade>
+
+        <pre-downgrade copy-upgrade="true" />
+
+        <upgrade>
+          <task xsi:type="restart-task"/>
+        </upgrade>
+
+      </component>
+
+      <component name="RANGER_USERSYNC">
+        <upgrade>
+          <task xsi:type="restart-task"/>
+        </upgrade>
+      </component>
+
+      <component name="RANGER_TAGSYNC">
+
+        <pre-upgrade>
+          <task xsi:type="execute" hosts="all">
+            <script>scripts/ranger_tagsync.py</script>
+            <function>configure_atlas_user_for_tagsync</function>
+          </task>
+        </pre-upgrade>
+
+        <pre-downgrade copy-upgrade="true" />
+
+        <upgrade>
+          <task xsi:type="restart-task"/>
+        </upgrade>
+      </component>
+    </service>
+
     <service name="HDFS">
       <component name="NAMENODE">
         <upgrade>
@@ -605,6 +745,30 @@
       </component>
     </service>
 
+    <service name="RANGER_KMS">
+      <component name="RANGER_KMS_SERVER">
+        <pre-upgrade>
+          <task xsi:type="execute" hosts="any" sequential="true">
+            <summary>Upgrading Ranger KMS database schema</summary>
+            <script>scripts/kms_server.py</script>
+            <function>setup_ranger_kms_database</function>
+          </task>
+        </pre-upgrade>
+
+        <pre-downgrade>
+          <task xsi:type="execute" hosts="any" sequential="true">
+            <summary>Downgrading Ranger KMS database schema</summary>
+            <script>scripts/kms_server.py</script>
+            <function>setup_ranger_kms_database</function>
+          </task>
+        </pre-downgrade>
+
+        <upgrade>
+          <task xsi:type="restart-task"/>
+        </upgrade>
+      </component>
+    </service>
+
     <service name="MAPREDUCE2">
       <component name="HISTORYSERVER">
         <upgrade>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0cb9194f/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
index b46f476..070207a 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
@@ -18,6 +18,98 @@
 
 <upgrade-config-changes xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="upgrade-config.xsd">
   <services>
+    <service name="RANGER">
+      <component name="RANGER_ADMIN">
+        <changes>
+
+          <definition xsi:type="configure" id="hdp_2_5_0_0_remove_audit_db_flag">
+            <type>ranger-env</type>
+            <transfer operation="delete" delete-key="xasecure.audit.destination.db"/>
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_5_0_0_remove_audit_db_admin_properties">
+            <type>admin-properties</type>
+            <transfer operation="delete" delete-key="audit_db_name" />
+            <transfer operation="delete" delete-key="audit_db_user" />
+            <transfer operation="delete" delete-key="audit_db_password" />
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_5_0_0_remove_audit_db_ranger_admin_site">
+            <type>ranger-admin-site</type>
+            <set key="ranger.audit.source.type" value="solr"/>
+            <transfer operation="delete" delete-key="ranger.jpa.audit.jdbc.driver" />
+            <transfer operation="delete" delete-key="ranger.jpa.audit.jdbc.url" />
+            <transfer operation="delete" delete-key="ranger.jpa.audit.jdbc.user" />
+            <transfer operation="delete" delete-key="ranger.jpa.audit.jdbc.password" />
+            <transfer operation="delete" delete-key="ranger.jpa.audit.jdbc.credential.alias" />
+            <transfer operation="delete" delete-key="ranger.jpa.audit.jdbc.dialect" />
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_5_0_0_remove_sso_property">
+            <type>ranger-admin-site</type>
+            <transfer operation="delete" delete-key="ranger.sso.cookiename" />
+            <transfer operation="delete" delete-key="ranger.sso.query.param.originalurl" />
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_5_0_0_set_external_solrCloud_flag">
+            <type>ranger-env</type>
+            <set key="is_external_solrCloud_enabled" value="true"
+              if-type="ranger-env" if-key="is_solrCloud_enabled" if-value="true"/>
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_6_0_0_remove_bind_anonymous">
+            <type>ranger-env</type>
+            <transfer operation="delete" delete-key="bind_anonymous" />
+          </definition>
+        </changes>
+      </component>
+
+      <component name="RANGER_USERSYNC">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_6_0_0_disable_delta_sync_during_upgrade">
+            <type>ranger-ugsync-site</type>
+            <set key="ranger.usersync.ldap.deltasync" value="false"
+              if-type="ranger-ugsync-site" if-key="ranger.usersync.source.impl.class" if-value="org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
+
+    <service name="RANGER_KMS">
+      <component name="RANGER_KMS_SERVER">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_kms_audit_db">
+            <type>ranger-kms-audit</type>
+            <transfer operation="delete" delete-key="xasecure.audit.destination.db" />
+            <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.url" />
+            <transfer operation="delete" delete-key="xasecure.audit.destination.db.user" />
+            <transfer operation="delete" delete-key="xasecure.audit.destination.db.password" />
+            <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.driver" />
+            <transfer operation="delete" delete-key="xasecure.audit.credential.provider.file" />
+            <transfer operation="delete" delete-key="xasecure.audit.destination.db.batch.filespool.dir" />
+          </definition>
+          <definition xsi:type="configure" id="kms_log4j_parameterize" summary="Parameterizing Ranger KMS Log4J Properties">
+            <type>kms-log4j</type>
+            <set key="ranger_kms_log_maxfilesize" value="256"/>
+            <set key="ranger_kms_log_maxbackupindex" value="20"/>
+            <set key="ranger_kms_audit_log_maxfilesize" value="256"/>
+            <set key="ranger_kms_audit_log_maxbackupindex" value="20"/>
+            <replace key="content" find="log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kms.MaxFileSize = {{ranger_kms_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kms.MaxBackupIndex = {{ranger_kms_log_maxbackupindex}}"/>
+            <replace key="content" find="log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kms-audit.MaxFileSize = {{ranger_kms_audit_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kms-audit.MaxBackupIndex = {{ranger_kms_audit_log_maxbackupindex}}"/>
+          </definition>
+          <definition xsi:type="configure" id="hdp_2_6_0_0_remove_ranger_kms_duplicate_ssl">
+            <type>ranger-kms-site</type>
+            <transfer operation="delete" delete-key="ranger.https.attrib.keystore.file"
+              if-type="ranger-kms-site" if-key="ranger.service.https.attrib.keystore.file" if-key-state="present"/>
+            <transfer operation="delete" delete-key="ranger.service.https.attrib.clientAuth"
+              if-type="ranger-kms-site" if-key="ranger.service.https.attrib.client.auth" if-key-state="present"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
+
     <service name="HDFS">
       <component name="NAMENODE">
         <changes>
@@ -143,4 +235,6 @@
       </component>
     </service>
   </services>
+
+
 </upgrade-config-changes>
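
The hdp_2_6_0_0_remove_ranger_kms_duplicate_ssl definition uses conditional transfers: a legacy key is deleted only if its replacement (the if-key) is present in the same config type. A small model of that condition, assuming stub dicts rather than Ambari's real transfer implementation:

    # Hypothetical model of a conditional transfer delete: drop the legacy
    # key only when the replacement key is present (if-key-state="present").
    def delete_if_present(config, delete_key, if_key):
        if if_key in config:
            config.pop(delete_key, None)

    ranger_kms_site = {
        "ranger.https.attrib.keystore.file": "/old/keystore",  # legacy key
        "ranger.service.https.attrib.keystore.file": "/new/keystore",
    }
    delete_if_present(ranger_kms_site,
                      "ranger.https.attrib.keystore.file",
                      "ranger.service.https.attrib.keystore.file")
    print(sorted(ranger_kms_site))  # only the ranger.service.* key remains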

http://git-wip-us.apache.org/repos/asf/ambari/blob/0cb9194f/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
index b66c234..5b8f8d9 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
@@ -25,6 +25,7 @@
   <prerequisite-checks>
     <check>org.apache.ambari.server.checks.ServicesYarnWorkPreservingCheck</check>
     <check>org.apache.ambari.server.checks.JavaVersionCheck</check>
+    <check>org.apache.ambari.server.checks.RangerSSLConfigCheck</check>
     <configuration>
       <!-- Configuration properties for all pre-reqs including required pre-reqs -->
       <check-properties name="org.apache.ambari.server.checks.HiveDynamicServiceDiscoveryCheck">
@@ -117,6 +118,18 @@
           <function>prepare_express_upgrade</function>
         </task>
       </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Backup Ranger Database">
+        <task xsi:type="manual">
+          <message>Before continuing, please back up the Ranger Admin database on the following host(s): {{hosts.all}}.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Backup Ranger KMS Database">
+        <task xsi:type="manual">
+          <message>Before continuing, please back up the Ranger KMS database on the following host(s): {{hosts.all}}.</message>
+        </task>
+      </execute-stage>
     </group>
 
     <group xsi:type="stop" name="STOP_LOW_LEVEL_SERVICE_COMPONENTS" title="Stop Components for Core Services">
@@ -140,6 +153,16 @@
         <component>NFS_GATEWAY</component>
       </service>
 
+      <service name="RANGER">
+        <component>RANGER_USERSYNC</component>
+        <component>RANGER_ADMIN</component>
+        <component>RANGER_TAGSYNC</component>
+      </service>
+
+      <service name="RANGER_KMS">
+        <component>RANGER_KMS_SERVER</component>
+      </service>
+
       <service name="ZOOKEEPER">
         <component>ZOOKEEPER_SERVER</component>
       </service>
@@ -211,6 +234,70 @@
       <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Apply config changes for Oozie server">
         <task xsi:type="configure" id="biginsights_4_2_oozie_server_update_environment_tomcat" />
       </execute-stage>
+
+      <!-- RANGER -->
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger Admin">
+        <task xsi:type="configure" id="hdp_2_5_0_0_remove_audit_db_flag"/>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger Admin">
+        <task xsi:type="configure" id="hdp_2_5_0_0_remove_audit_db_admin_properties"/>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger Admin">
+        <task xsi:type="configure" id="hdp_2_5_0_0_remove_audit_db_ranger_admin_site"/>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger Admin">
+        <task xsi:type="configure" id="hdp_2_5_0_0_remove_sso_property"/>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger Admin">
+        <task xsi:type="configure" id="hdp_2_5_0_0_set_external_solrCloud_flag"/>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Calculating Ranger Properties">
+        <condition xsi:type="security" type="kerberos"/>
+        <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.RangerKerberosConfigCalculation">
+          <summary>Calculating Ranger Properties</summary>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Configuring Ranger Alerts">
+        <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.RangerWebAlertConfigAction">
+          <summary>Configuring Ranger Alerts</summary>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger Admin">
+        <task xsi:type="configure" id="hdp_2_6_0_0_remove_bind_anonymous"/>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_USERSYNC" title="Apply config changes for Ranger Usersync">
+        <task xsi:type="configure" id="hdp_2_6_0_0_disable_delta_sync_during_upgrade"/>
+      </execute-stage>
+
+      <!-- RANGER KMS -->
+      <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Apply config changes for Ranger KMS Server">
+        <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_kms_audit_db"/>
+      </execute-stage>
+
+      <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Parameterizing Ranger Kms Log4J Properties">
+        <task xsi:type="configure" id="kms_log4j_parameterize">
+          <summary>Updating the KMS Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Calculating Ranger Properties">
+        <condition xsi:type="security" type="kerberos"/>
+        <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.RangerKmsProxyConfig">
+          <summary>Adding Ranger proxy user properties</summary>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Apply config changes for Ranger KMS">
+        <task xsi:type="configure" id="hdp_2_6_0_0_remove_ranger_kms_duplicate_ssl"/>
+      </execute-stage>
     </group>
 
 
@@ -254,6 +341,18 @@
       </service>
     </group>
 
+    <group xsi:type="restart" name="RANGER" title="Ranger">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <supports-auto-skip-failure>false</supports-auto-skip-failure>
+      <parallel-scheduler/>
+      <service name="RANGER">
+        <component>RANGER_ADMIN</component>
+        <component>RANGER_USERSYNC</component>
+        <component>RANGER_TAGSYNC</component>
+      </service>
+    </group>
+
     <group xsi:type="restart" name="HDFS" title="HDFS">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -292,6 +391,16 @@
       </execute-stage>
     </group>
 
+    <group xsi:type="restart" name="RANGER_KMS" title="Ranger KMS">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <supports-auto-skip-failure>false</supports-auto-skip-failure>
+      <parallel-scheduler/>
+      <service name="RANGER_KMS">
+        <component>RANGER_KMS_SERVER</component>
+      </service>
+    </group>
+
     <group xsi:type="restart" name="KAFKA" title="Kafka">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -374,6 +483,8 @@
       <skippable>true</skippable>
       <priority>
         <service>ZOOKEEPER</service>
+        <service>RANGER</service>
+        <service>RANGER_KMS</service>
         <service>HDFS</service>
         <service>KAFKA</service>
         <service>YARN</service>
@@ -567,6 +678,61 @@
       </component>
     </service>
 
+    <service name="RANGER">
+      <component name="RANGER_ADMIN">
+        <pre-upgrade>
+
+          <task xsi:type="execute" hosts="all">
+            <script>scripts/ranger_admin.py</script>
+            <function>set_pre_start</function>
+          </task>
+
+          <task xsi:type="execute" hosts="any">
+            <summary>Upgrading Ranger database schema</summary>
+            <script>scripts/ranger_admin.py</script>
+            <function>setup_ranger_database</function>
+          </task>
+
+          <task xsi:type="configure_function" hosts="all" />
+
+          <task xsi:type="execute" hosts="any">
+            <summary>Applying Ranger java patches</summary>
+            <script>scripts/ranger_admin.py</script>
+            <function>setup_ranger_java_patches</function>
+          </task>
+        </pre-upgrade>
+
+        <pre-downgrade copy-upgrade="true" />
+
+        <upgrade>
+          <task xsi:type="restart-task"/>
+        </upgrade>
+
+      </component>
+
+      <component name="RANGER_USERSYNC">
+        <upgrade>
+          <task xsi:type="restart-task"/>
+        </upgrade>
+      </component>
+
+      <component name="RANGER_TAGSYNC">
+
+        <pre-upgrade>
+          <task xsi:type="execute" hosts="all">
+            <script>scripts/ranger_tagsync.py</script>
+            <function>configure_atlas_user_for_tagsync</function>
+          </task>
+        </pre-upgrade>
+
+        <pre-downgrade copy-upgrade="true" />
+
+        <upgrade>
+          <task xsi:type="restart-task"/>
+        </upgrade>
+      </component>
+    </service>
+
     <service name="HDFS">
       <component name="NAMENODE">
         <upgrade>
@@ -605,6 +771,30 @@
       </component>
     </service>
 
+    <service name="RANGER_KMS">
+      <component name="RANGER_KMS_SERVER">
+        <pre-upgrade>
+          <task xsi:type="execute" hosts="any" sequential="true">
+            <summary>Upgrading Ranger KMS database schema</summary>
+            <script>scripts/kms_server.py</script>
+            <function>setup_ranger_kms_database</function>
+          </task>
+        </pre-upgrade>
+
+        <pre-downgrade>
+          <task xsi:type="execute" hosts="any" sequential="true">
+            <summary>Downgrading Ranger KMS database schema</summary>
+            <script>scripts/kms_server.py</script>
+            <function>setup_ranger_kms_database</function>
+          </task>
+        </pre-downgrade>
+
+        <upgrade>
+          <task xsi:type="restart-task"/>
+        </upgrade>
+      </component>
+    </service>
+
     <service name="MAPREDUCE2">
       <component name="HISTORYSERVER">
         <upgrade>


[16/50] [abbrv] ambari git commit: AMBARI-21419 EU to auto switch to HDP tomcat for Oozie - updated config type (dili)

Posted by jo...@apache.org.
AMBARI-21419 EU to auto switch to HDP tomcat for Oozie - updated config type (dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/94eb0ddf
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/94eb0ddf
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/94eb0ddf

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 94eb0ddf5b3da68e239a9600bf5393f5de5677ec
Parents: c5f2efa
Author: Di Li <di...@apache.org>
Authored: Thu Jul 13 11:58:35 2017 -0400
Committer: Di Li <di...@apache.org>
Committed: Thu Jul 13 11:58:35 2017 -0400

----------------------------------------------------------------------
 .../resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml | 2 +-
 .../resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/94eb0ddf/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
index b51a744..8912322 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
@@ -136,7 +136,7 @@
             <replace key="content" find="export CATALINA_BASE=${CATALINA_BASE:-{{oozie_server_dir}}}" replace-with="export CATALINA_BASE={{oozie_server_dir}}" />            
           </definition>
           <definition xsi:type="configure" id="biginsights_4_2_oozie_server_update_environment_tomcat" summary="Updating oozie env">
-            <type>oozie-site</type>
+            <type>oozie-env</type>
             <replace key="content" find="/usr/lib/bigtop-tomcat7-7.0.75" replace-with="/usr/lib/bigtop-tomcat" />
           </definition>
         </changes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/94eb0ddf/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
index b46f476..3f6962b 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
@@ -136,7 +136,7 @@
             <replace key="content" find="export CATALINA_BASE=${CATALINA_BASE:-{{oozie_server_dir}}}" replace-with="export CATALINA_BASE={{oozie_server_dir}}" />            
           </definition>
           <definition xsi:type="configure" id="biginsights_4_2_oozie_server_update_environment_tomcat" summary="Updating oozie env">
-            <type>oozie-site</type>
+            <type>oozie-env</type>
             <replace key="content" find="/usr/lib/bigtop-tomcat7-7.0.75" replace-with="/usr/lib/bigtop-tomcat" />
           </definition>
         </changes>

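A note on the fix above: a <definition xsi:type="configure"> with a <replace> directive rewrites the "content" property of the named config type. oozie-site is a plain XML property set with no "content" key, so the original definition presumably matched nothing; oozie-env is the type whose content template actually carries the tomcat paths. A rough sketch of the effect (illustrative only, not Ambari's actual implementation):

    # Hypothetical sketch of what the <replace> directive amounts to:
    content = desired_config['oozie-env']['content']
    content = content.replace('/usr/lib/bigtop-tomcat7-7.0.75', '/usr/lib/bigtop-tomcat')
    desired_config['oozie-env']['content'] = content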

[40/50] [abbrv] ambari git commit: AMBARI-21474. HBase REST Server is missing after Ambari upgrade

Posted by jo...@apache.org.
AMBARI-21474. HBase REST Server is missing after Ambari upgrade


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b1c3784d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b1c3784d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b1c3784d

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: b1c3784dad1676bad22514e520075ebc267463fc
Parents: 7764e38
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Fri Jul 14 13:28:59 2017 +0200
Committer: Attila Doroszlai <ad...@hortonworks.com>
Committed: Mon Jul 17 13:34:52 2017 +0200

----------------------------------------------------------------------
 .../HBASE/package/files/draining_servers.rb     | 164 +++++++
 .../HBASE/package/files/hbase-smoke-cleanup.sh  |  23 +
 .../HBASE/package/files/hbaseSmokeVerify.sh     |  34 ++
 .../services/HBASE/package/scripts/__init__.py  |  19 +
 .../services/HBASE/package/scripts/functions.py |  54 +++
 .../services/HBASE/package/scripts/hbase.py     | 234 ++++++++++
 .../HBASE/package/scripts/hbase_client.py       |  82 ++++
 .../HBASE/package/scripts/hbase_decommission.py |  93 ++++
 .../HBASE/package/scripts/hbase_master.py       | 163 +++++++
 .../HBASE/package/scripts/hbase_regionserver.py | 166 +++++++
 .../package/scripts/hbase_restgatewayserver.py  |  83 ++++
 .../HBASE/package/scripts/hbase_service.py      |  93 ++++
 .../HBASE/package/scripts/hbase_upgrade.py      |  41 ++
 .../services/HBASE/package/scripts/params.py    |  29 ++
 .../HBASE/package/scripts/params_linux.py       | 447 +++++++++++++++++++
 .../HBASE/package/scripts/params_windows.py     |  43 ++
 .../package/scripts/phoenix_queryserver.py      |  88 ++++
 .../HBASE/package/scripts/phoenix_service.py    |  55 +++
 .../HBASE/package/scripts/service_check.py      |  95 ++++
 .../HBASE/package/scripts/setup_ranger_hbase.py | 106 +++++
 .../HBASE/package/scripts/status_params.py      |  68 +++
 .../services/HBASE/package/scripts/upgrade.py   |  65 +++
 ...-metrics2-hbase.properties-GANGLIA-MASTER.j2 | 117 +++++
 ...doop-metrics2-hbase.properties-GANGLIA-RS.j2 | 116 +++++
 .../HBASE/package/templates/hbase-smoke.sh.j2   |  44 ++
 .../HBASE/package/templates/hbase.conf.j2       |  35 ++
 .../package/templates/hbase_client_jaas.conf.j2 |  23 +
 .../templates/hbase_grant_permissions.j2        |  40 ++
 .../package/templates/hbase_master_jaas.conf.j2 |  26 ++
 .../templates/hbase_queryserver_jaas.conf.j2    |  26 ++
 .../templates/hbase_regionserver_jaas.conf.j2   |  26 ++
 .../package/templates/hbase_rest_jaas.conf.j2   |  26 ++
 .../HBASE/package/templates/regionservers.j2    |  20 +
 33 files changed, 2744 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/files/draining_servers.rb
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/files/draining_servers.rb b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/files/draining_servers.rb
new file mode 100644
index 0000000..5bcb5b6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/files/draining_servers.rb
@@ -0,0 +1,164 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Add or remove servers from draining mode via zookeeper 
+
+require 'optparse'
+include Java
+
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.client.HBaseAdmin
+import org.apache.hadoop.hbase.zookeeper.ZKUtil
+import org.apache.commons.logging.Log
+import org.apache.commons.logging.LogFactory
+
+# Name of this script
+NAME = "draining_servers"
+
+# Do command-line parsing
+options = {}
+optparse = OptionParser.new do |opts|
+  opts.banner = "Usage: ./hbase org.jruby.Main #{NAME}.rb [options] add|remove|list <hostname>|<host:port>|<servername> ..."
+  opts.separator 'Add, remove, or list servers in draining mode. Accepts either a hostname to drain all region servers ' +
+                 'on that host, a host:port pair, or a host,port,startCode triplet. More than one server can be given, separated by spaces'
+  opts.on('-h', '--help', 'Display usage information') do
+    puts opts
+    exit
+  end
+  options[:debug] = false
+  opts.on('-d', '--debug', 'Display extra debug logging') do
+    options[:debug] = true
+  end
+end
+optparse.parse!
+
+# Return array of servernames where servername is hostname+port+startcode
+# comma-delimited
+def getServers(admin)
+  serverInfos = admin.getClusterStatus().getServerInfo()
+  servers = []
+  for server in serverInfos
+    servers << server.getServerName()
+  end
+  return servers
+end
+
+def getServerNames(hostOrServers, config)
+  ret = []
+  
+  for hostOrServer in hostOrServers
+    # check whether it is already serverName. No need to connect to cluster
+    parts = hostOrServer.split(',')
+    if parts.size() == 3
+      ret << hostOrServer
+    else 
+      admin = HBaseAdmin.new(config) if not admin
+      servers = getServers(admin)
+
+      hostOrServer = hostOrServer.gsub(/:/, ",")
+      for server in servers 
+        ret << server if server.start_with?(hostOrServer)
+      end
+    end
+  end
+  
+  admin.close() if admin
+  return ret
+end
+
+def addServers(options, hostOrServers)
+  config = HBaseConfiguration.create()
+  servers = getServerNames(hostOrServers, config)
+  
+  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
+  parentZnode = zkw.drainingZNode
+  
+  begin
+    for server in servers
+      node = ZKUtil.joinZNode(parentZnode, server)
+      ZKUtil.createAndFailSilent(zkw, node)
+    end
+  ensure
+    zkw.close()
+  end
+end
+
+def removeServers(options, hostOrServers)
+  config = HBaseConfiguration.create()
+  servers = getServerNames(hostOrServers, config)
+  
+  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
+  parentZnode = zkw.drainingZNode
+  
+  begin
+    for server in servers
+      node = ZKUtil.joinZNode(parentZnode, server)
+      ZKUtil.deleteNodeFailSilent(zkw, node)
+    end
+  ensure
+    zkw.close()
+  end
+end
+
+# list servers in draining mode
+def listServers(options)
+  config = HBaseConfiguration.create()
+  
+  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
+  parentZnode = zkw.drainingZNode
+
+  servers = ZKUtil.listChildrenNoWatch(zkw, parentZnode)
+  servers.each {|server| puts server}
+end
+
+hostOrServers = ARGV[1..ARGV.size()]
+
+# Create a logger and disable the noisy DEBUG-level client logging
+def configureLogging(options)
+  apacheLogger = LogFactory.getLog(NAME)
+  # Configure log4j to not spew so much
+  unless (options[:debug]) 
+    logger = org.apache.log4j.Logger.getLogger("org.apache.hadoop.hbase")
+    logger.setLevel(org.apache.log4j.Level::WARN)
+    logger = org.apache.log4j.Logger.getLogger("org.apache.zookeeper")
+    logger.setLevel(org.apache.log4j.Level::WARN)
+  end
+  return apacheLogger
+end
+
+# Create a logger and save it to ruby global
+$LOG = configureLogging(options)
+case ARGV[0]
+  when 'add'
+    if ARGV.length < 2
+      puts optparse
+      exit 1
+    end
+    addServers(options, hostOrServers)
+  when 'remove'
+    if ARGV.length < 2
+      puts optparse
+      exit 1
+    end
+    removeServers(options, hostOrServers)
+  when 'list'
+    listServers(options)
+  else
+    puts optparse
+    exit 3
+end
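
For reference, typical invocations of this script follow the banner above (hostnames and the port here are illustrative):

    hbase org.jruby.Main draining_servers.rb add rs1.example.com rs2.example.com:16020
    hbase org.jruby.Main draining_servers.rb list
    hbase org.jruby.Main draining_servers.rb remove rs1.example.com

Adding a server creates a child znode under the cluster's draining znode (and remove deletes it), which the master consults when assigning regions.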

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/files/hbase-smoke-cleanup.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/files/hbase-smoke-cleanup.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/files/hbase-smoke-cleanup.sh
new file mode 100644
index 0000000..cde19e4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/files/hbase-smoke-cleanup.sh
@@ -0,0 +1,23 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+disable 'ambarismoketest'
+drop 'ambarismoketest'
+exit
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/files/hbaseSmokeVerify.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/files/hbaseSmokeVerify.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/files/hbaseSmokeVerify.sh
new file mode 100644
index 0000000..19276f3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/files/hbaseSmokeVerify.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+conf_dir=$1
+data=$2
+hbase_cmd=$3
+echo "scan 'ambarismoketest'" | $hbase_cmd --config $conf_dir shell > /tmp/hbase_chk_verify
+cat /tmp/hbase_chk_verify
+echo "Looking for $data"
+awk '/id/,/date/' /tmp/hbase_chk_verify
+if [ "$?" -ne 0 ]
+then
+  exit 1
+fi
+
+grep -q '1 row(s)' /tmp/hbase_chk_verify
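
A hypothetical invocation (paths and the expected value are placeholders); the arguments are the conf dir, the data written by the smoke test, and the hbase command, in that order:

    bash hbaseSmokeVerify.sh /etc/hbase/conf some_unique_id /usr/bin/hbase

Since the grep is the last command, its exit status is what the caller sees: the check passes only when the scan reports exactly one row.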

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/__init__.py
new file mode 100644
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/functions.py
new file mode 100644
index 0000000..f98b9b9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/functions.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import re
+import math
+import datetime
+
+from resource_management.core.shell import checked_call
+
+def calc_xmn_from_xms(heapsize_str, xmn_percent, xmn_max):
+  """
+  @param heapsize_str: str (e.g '1000m')
+  @param xmn_percent: float (e.g 0.2)
+  @param xmn_max: integer (e.g 512)
+  """
+  heapsize = int(re.search('\d+',heapsize_str).group(0))
+  heapsize_unit = re.search('\D+',heapsize_str).group(0)
+  xmn_val = int(math.floor(heapsize*xmn_percent))
+  xmn_val -= xmn_val % 8
+  
+  result_xmn_val = xmn_max if xmn_val > xmn_max else xmn_val
+  return str(result_xmn_val) + heapsize_unit
+
+def ensure_unit_for_memory(memory_size):
+  memory_size_values = re.findall('\d+', str(memory_size))
+  memory_size_unit = re.findall('\D+', str(memory_size))
+
+  if len(memory_size_values) > 0:
+    unit = 'm'
+    if len(memory_size_unit) > 0:
+      unit = memory_size_unit[0]
+    if unit not in ['b', 'k', 'm', 'g', 't', 'p']:
+      raise Exception("Memory size unit error. %s - wrong unit" % unit)
+    return "%s%s" % (memory_size_values[0], unit)
+  else:
+    raise Exception('Memory size cannot be calculated')
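
A few worked values for the helpers above (a sketch, not part of the patch):

    # calc_xmn_from_xms: 20% of 1000m is 200m, already a multiple of 8
    calc_xmn_from_xms('1000m', 0.2, 512)   # -> '200m'
    # 20% of 4096m is 819m, rounded down to 816m, then capped at xmn_max
    calc_xmn_from_xms('4096m', 0.2, 512)   # -> '512m'

    # ensure_unit_for_memory: a bare number defaults to megabytes
    ensure_unit_for_memory('1024')         # -> '1024m'
    ensure_unit_for_memory('2g')           # -> '2g'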

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase.py
new file mode 100644
index 0000000..837cf41
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase.py
@@ -0,0 +1,234 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+from resource_management import *
+import sys
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+
+from urlparse import urlparse
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def hbase(name=None):
+  import params
+  XmlConfig("hbase-site.xml",
+            conf_dir = params.hbase_conf_dir,
+            configurations = params.config['configurations']['hbase-site'],
+            configuration_attributes=params.config['configuration_attributes']['hbase-site']
+  )
+
+  if params.service_map.has_key(name):
+    # Manually overriding service logon user & password set by the installation package
+    service_name = params.service_map[name]
+    ServiceConfig(service_name,
+                  action="change_user",
+                  username = params.hbase_user,
+                  password = Script.get_password(params.hbase_user))
+
+# name is 'master' or 'regionserver' or 'queryserver' or 'client'
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def hbase(name=None):
+  import params
+
+  Directory( params.etc_prefix_dir,
+      mode=0755
+  )
+
+  Directory( params.hbase_conf_dir,
+      owner = params.hbase_user,
+      group = params.user_group,
+      create_parents = True
+  )
+   
+  Directory(params.java_io_tmpdir,
+      create_parents = True,
+      mode=0777
+  )
+
+  # If a file location is specified in the ioengine parameter,
+  # ensure that its parent directory exists, creating it with
+  # ownership assigned to hbase:hadoop if it does not.
+  ioengine_input = params.ioengine_param
+  if ioengine_input != None:
+    if ioengine_input.startswith("file:/"):
+      ioengine_fullpath = ioengine_input[5:]
+      ioengine_dir = os.path.dirname(ioengine_fullpath)
+      Directory(ioengine_dir,
+          owner = params.hbase_user,
+          group = params.user_group,
+          create_parents = True,
+          mode = 0755
+      )
+  
+  parent_dir = os.path.dirname(params.tmp_dir)
+  # In case there are several placeholders in the path
+  while ("${" in parent_dir):
+    parent_dir = os.path.dirname(parent_dir)
+  if parent_dir != os.path.abspath(os.sep) :
+    Directory (parent_dir,
+          create_parents = True,
+          cd_access="a",
+    )
+    Execute(("chmod", "1777", parent_dir), sudo=True)
+
+  XmlConfig( "hbase-site.xml",
+            conf_dir = params.hbase_conf_dir,
+            configurations = params.config['configurations']['hbase-site'],
+            configuration_attributes=params.config['configuration_attributes']['hbase-site'],
+            owner = params.hbase_user,
+            group = params.user_group
+  )
+
+  XmlConfig( "core-site.xml",
+             conf_dir = params.hbase_conf_dir,
+             configurations = params.config['configurations']['core-site'],
+             configuration_attributes=params.config['configuration_attributes']['core-site'],
+             owner = params.hbase_user,
+             group = params.user_group
+  )
+
+  if 'hdfs-site' in params.config['configurations']:
+    XmlConfig( "hdfs-site.xml",
+            conf_dir = params.hbase_conf_dir,
+            configurations = params.config['configurations']['hdfs-site'],
+            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+            owner = params.hbase_user,
+            group = params.user_group
+    )
+
+    XmlConfig("hdfs-site.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['hdfs-site'],
+            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+            owner=params.hdfs_user,
+            group=params.user_group
+    )
+
+  if 'hbase-policy' in params.config['configurations']:
+    XmlConfig( "hbase-policy.xml",
+            conf_dir = params.hbase_conf_dir,
+            configurations = params.config['configurations']['hbase-policy'],
+            configuration_attributes=params.config['configuration_attributes']['hbase-policy'],
+            owner = params.hbase_user,
+            group = params.user_group
+    )
+  # Manually overriding ownership of file installed by hadoop package
+  else: 
+    File( format("{params.hbase_conf_dir}/hbase-policy.xml"),
+      owner = params.hbase_user,
+      group = params.user_group
+    )
+
+  File(format("{hbase_conf_dir}/hbase-env.sh"),
+       owner = params.hbase_user,
+       content=InlineTemplate(params.hbase_env_sh_template),
+       group = params.user_group,
+  )
+  
+  # On some OSes this folder may not exist, so create it before pushing files there
+  Directory(params.limits_conf_dir,
+            create_parents = True,
+            owner='root',
+            group='root'
+            )
+  
+  File(os.path.join(params.limits_conf_dir, 'hbase.conf'),
+       owner='root',
+       group='root',
+       mode=0644,
+       content=Template("hbase.conf.j2")
+       )
+    
+  hbase_TemplateConfig( params.metric_prop_file_name,
+    tag = 'GANGLIA-MASTER' if name == 'master' else 'GANGLIA-RS'
+  )
+
+  hbase_TemplateConfig( 'regionservers')
+
+  if params.security_enabled:
+    hbase_TemplateConfig( format("hbase_{name}_jaas.conf"))
+  
+  if name != "client":
+    Directory( params.pid_dir,
+      owner = params.hbase_user,
+      create_parents = True,
+      cd_access = "a",
+      mode = 0755,
+    )
+  
+    Directory (params.log_dir,
+      owner = params.hbase_user,
+      create_parents = True,
+      cd_access = "a",
+      mode = 0755,
+    )
+
+  if (params.log4j_props != None):
+    File(format("{params.hbase_conf_dir}/log4j.properties"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.hbase_user,
+         content=params.log4j_props
+    )
+  elif (os.path.exists(format("{params.hbase_conf_dir}/log4j.properties"))):
+    File(format("{params.hbase_conf_dir}/log4j.properties"),
+      mode=0644,
+      group=params.user_group,
+      owner=params.hbase_user
+    )
+  if name == "master":
+    if not params.hbase_root_dir_scheme or params.hbase_root_dir_scheme == urlparse(params.default_fs).scheme:  # if hbase.rootdir has no scheme specified or is in HDFS
+      # Create hbase.rootdir
+      params.HdfsResource(params.hbase_hdfs_root_dir,
+                          type="directory",
+                          action="create_on_execute",
+                          owner=params.hbase_user
+      )
+    else:  # hbase.rootdir is not in HDFS
+      # Do NOT create hbase.rootdir, but only log a message
+      Logger.info(format("hbase.rootdir '{params.hbase_hdfs_root_dir}' not created, as its location is not in HDFS."))
+
+    params.HdfsResource(params.hbase_staging_dir,
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.hbase_user,
+                         mode=0711
+    )
+    if params.create_hbase_home_directory:
+      params.HdfsResource(params.hbase_home_directory,
+                          type="directory",
+                          action="create_on_execute",
+                          owner=params.hbase_user,
+                          mode=0755
+      )
+    params.HdfsResource(None, action="execute")
+
+  if params.phoenix_enabled:
+    Package(params.phoenix_package,
+            retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
+            retry_count=params.agent_stack_retry_count)
+
+def hbase_TemplateConfig(name, tag=None):
+  import params
+
+  TemplateConfig( format("{hbase_conf_dir}/{name}"),
+      owner = params.hbase_user,
+      template_tag = tag
+  )
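
On hbase_TemplateConfig: by the TemplateConfig convention in resource_management (an assumption worth verifying against that library), the tag is folded into the template file name, so on a master the call

    hbase_TemplateConfig(params.metric_prop_file_name, tag='GANGLIA-MASTER')

(where metric_prop_file_name is presumably 'hadoop-metrics2-hbase.properties') renders hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2 into {hbase_conf_dir}/hadoop-metrics2-hbase.properties, matching the two GANGLIA templates added later in this commit.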

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_client.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_client.py
new file mode 100644
index 0000000..f18a96a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_client.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from hbase import hbase
+from ambari_commons import OSCheck, OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+from resource_management.core.exceptions import ClientComponentHasNoStatus
+
+class HbaseClient(Script):
+  def install(self, env):
+    import params
+    env.set_params(params)
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hbase(name='client')
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HbaseClientWindows(HbaseClient):
+  pass
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HbaseClientDefault(HbaseClient):
+  def get_component_name(self):
+    return "hbase-client"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version): 
+      conf_select.select(params.stack_name, "hbase", params.version)
+      stack_select.select("hbase-client", params.version)
+
+      # phoenix may not always be deployed
+      try:
+        stack_select.select("phoenix-client", params.version)
+      except Exception as e:
+        print "Ignoring error due to missing phoenix-client"
+        print str(e)
+
+
+      # set all of the hadoop clients since hbase client is upgraded as part
+      # of the final "CLIENTS" group and we need to ensure that hadoop-client
+      # is also set
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-client", params.version)
+
+
+if __name__ == "__main__":
+  HbaseClient().execute()
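
A sketch of what the select calls in pre_upgrade_restart do (the version and paths are illustrative; BigInsights lays its stacks out under /usr/iop):

    # stack_select.select("hbase-client", "4.2.5.0-0000") repoints the
    # component's "current" symlink at the new bits, roughly:
    #   /usr/iop/current/hbase-client -> /usr/iop/4.2.5.0-0000/hbase
    # conf_select.select(...) does the same for the conf directories.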

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_decommission.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_decommission.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_decommission.py
new file mode 100644
index 0000000..022465a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_decommission.py
@@ -0,0 +1,93 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def hbase_decommission(env):
+  import params
+
+  env.set_params(params)
+  File(params.region_drainer, content=StaticFile("draining_servers.rb"), owner=params.hbase_user, mode="f")
+
+  hosts = params.hbase_excluded_hosts.split(",")
+  for host in hosts:
+    if host:
+      if params.hbase_drain_only == True:
+        regiondrainer_cmd = format("cmd /c {hbase_executable} org.jruby.Main {region_drainer} remove {host}")
+        Execute(regiondrainer_cmd, user=params.hbase_user, logoutput=True)
+      else:
+        regiondrainer_cmd = format("cmd /c {hbase_executable} org.jruby.Main {region_drainer} add {host}")
+        regionmover_cmd = format("cmd /c {hbase_executable} org.jruby.Main {region_mover} unload {host}")
+        Execute(regiondrainer_cmd, user=params.hbase_user, logoutput=True)
+        Execute(regionmover_cmd, user=params.hbase_user, logoutput=True)
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def hbase_decommission(env):
+  import params
+
+  env.set_params(params)
+  kinit_cmd = params.kinit_cmd_master
+
+  File(params.region_drainer,
+       content=StaticFile("draining_servers.rb"),
+       mode=0755
+  )
+  
+  # Default to an empty list so the loops below are safe when neither list is set
+  hosts = []
+  if params.hbase_excluded_hosts and params.hbase_excluded_hosts.split(","):
+    hosts = params.hbase_excluded_hosts.split(",")
+  elif params.hbase_included_hosts and params.hbase_included_hosts.split(","):
+    hosts = params.hbase_included_hosts.split(",")
+
+  if params.hbase_drain_only:
+    for host in hosts:
+      if host:
+        regiondrainer_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} {master_security_config} org.jruby.Main {region_drainer} remove {host}")
+        Execute(regiondrainer_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
+        pass
+    pass
+
+  else:
+    for host in hosts:
+      if host:
+        regiondrainer_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} {master_security_config} org.jruby.Main {region_drainer} add {host}")
+        regionmover_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} {master_security_config} org.jruby.Main {region_mover} unload {host}")
+
+        Execute(regiondrainer_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
+
+        Execute(regionmover_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
+      pass
+    pass
+  pass
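
For a concrete picture, on a secured cluster the drain-only branch executes something like the following per host (every segment here is a hypothetical expansion of the format() template above, with the JAAS options from master_security_config inserted after the --config flag):

    kinit -kt /etc/security/keytabs/hbase.headless.keytab hbase; \
      hbase --config /etc/hbase/conf org.jruby.Main draining_servers.rb remove rs1.example.com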

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_master.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_master.py
new file mode 100644
index 0000000..ae0e0d9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_master.py
@@ -0,0 +1,163 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+
+from resource_management.libraries.script.script import Script
+from resource_management.core.resources.service import Service
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.format import format
+from hbase import hbase
+from hbase_service import hbase_service
+from hbase_decommission import hbase_decommission
+import upgrade
+from setup_ranger_hbase import setup_ranger_hbase
+from ambari_commons import OSCheck, OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+if OSCheck.is_windows_family():
+  from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
+
+class HbaseMaster(Script):
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hbase(name='master')
+
+  def install(self, env):
+    import params
+    env.set_params(params)
+    self.install_packages(env)
+
+  def decommission(self, env):
+    import params
+    env.set_params(params)
+    hbase_decommission(env)
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HbaseMasterWindows(HbaseMaster):
+  def start(self, env):
+    import status_params
+    self.configure(env)
+    Service(status_params.hbase_master_win_service_name, action="start")
+
+  def stop(self, env):
+    import status_params
+    env.set_params(status_params)
+    Service(status_params.hbase_master_win_service_name, action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_windows_service_status(status_params.hbase_master_win_service_name)
+
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HbaseMasterDefault(HbaseMaster):
+  def get_component_name(self):
+    return "hbase-master"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    upgrade.prestart(env, "hbase-master")
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env) # for security
+    setup_ranger_hbase(upgrade_type=upgrade_type, service_name="hbase-master")
+    hbase_service('master', action='start')
+    
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    hbase_service('master', action = 'stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{pid_dir}/hbase-{hbase_user}-master.pid")
+    check_process_status(pid_file)
+
+  def security_status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      props_value_check = {"hbase.security.authentication" : "kerberos",
+                           "hbase.security.authorization": "true"}
+      props_empty_check = ['hbase.master.keytab.file',
+                           'hbase.master.kerberos.principal']
+      props_read_check = ['hbase.master.keytab.file']
+      hbase_site_expectations = build_expectations('hbase-site', props_value_check, props_empty_check,
+                                                  props_read_check)
+
+      hbase_expectations = {}
+      hbase_expectations.update(hbase_site_expectations)
+
+      security_params = get_params_from_filesystem(status_params.hbase_conf_dir,
+                                                   {'hbase-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, hbase_expectations)
+      if not result_issues:  # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ( 'hbase-site' not in security_params
+               or 'hbase.master.keytab.file' not in security_params['hbase-site']
+               or 'hbase.master.kerberos.principal' not in security_params['hbase-site']):
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal are not set properly."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.hbase_user,
+                                security_params['hbase-site']['hbase.master.keytab.file'],
+                                security_params['hbase-site']['hbase.master.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+      
+  def get_log_folder(self):
+    import params
+    return params.log_dir
+  
+  def get_user(self):
+    import params
+    return params.hbase_user
+
+if __name__ == "__main__":
+  HbaseMaster().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_regionserver.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_regionserver.py
new file mode 100644
index 0000000..370167b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_regionserver.py
@@ -0,0 +1,166 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+
+from resource_management import *
+from resource_management.core import shell
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+from hbase import hbase
+from hbase_service import hbase_service
+import upgrade
+from setup_ranger_hbase import setup_ranger_hbase
+from ambari_commons import OSCheck, OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+
+class HbaseRegionServer(Script):
+  def install(self, env):
+    import params
+    env.set_params(params)
+    self.install_packages(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hbase(name='regionserver')
+
+  def decommission(self, env):
+    print "Decommission not yet implemented!"
+
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HbaseRegionServerWindows(HbaseRegionServer):
+  def start(self, env):
+    import status_params
+    self.configure(env)
+    Service(status_params.hbase_regionserver_win_service_name, action="start")
+
+  def stop(self, env):
+    import status_params
+    env.set_params(status_params)
+    Service(status_params.hbase_regionserver_win_service_name, action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_windows_service_status(status_params.hbase_regionserver_win_service_name)
+
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HbaseRegionServerDefault(HbaseRegionServer):
+  def get_component_name(self):
+    return "hbase-regionserver"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    upgrade.prestart(env, "hbase-regionserver")
+
+  def post_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    upgrade.post_regionserver(env)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env) # for security
+    setup_ranger_hbase(upgrade_type=upgrade_type, service_name="hbase-regionserver")
+
+    hbase_service('regionserver', action='start')
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    hbase_service( 'regionserver',
+      action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{pid_dir}/hbase-{hbase_user}-regionserver.pid")
+    check_process_status(pid_file)
+
+  def security_status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      props_value_check = {"hbase.security.authentication" : "kerberos",
+                           "hbase.security.authorization": "true"}
+      props_empty_check = ['hbase.regionserver.keytab.file',
+                           'hbase.regionserver.kerberos.principal']
+      props_read_check = ['hbase.regionserver.keytab.file']
+      hbase_site_expectations = build_expectations('hbase-site', props_value_check, props_empty_check,
+                                                   props_read_check)
+
+      hbase_expectations = {}
+      hbase_expectations.update(hbase_site_expectations)
+
+      security_params = get_params_from_filesystem(status_params.hbase_conf_dir,
+                                                   {'hbase-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, hbase_expectations)
+      if not result_issues:  # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ( 'hbase-site' not in security_params
+               or 'hbase.regionserver.keytab.file' not in security_params['hbase-site']
+               or 'hbase.regionserver.kerberos.principal' not in security_params['hbase-site']):
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal are not set properly."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.hbase_user,
+                                security_params['hbase-site']['hbase.regionserver.keytab.file'],
+                                security_params['hbase-site']['hbase.regionserver.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+  def get_log_folder(self):
+    import params
+    return params.log_dir
+  
+  def get_user(self):
+    import params
+    return params.hbase_user
+
+if __name__ == "__main__":
+  HbaseRegionServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_restgatewayserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_restgatewayserver.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_restgatewayserver.py
new file mode 100644
index 0000000..1d028d6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_restgatewayserver.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hbase import hbase
+from hbase_service import hbase_service
+from hbase_decommission import hbase_decommission
+import upgrade
+
+class HbaseRestGatewayServer(Script):
+  def get_component_name(self):
+    return "hbase-restserver"
+
+  def install(self, env):
+    self.install_packages(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hbase(name='rest')
+    
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    upgrade.prestart(env, "hbase-restserver")
+
+  def post_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env) # for security
+
+    hbase_service( 'rest',
+      action = 'start'
+    )
+    
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    hbase_service( 'rest',
+      action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{pid_dir}/hbase-{hbase_user}-rest.pid")
+    check_process_status(pid_file)
+
+  def decommission(self, env):
+    import params
+    env.set_params(params)
+
+    hbase_decommission(env)
+
+
+if __name__ == "__main__":
+  HbaseRestGatewayServer().execute()
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_service.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_service.py
new file mode 100644
index 0000000..41daaeb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_service.py
@@ -0,0 +1,93 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from datetime import datetime
+
+from resource_management.core.resources.system import Execute
+from resource_management.core.resources.system import File
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.format import format
+from resource_management.core.shell import as_sudo
+from resource_management.libraries.functions.show_logs import show_logs
+from resource_management.core.logger import Logger
+
+def hbase_service(name, action = 'start'):
+    import params
+  
+    role = name
+    cmd = format("{daemon_script} --config {hbase_conf_dir}")
+    pid_file = format("{pid_dir}/hbase-{hbase_user}-{role}.pid")
+    pid_expression = as_sudo(["cat", pid_file])
+    no_op_test = as_sudo(["test", "-f", pid_file]) + format(" && ps -p `{pid_expression}` >/dev/null 2>&1")
+    
+    # back up (rename) the WAL directory if the HBase version has moved down
+    if params.to_backup_wal_dir:
+      wal_directory = params.wal_directory
+      timestamp = datetime.now()
+      timestamp_format = '%Y%m%d%H%M%S'
+      wal_directory_backup = '%s_%s' % (wal_directory, timestamp.strftime(timestamp_format))
+
+      check_if_wal_dir_exists = format("hdfs dfs -ls {wal_directory}")
+      wal_dir_exists = False
+      try:
+        Execute(check_if_wal_dir_exists,
+                user=params.hbase_user
+                )
+        wal_dir_exists = True
+      except Exception, e:
+        Logger.error(format("Did not find HBase WAL directory {wal_directory}. It's possible that it was already moved. Exception: {e.message}"))
+
+      if wal_dir_exists:
+        move_wal_dir_cmd = format("hdfs dfs -mv {wal_directory} {wal_directory_backup}")
+        try:
+          Execute(move_wal_dir_cmd,
+            user=params.hbase_user
+          )
+        except Exception, e:
+          Logger.error(format("Failed to backup HBase WAL directory, command: {move_wal_dir_cmd} . Exception: {e.message}"))
+
+    if action == 'start':
+      daemon_cmd = format("{cmd} start {role}")
+      
+      try:
+        Execute ( daemon_cmd,
+          not_if = no_op_test,
+          user = params.hbase_user
+        )
+      except:
+        show_logs(params.log_dir, params.hbase_user)
+        raise
+    elif action == 'stop':
+      daemon_cmd = format("{cmd} stop {role}")
+
+      try:
+        Execute ( daemon_cmd,
+          user = params.hbase_user,
+          only_if = no_op_test,
+          # BUGFIX: hbase regionserver sometimes hangs when nn is in safemode
+          timeout = params.hbase_regionserver_shutdown_timeout,
+          on_timeout = format("! ( {no_op_test} ) || {sudo} -H -E kill -9 `{pid_expression}`"),
+        )
+      except:
+        show_logs(params.log_dir, params.hbase_user)
+        raise
+      
+      File(pid_file,
+           action = "delete",
+      )
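
The pid-file guard composes into a single shell test; an illustrative expansion for the master role (paths assumed) is:

    sudo test -f /var/run/hbase/hbase-hbase-master.pid && \
      ps -p `sudo cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1

so 'start' is skipped while a live process still owns the pid file (not_if), 'stop' runs only when one does (only_if), and the on_timeout kill -9 fires if a region server ignores the stop within hbase_regionserver_shutdown_timeout.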

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_upgrade.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_upgrade.py
new file mode 100644
index 0000000..2dc9883
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_upgrade.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management.libraries.script import Script
+from resource_management.core.resources.system import Execute
+
+class HbaseMasterUpgrade(Script):
+
+  def take_snapshot(self, env):
+    import params
+
+    snap_cmd = "echo 'snapshot_all' | {0} shell".format(params.hbase_cmd)
+
+    exec_cmd = "{0} {1}".format(params.kinit_cmd, snap_cmd)
+
+    Execute(exec_cmd, user=params.hbase_user)
+
+  def restore_snapshot(self, env):
+    import params
+    print "TODO AMBARI-12698"
+
+if __name__ == "__main__":
+  HbaseMasterUpgrade().execute()
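
With illustrative params (the keytab path and principal are assumptions pulled in via params.kinit_cmd), take_snapshot reduces to:

    kinit -kt /etc/security/keytabs/hbase.headless.keytab hbase@EXAMPLE.COM; \
      echo 'snapshot_all' | hbase shell

i.e. it pipes the snapshot_all command into the HBase shell rather than snapshotting tables individually.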

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/params.py
new file mode 100644
index 0000000..f3208ce
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/params.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from ambari_commons import OSCheck
+from resource_management.libraries.functions.default import default
+
+if OSCheck.is_windows_family():
+  from params_windows import *
+else:
+  from params_linux import *
+
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+retryAble = default("/commandParams/command_retry_enabled", False)
\ No newline at end of file
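Both assignments above go through the default() helper, which resolves a '/'-separated path against the command JSON and returns the fallback when any segment is missing. A minimal sketch of that behavior (simplified; the real implementation lives in resource_management.libraries.functions.default):

    def default_sketch(command_json, path, fallback):
        # Walk the '/'-separated path; return the fallback if any segment is missing.
        node = command_json
        for key in path.strip('/').split('/'):
            if not isinstance(node, dict) or key not in node:
                return fallback
            node = node[key]
        return node

    # e.g. default_sketch({"hostLevelParams": {}}, "/hostLevelParams/host_sys_prepped", False) -> False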

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/params_linux.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/params_linux.py
new file mode 100644
index 0000000..bff022d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/params_linux.py
@@ -0,0 +1,447 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+import status_params
+import ambari_simplejson as json # simplejson is much faster than the Python 2.6 json module and provides the same function set.
+
+from urlparse import urlparse
+
+from functions import calc_xmn_from_xms, ensure_unit_for_memory
+
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from ambari_commons.os_check import OSCheck
+from ambari_commons.str_utils import string_set_intersection
+
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.stack_features import get_stack_feature_version
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions import is_empty
+from resource_management.libraries.functions import get_unique_id_and_date
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.expect import expect
+from resource_management.libraries.functions.constants import Direction
+
+# server configurations
+config = Script.get_config()
+exec_tmp_dir = Script.get_tmp_dir()
+sudo = AMBARI_SUDO_BINARY
+
+stack_name = status_params.stack_name
+agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
+agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
+version = default("/commandParams/version", None)
+component_directory = status_params.component_directory
+etc_prefix_dir = "/etc/hbase"
+
+stack_version_unformatted = status_params.stack_version_unformatted
+stack_version_formatted = status_params.stack_version_formatted
+stack_root = status_params.stack_root
+
+# get the correct version to use for checking stack features
+version_for_stack_feature_checks = get_stack_feature_version(config)
+
+stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
+stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
+
+# hadoop default parameters
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
+region_mover = "/usr/lib/hbase/bin/region_mover.rb"
+region_drainer = "/usr/lib/hbase/bin/draining_servers.rb"
+hbase_cmd = "/usr/lib/hbase/bin/hbase"
+hbase_max_direct_memory_size = None
+
+# hadoop parameters for stacks supporting rolling_upgrade
+if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
+  daemon_script = format('{stack_root}/current/hbase-client/bin/hbase-daemon.sh')
+  region_mover = format('{stack_root}/current/hbase-client/bin/region_mover.rb')
+  region_drainer = format('{stack_root}/current/hbase-client/bin/draining_servers.rb')
+  hbase_cmd = format('{stack_root}/current/hbase-client/bin/hbase')
+
+  hbase_max_direct_memory_size = default('/configurations/hbase-env/hbase_max_direct_memory_size', None)
+
+  tmp_daemon_script = format("{stack_root}/current/{component_directory}/bin/hbase-daemon.sh")
+  if os.path.exists(tmp_daemon_script):
+    daemon_script = tmp_daemon_script
+
+  region_mover = format("{stack_root}/current/{component_directory}/bin/region_mover.rb")
+  region_drainer = format("{stack_root}/current/{component_directory}/bin/draining_servers.rb")
+  hbase_cmd = format("{stack_root}/current/{component_directory}/bin/hbase")
+
+
+hbase_conf_dir = status_params.hbase_conf_dir
+limits_conf_dir = status_params.limits_conf_dir
+
+hbase_user_nofile_limit = default("/configurations/hbase-env/hbase_user_nofile_limit", "32000")
+hbase_user_nproc_limit = default("/configurations/hbase-env/hbase_user_nproc_limit", "16000")
+
+# no symlink for phoenix-server at this point
+phx_daemon_script = format('{stack_root}/current/phoenix-server/bin/queryserver.py')
+
+hbase_excluded_hosts = config['commandParams']['excluded_hosts']
+hbase_drain_only = default("/commandParams/mark_draining_only",False)
+hbase_included_hosts = config['commandParams']['included_hosts']
+
+hbase_user = status_params.hbase_user
+hbase_principal_name = config['configurations']['hbase-env']['hbase_principal_name']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+_authentication = config['configurations']['core-site']['hadoop.security.authentication']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+# this is "hadoop-metrics.properties" for 1.x stacks
+metric_prop_file_name = "hadoop-metrics2-hbase.properties"
+
+# not supporting 32 bit jdk.
+java64_home = config['hostLevelParams']['java_home']
+java_version = expect("/hostLevelParams/java_version", int)
+
+log_dir = config['configurations']['hbase-env']['hbase_log_dir']
+java_io_tmpdir = default("/configurations/hbase-env/hbase_java_io_tmpdir", "/tmp")
+master_heapsize = ensure_unit_for_memory(config['configurations']['hbase-env']['hbase_master_heapsize'])
+
+regionserver_heapsize = ensure_unit_for_memory(config['configurations']['hbase-env']['hbase_regionserver_heapsize'])
+regionserver_xmn_max = config['configurations']['hbase-env']['hbase_regionserver_xmn_max']
+regionserver_xmn_percent = expect("/configurations/hbase-env/hbase_regionserver_xmn_ratio", float)
+regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize, regionserver_xmn_percent, regionserver_xmn_max)
+
+hbase_regionserver_shutdown_timeout = expect('/configurations/hbase-env/hbase_regionserver_shutdown_timeout', int, 30)
+
+phoenix_hosts = default('/clusterHostInfo/phoenix_query_server_hosts', [])
+phoenix_enabled = default('/configurations/hbase-env/phoenix_sql_enabled', False)
+has_phoenix = len(phoenix_hosts) > 0
+
+underscored_version = stack_version_unformatted.replace('.', '_')
+dashed_version = stack_version_unformatted.replace('.', '-')
+if OSCheck.is_redhat_family() or OSCheck.is_suse_family():
+  phoenix_package = format("phoenix_{underscored_version}_*")
+elif OSCheck.is_ubuntu_family():
+  phoenix_package = format("phoenix-{dashed_version}-.*")
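+# Illustrative only: for a hypothetical stack_version_unformatted of "4.2.5.0",
+# the patterns above become "phoenix_4_2_5_0_*" (RedHat/SuSE) and
+# "phoenix-4-2-5-0-.*" (Ubuntu).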
+
+pid_dir = status_params.pid_dir
+tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
+local_dir = config['configurations']['hbase-site']['hbase.local.dir']
+ioengine_param = default('/configurations/hbase-site/hbase.bucketcache.ioengine', None)
+
+client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
+master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
+regionserver_jaas_config_file = format("{hbase_conf_dir}/hbase_regionserver_jaas.conf")
+rest_jaas_config_file = format("{hbase_conf_dir}/hbase_rest_jaas.conf")
+queryserver_jaas_config_file = format("{hbase_conf_dir}/hbase_queryserver_jaas.conf")
+
+ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', []) # is not passed when ganglia is not present
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+
+ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
+has_metric_collector = not len(ams_collector_hosts) == 0
+if has_metric_collector:
+  if 'cluster-env' in config['configurations'] and \
+      'metrics_collector_vip_host' in config['configurations']['cluster-env']:
+    metric_collector_host = config['configurations']['cluster-env']['metrics_collector_vip_host']
+  else:
+    metric_collector_host = ams_collector_hosts[0]
+  if 'cluster-env' in config['configurations'] and \
+      'metrics_collector_vip_port' in config['configurations']['cluster-env']:
+    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
+  else:
+    metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "localhost:6188")
+    if metric_collector_web_address.find(':') != -1:
+      metric_collector_port = metric_collector_web_address.split(':')[1]
+    else:
+      metric_collector_port = '6188'
+  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
+    metric_collector_protocol = 'https'
+  else:
+    metric_collector_protocol = 'http'
+  metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
+  metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
+  metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
+
+metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
+metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
+
+# if HBase is selected, hbase_rs_hosts should not be empty, but default just in case
+if 'slave_hosts' in config['clusterHostInfo']:
+  rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/slave_hosts') # if hbase_rs_hosts is not given, region servers are assumed to run on the slave nodes
+else:
+  rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/all_hosts')
+
+smoke_test_user = config['configurations']['cluster-env']['smokeuser']
+smokeuser_principal =  config['configurations']['cluster-env']['smokeuser_principal_name']
+smokeuser_permissions = "RWXCA"
+
+titan_user = default('/configurations/titan-env/titan_user', 'titan')
+titan_user_permissions = "RWC"
+titan_user_hbase_namespace = "@default"
+
+service_check_data = get_unique_id_and_date()
+user_group = config['configurations']['cluster-env']["user_group"]
+
+if security_enabled:
+  _hostname_lowercase = config['hostname'].lower()
+  master_jaas_princ = config['configurations']['hbase-site']['hbase.master.kerberos.principal'].replace('_HOST',_hostname_lowercase)
+  master_keytab_path = config['configurations']['hbase-site']['hbase.master.keytab.file']
+  regionserver_jaas_princ = config['configurations']['hbase-site']['hbase.regionserver.kerberos.principal'].replace('_HOST',_hostname_lowercase)
+  _rest_server_jaas_princ = config['configurations']['hbase-site']['hbase.rest.kerberos.principal']
+  _rest_server_spnego_jaas_princ = config['configurations']['hbase-site']['hbase.rest.authentication.kerberos.principal']
+  _queryserver_jaas_princ = config['configurations']['hbase-site']['phoenix.queryserver.kerberos.principal']
+  if not is_empty(_queryserver_jaas_princ):
+    queryserver_jaas_princ =_queryserver_jaas_princ.replace('_HOST',_hostname_lowercase)
+  if not is_empty(_rest_server_jaas_princ):
+    rest_server_jaas_princ = _rest_server_jaas_princ.replace('_HOST',_hostname_lowercase)
+  if not is_empty(_rest_server_spnego_jaas_princ):
+    rest_server_spnego_jaas_princ = _rest_server_spnego_jaas_princ.replace('_HOST',_hostname_lowercase)
+
+regionserver_keytab_path = config['configurations']['hbase-site']['hbase.regionserver.keytab.file']
+rest_server_keytab_path = config['configurations']['hbase-site']['hbase.rest.keytab.file']
+rest_server_spnego_keytab_path = config['configurations']['hbase-site']['hbase.rest.authentication.kerberos.keytab']
+queryserver_keytab_path = config['configurations']['hbase-site']['phoenix.queryserver.keytab.file']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+hbase_user_keytab = config['configurations']['hbase-env']['hbase_user_keytab']
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+if security_enabled:
+  kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_principal_name};")
+  kinit_cmd_master = format("{kinit_path_local} -kt {master_keytab_path} {master_jaas_princ};")
+  master_security_config = format("-Djava.security.auth.login.config={hbase_conf_dir}/hbase_master_jaas.conf")
+else:
+  kinit_cmd = ""
+  kinit_cmd_master = ""
+  master_security_config = ""
+
+#log4j.properties
+if (('hbase-log4j' in config['configurations']) and ('content' in config['configurations']['hbase-log4j'])):
+  log4j_props = config['configurations']['hbase-log4j']['content']
+else:
+  log4j_props = None
+  
+hbase_env_sh_template = config['configurations']['hbase-env']['content']
+
+hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']
+hbase_root_dir_scheme = urlparse(hbase_hdfs_root_dir).scheme
+
+hbase_staging_dir = "/apps/hbase/staging"
+#for create_hdfs_directory
+hostname = config["hostname"]
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+
+
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+dfs_type = default("/commandParams/dfs_type", "")
+
+import functools
+#create partial functions with common arguments for every HdfsResource call
+#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
+  dfs_type = dfs_type
+)
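+# Illustrative usage sketch (call sites live elsewhere, e.g. the service's hbase.py):
+# with the partial in place, callers pass only the per-resource arguments and then
+# flush the queued operations:
+#   params.HdfsResource(params.hbase_staging_dir,
+#                       type = "directory",
+#                       action = "create_on_execute",
+#                       owner = params.hbase_user)
+#   params.HdfsResource(None, action = "execute")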
+
+# ranger host
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+has_ranger_admin = not len(ranger_admin_hosts) == 0
+xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+
+# ranger hbase properties
+policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
+# the value was already read unconditionally above, so only the trailing slash needs checking
+if policymgr_mgr_url.endswith('/'):
+  policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
+xa_audit_db_name = default('/configurations/admin-properties/audit_db_name', 'ranger_audits')
+xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
+xa_db_host = config['configurations']['admin-properties']['db_host']
+repo_name = str(config['clusterName']) + '_hbase'
+
+common_name_for_certificate = config['configurations']['ranger-hbase-plugin-properties']['common.name.for.certificate']
+
+zookeeper_znode_parent = config['configurations']['hbase-site']['zookeeper.znode.parent']
+hbase_zookeeper_quorum = config['configurations']['hbase-site']['hbase.zookeeper.quorum']
+hbase_zookeeper_property_clientPort = config['configurations']['hbase-site']['hbase.zookeeper.property.clientPort']
+hbase_security_authentication = config['configurations']['hbase-site']['hbase.security.authentication']
+hadoop_security_authentication = config['configurations']['core-site']['hadoop.security.authentication']
+
+repo_config_username = config['configurations']['ranger-hbase-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
+
+ranger_env = config['configurations']['ranger-env']
+ranger_plugin_properties = config['configurations']['ranger-hbase-plugin-properties']
+policy_user = config['configurations']['ranger-hbase-plugin-properties']['policy_user']
+
+#For curl command in ranger plugin to get db connector
+jdk_location = config['hostLevelParams']['jdk_location']
+java_share_dir = '/usr/share/java'
+enable_ranger_hbase = False
+if has_ranger_admin:
+  enable_ranger_hbase = (config['configurations']['ranger-hbase-plugin-properties']['ranger-hbase-plugin-enabled'].lower() == 'yes')
+  xa_audit_db_password = ''
+  if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db:
+    xa_audit_db_password = unicode(config['configurations']['admin-properties']['audit_db_password'])
+  repo_config_password = unicode(config['configurations']['ranger-hbase-plugin-properties']['REPOSITORY_CONFIG_PASSWORD'])
+  xa_audit_db_flavor = (config['configurations']['admin-properties']['DB_FLAVOR']).lower()
+  previous_jdbc_jar_name = None
+
+  if stack_supports_ranger_audit_db:
+    if xa_audit_db_flavor == 'mysql':
+      jdbc_jar_name = default("/hostLevelParams/custom_mysql_jdbc_name", None)
+      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
+      audit_jdbc_url = format('jdbc:mysql://{xa_db_host}/{xa_audit_db_name}')
+      jdbc_driver = "com.mysql.jdbc.Driver"
+    elif xa_audit_db_flavor == 'oracle':
+      jdbc_jar_name = default("/hostLevelParams/custom_oracle_jdbc_name", None)
+      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
+      colon_count = xa_db_host.count(':')
+      if colon_count == 2 or colon_count == 0:
+        audit_jdbc_url = format('jdbc:oracle:thin:@{xa_db_host}')
+      else:
+        audit_jdbc_url = format('jdbc:oracle:thin:@//{xa_db_host}')
+      jdbc_driver = "oracle.jdbc.OracleDriver"
+    elif xa_audit_db_flavor == 'postgres':
+      jdbc_jar_name = default("/hostLevelParams/custom_postgres_jdbc_name", None)
+      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
+      audit_jdbc_url = format('jdbc:postgresql://{xa_db_host}/{xa_audit_db_name}')
+      jdbc_driver = "org.postgresql.Driver"
+    elif xa_audit_db_flavor == 'mssql':
+      jdbc_jar_name = default("/hostLevelParams/custom_mssql_jdbc_name", None)
+      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
+      audit_jdbc_url = format('jdbc:sqlserver://{xa_db_host};databaseName={xa_audit_db_name}')
+      jdbc_driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
+    elif xa_audit_db_flavor == 'sqla':
+      jdbc_jar_name = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
+      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
+      audit_jdbc_url = format('jdbc:sqlanywhere:database={xa_audit_db_name};host={xa_db_host}')
+      jdbc_driver = "sap.jdbc4.sqlanywhere.IDriver"
+
+  downloaded_custom_connector = format("{exec_tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+  driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+  driver_curl_target = format("{stack_root}/current/{component_directory}/lib/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+  previous_jdbc_jar = format("{stack_root}/current/{component_directory}/lib/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+  sql_connector_jar = ''
+
+  if security_enabled:
+    master_principal = config['configurations']['hbase-site']['hbase.master.kerberos.principal']
+
+  hbase_ranger_plugin_config = {
+    'username': repo_config_username,
+    'password': repo_config_password,
+    'hadoop.security.authentication': hadoop_security_authentication,
+    'hbase.security.authentication': hbase_security_authentication,
+    'hbase.zookeeper.property.clientPort': hbase_zookeeper_property_clientPort,
+    'hbase.zookeeper.quorum': hbase_zookeeper_quorum,
+    'zookeeper.znode.parent': zookeeper_znode_parent,
+    'commonNameForCertificate': common_name_for_certificate,
+    'hbase.master.kerberos.principal': master_principal if security_enabled else ''
+  }
+
+  hbase_ranger_plugin_repo = {
+    'isActive': 'true',
+    'config': json.dumps(hbase_ranger_plugin_config),
+    'description': 'hbase repo',
+    'name': repo_name,
+    'repositoryType': 'hbase',
+    'assetType': '2'
+  }
+
+  if stack_supports_ranger_kerberos and security_enabled:
+    hbase_ranger_plugin_config['policy.download.auth.users'] = hbase_user
+    hbase_ranger_plugin_config['tag.download.auth.users'] = hbase_user
+    hbase_ranger_plugin_config['policy.grantrevoke.auth.users'] = hbase_user
+
+  if stack_supports_ranger_kerberos:
+    hbase_ranger_plugin_config['ambari.service.check.user'] = policy_user
+
+    hbase_ranger_plugin_repo = {
+      'isEnabled': 'true',
+      'configs': hbase_ranger_plugin_config,
+      'description': 'hbase repo',
+      'name': repo_name,
+      'type': 'hbase'
+    }
+
+  ranger_hbase_principal = None
+  ranger_hbase_keytab = None
+  if stack_supports_ranger_kerberos and security_enabled and 'hbase-master' in component_directory.lower():
+    ranger_hbase_principal = master_jaas_princ
+    ranger_hbase_keytab = master_keytab_path
+  elif  stack_supports_ranger_kerberos and security_enabled and 'hbase-regionserver' in component_directory.lower():
+    ranger_hbase_principal = regionserver_jaas_princ
+    ranger_hbase_keytab = regionserver_keytab_path
+
+  xa_audit_db_is_enabled = False
+  ranger_audit_solr_urls = config['configurations']['ranger-admin-site']['ranger.audit.solr.urls']
+  if xml_configurations_supported and stack_supports_ranger_audit_db:
+    xa_audit_db_is_enabled = config['configurations']['ranger-hbase-audit']['xasecure.audit.destination.db']
+  xa_audit_hdfs_is_enabled = config['configurations']['ranger-hbase-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else None
+  ssl_keystore_password = unicode(config['configurations']['ranger-hbase-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']) if xml_configurations_supported else None
+  ssl_truststore_password = unicode(config['configurations']['ranger-hbase-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']) if xml_configurations_supported else None
+  credential_file = format('/etc/ranger/{repo_name}/cred.jceks') if xml_configurations_supported else None
+
+  #For SQLA explicitly disable audit to DB for Ranger
+  if xa_audit_db_flavor == 'sqla':
+    xa_audit_db_is_enabled = False
+
+
+create_hbase_home_directory = check_stack_feature(StackFeature.HBASE_HOME_DIRECTORY, stack_version_formatted)
+hbase_home_directory = format("/user/{hbase_user}")
+
+atlas_hosts = default('/clusterHostInfo/atlas_server_hosts', [])
+has_atlas = len(atlas_hosts) > 0
+
+metadata_user = default('/configurations/atlas-env/metadata_user', None)
+atlas_graph_storage_hostname = default('/configurations/application-properties/atlas.graph.storage.hostname', None)
+atlas_graph_storage_hbase_table = default('/configurations/application-properties/atlas.graph.storage.hbase.table', None)
+atlas_audit_hbase_tablename = default('/configurations/application-properties/atlas.audit.hbase.tablename', None)
+
+if has_atlas:
+  zk_hosts_matches = string_set_intersection(atlas_graph_storage_hostname, hbase_zookeeper_quorum)
+  atlas_with_managed_hbase = len(zk_hosts_matches) > 0
+else:
+  atlas_with_managed_hbase = False
+
+wal_directory = "/apps/hbase/data/MasterProcWALs"
+
+backup_wal_dir = default('/configurations/hbase-env/backup_wal_dir', False)
+
+#Need to make sure not to keep removing WAL logs once EU is finalized.
+upgrade_direction = default("/commandParams/upgrade_direction", None)
+to_backup_wal_dir = upgrade_direction is not None and upgrade_direction == Direction.UPGRADE and backup_wal_dir

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/params_windows.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/params_windows.py
new file mode 100644
index 0000000..7d634cb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/params_windows.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import os
+import status_params
+
+# server configurations
+config = Script.get_config()
+hbase_conf_dir = os.environ["HBASE_CONF_DIR"]
+hbase_bin_dir = os.path.join(os.environ["HBASE_HOME"],'bin')
+hbase_executable = os.path.join(hbase_bin_dir,"hbase.cmd")
+stack_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],".."))
+hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
+hbase_user = hadoop_user
+
+#decomm params
+region_drainer = os.path.join(hbase_bin_dir,"draining_servers.rb")
+region_mover = os.path.join(hbase_bin_dir,"region_mover.rb")
+hbase_excluded_hosts = config['commandParams']['excluded_hosts']
+hbase_drain_only = config['commandParams']['mark_draining_only']
+
+service_map = {
+  'master' : status_params.hbase_master_win_service_name,
+  'regionserver' : status_params.hbase_regionserver_win_service_name
+}
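For illustration, control code resolves the Windows service name by role through this map:

    # illustration only
    win_service_name = service_map['regionserver']  # -> status_params.hbase_regionserver_win_service_name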

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/phoenix_queryserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/phoenix_queryserver.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/phoenix_queryserver.py
new file mode 100644
index 0000000..82113e9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/phoenix_queryserver.py
@@ -0,0 +1,88 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.script import Script
+from phoenix_service import phoenix_service
+from hbase import hbase
+
+# Note: Phoenix Query Server is only applicable to stack version supporting Phoenix.
+class PhoenixQueryServer(Script):
+
+  def install(self, env):
+    import params
+    env.set_params(params)
+    self.install_packages(env)
+
+
+  def get_component_name(self):
+    return "phoenix-server"
+
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hbase(name='queryserver')
+
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    phoenix_service('start')
+
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    phoenix_service('stop')
+
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.stack_version_formatted and check_stack_feature(StackFeature.PHOENIX, params.stack_version_formatted):     
+      # phoenix uses hbase configs
+      conf_select.select(params.stack_name, "hbase", params.version)
+      stack_select.select("phoenix-server", params.version)
+
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    phoenix_service('status')
+
+
+  def security_status(self, env):
+    self.put_structured_out({"securityState": "UNSECURED"})
+    
+  def get_log_folder(self):
+    import params
+    return params.log_dir
+  
+  def get_user(self):
+    import params
+    return params.hbase_user
+
+if __name__ == "__main__":
+  PhoenixQueryServer().execute()
\ No newline at end of file


[11/50] [abbrv] ambari git commit: AMBARI-21462. Readd TITAN, R4ML, SYSTEMML, JNBG to BigInsights and fix HBase backup during EU and imports (alejandro)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/files/titanSmoke.groovy
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/files/titanSmoke.groovy b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/files/titanSmoke.groovy
new file mode 100755
index 0000000..79438be
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/files/titanSmoke.groovy
@@ -0,0 +1,20 @@
+/*Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License */
+
+import com.thinkaurelius.titan.core.TitanFactory;
+
+graph = TitanFactory.open("/etc/titan/conf/titan-hbase-solr.properties")
+g = graph.traversal()
+l = g.V().values('name').toList()

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/params.py
new file mode 100755
index 0000000..8019748
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/params.py
@@ -0,0 +1,202 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions.get_stack_version import get_stack_version
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+stack_root = Script.get_stack_root()
+
+stack_name = default("/hostLevelParams/stack_name", None)
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+full_stack_version = get_stack_version('titan-client')
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+version = default("/commandParams/version", None)
+
+titan_user = config['configurations']['titan-env']['titan_user']
+user_group = config['configurations']['cluster-env']['user_group']
+titan_log_dir = config['configurations']['titan-env']['titan_log_dir']
+titan_server_port = config['configurations']['titan-env']['titan_server_port']
+titan_hdfs_home_dir = config['configurations']['titan-env']['titan_hdfs_home_dir']
+titan_log_file = format("{titan_log_dir}/titan-{titan_server_port}.log")
+titan_err_file = format("{titan_log_dir}/titan-{titan_server_port}.err")
+
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+
+
+if security_enabled:
+  _hostname_lowercase = config['hostname'].lower()
+  titan_jaas_princ = config['configurations']['titan-env']['titan_principal_name'].replace('_HOST',_hostname_lowercase)
+  titan_keytab_path = config['configurations']['titan-env']['titan_keytab_path']
+
+titan_bin_dir = format('{stack_root}/current/titan-client/bin')
+titan_data_dir = format('{stack_root}/current/titan-server/data')
+# titan configurations
+titan_conf_dir = format('{stack_root}/current/titan-server/conf')
+titan_hbase_solr_props = config['configurations']['titan-hbase-solr']['content']
+titan_env_props = config['configurations']['titan-env']['content']
+log4j_console_props = config['configurations']['titan-log4j']['content']
+
+# titan server configurations
+titan_server_conf_dir = format('{stack_root}/current/titan-server/conf/gremlin-server')
+gremlin_server_configs = config['configurations']['gremlin-server']['content']
+
+titan_server_sasl = str(config['configurations']['titan-env']['SimpleAuthenticator']).lower()
+titan_server_simple_authenticator = ""
+if titan_server_sasl == "true" and 'knox-env' not in config['configurations']:
+  titan_server_simple_authenticator = """authentication: {
+  className: org.apache.tinkerpop.gremlin.server.auth.SimpleAuthenticator,
+  config: {
+    credentialsDb: conf/tinkergraph-empty.properties,
+    credentialsDbLocation: data/credentials.kryo}}"""
+
+titan_server_ssl = str(config['configurations']['titan-env']['ssl.enabled']).lower()
+titan_server_ssl_key_cert_file = default('/configurations/titan-env/ssl.keyCertChainFile', None)
+if titan_server_ssl_key_cert_file:
+  titan_server_ssl_key_cert_file = format(", keyCertChainFile: {titan_server_ssl_key_cert_file}")
+titan_server_ssl_key_file = default('/configurations/titan-env/ssl.keyFile', None)
+if titan_server_ssl_key_file:
+  titan_server_ssl_key_file = format(", keyFile: {titan_server_ssl_key_file}")
+titan_server_ssl_key_password = default('/configurations/titan-env/ssl.keyPassword', None)
+if titan_server_ssl_key_password:
+  titan_server_ssl_key_password = format(", keyPassword: {titan_server_ssl_key_password}")
+titan_server_ssl_trust_cert_chain_file = default('/configurations/titan-env/ssl.trustCertChainFile', None)
+# not supporting 32 bit jdk.
+java64_home = config['hostLevelParams']['java_home']
+hadoop_config_dir = conf_select.get_hadoop_conf_dir()
+hbase_config_dir = format('{stack_root}/current/hbase-client/conf')
+
+# Titan SparkGraphComputer configuration
+yarn_home_dir = format('{stack_root}/current/hadoop-yarn-client')
+spark_home_dir = format('{stack_root}/current/spark2-client')
+spark_config_dir = format('{stack_root}/current/spark2-client/conf')
+titan_home_dir = format('{stack_root}/current/titan-client')
+titan_conf_dir = format('{stack_root}/current/titan-client/conf')
+titan_conf_hadoop_graph_dir = format('{stack_root}/current/titan-client/conf/hadoop-graph')
+hadoop_lib_native_dir = format('{stack_root}/current/hadoop-client/lib/native')
+titan_hadoop_gryo_props = config['configurations']['hadoop-gryo']['content']
+hadoop_hbase_read_props = config['configurations']['hadoop-hbase-read']['content']
+titan_hdfs_data_dir = "/user/titan/data"
+titan_hdfs_spark_lib_dir = "/user/spark/share/lib/spark"
+titan_ext_spark_plugin_dir = format('{stack_root}/current/titan-server/ext/spark-client/plugin')
+platform_name = format('{stack_root}').split('/')[2]
+titan_spark2_archive_dir = format('/{platform_name}/apps/{full_stack_version}/spark2')
+titan_spark2_archive_file = format('spark2-{platform_name}-yarn-archive.tar.gz')
+local_components = default("/localComponents", [])
+yarn_client_installed = ( 'YARN_CLIENT' in local_components)
+hbase_master_installed = ( 'HBASE_CLIENT' in local_components)
+
+# Titan requires 'storage.hostname', which is the HBase cluster in IOP 4.2.
+# The host name should be the ZooKeeper quorum.
+storage_hosts = config['clusterHostInfo']['zookeeper_hosts']
+zookeeper_port = config['configurations']['zoo.cfg']['clientPort']
+storage_host_list = []
+titan_zookeeper_solr_host_list = []
+for hostname in storage_hosts:
+  titan_zookeeper_solr_hostname = hostname+format(':{zookeeper_port}/solr')
+  titan_zookeeper_solr_host_list.append(titan_zookeeper_solr_hostname)
+  storage_host_list.append(hostname)
+storage_host = ",".join(storage_host_list)
+zookeeper_solr_for_titan_hostname = ",".join(titan_zookeeper_solr_host_list)
+hbase_zookeeper_parent = config['configurations']['hbase-site']['zookeeper.znode.parent']
+
+if 'titan_server_hosts' in config['clusterHostInfo'] and len(config['clusterHostInfo']['titan_server_hosts']) > 0:
+  titan_host = config['clusterHostInfo']['titan_server_hosts'][0]
+
+# the JTS jar should be copied to the Solr site
+titan_dir = format('{stack_root}/current/titan-client')
+titan_ext_dir = format('{stack_root}/current/titan-client/ext')
+titan_solr_conf_dir = format('{stack_root}/current/titan-client/conf/solr')
+titan_solr_jar_file = format('{stack_root}/current/titan-client/lib/jts-1.13.jar')
+# jaas file for solr when security is enabled
+titan_solr_jaas_file = format('{titan_solr_conf_dir}/titan_solr_jaas.conf')
+titan_solr_client_jaas_file = format('{titan_solr_conf_dir}/titan_solr_client_jaas.conf')
+titan_solr_client_jaas_config = "index.search.solr.jaas-file=" + format('{titan_solr_conf_dir}/titan_solr_client_jaas.conf')
+if not security_enabled:
+  titan_solr_client_jaas_config=""
+# config for solr collection creation
+zookeeper_quorum = ",".join(host + ":" + str(zookeeper_port)
+                            for host in config['clusterHostInfo']['zookeeper_hosts'])
+if "solr-env" in config['configurations']:
+  solr_znode = default('/configurations/solr-env/solr_znode', '/solr')
+infra_solr_hosts = default("/clusterHostInfo/infra_solr_hosts", [])
+infra_solr_replication_factor = 2 if len(infra_solr_hosts) > 1 else 1
+titan_solr_shards = 1
+titan_solr_hdfs_dir = "/apps/titan"
+titan_solr_hdfs_conf_dir = "/apps/titan/conf"
+titan_solr_hdfs_jar = "/apps/titan/jts-1.13.jar"
+titan_tmp_dir = format('{tmp_dir}/titan')
+titan_solr_dir = format('{titan_tmp_dir}/solr_installed')
+configuration_tags = config['configurationTags']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+titan_hdfs_mode = 0775
+solr_conf_dir = format('{stack_root}/current/solr-server/conf')
+titan_solr_configset = 'titan'
+titan_solr_collection_name = 'titan'
+solr_port = config['configurations']['solr-env']['solr_port']
+solr_user = config['configurations']['solr-env']['solr_user']
+solr_conf_trg_file = format('{stack_root}/current/solr-server/server/solr/configsets/{titan_solr_configset}/conf/solrconfig.xml')
+#for create_hdfs_directory
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+kinit_path_local = get_kinit_path()
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+hdfs_site = config['configurations']['hdfs-site']
+# use the agent's own host name for the principal; the bare `hostname` that leaks
+# out of the storage_hosts loop above is the last ZooKeeper host, not this node
+hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', 'missing_principal').replace("_HOST", config['hostname'].lower())
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+import functools
+#to create hdfs directory we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user = hdfs_user,
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
+)
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/params_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/params_server.py b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/params_server.py
new file mode 100755
index 0000000..edc264f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/params_server.py
@@ -0,0 +1,37 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+
+
+# server configurations
+config = Script.get_config()
+
+titan_pid_dir = config['configurations']['titan-env']['titan_pid_dir']
+titan_pid_file = format("{titan_pid_dir}/titan.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/service_check.py
new file mode 100755
index 0000000..118eea3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/service_check.py
@@ -0,0 +1,88 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import os
+from resource_management import *
+from resource_management.core.resources.system import Execute, File
+from resource_management.core.source import InlineTemplate, StaticFile
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.version import compare_versions
+from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.validate import call_and_match_output
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+class TitanServiceCheck(Script):
+    pass
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class TitanServiceCheckDefault(TitanServiceCheck):
+    def service_check(self, env):
+        import params
+        env.set_params(params)
+
+        File(format("{tmp_dir}/titanSmoke.groovy"),
+             content = StaticFile("titanSmoke.groovy"),
+             mode = 0755
+             )
+
+        if params.security_enabled:
+            kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
+            Execute(kinit_cmd,
+                    user=params.smokeuser
+                    )
+
+        secure = ""
+        if params.titan_server_ssl == "true":
+            secure = "-k"
+            if params.titan_server_ssl_key_cert_file:
+                secure = "--cacert " + params.titan_server_ssl_key_cert_file.split(":")[1]
+        grepresult = """ | grep 99"""
+        if len(params.titan_server_simple_authenticator) > 0:
+            grepresult = ""
+        headers = """ -XPOST -Hcontent-type:application/json -d '{"gremlin":"100-1"}' """
+        http = "http://"
+        if params.titan_server_ssl == "true":
+            http = "https://"
+        titan_server_host = http + format("{titan_host}")
+        titan_port = format("{titan_server_port}")
+        cmd = "curl " + secure + headers + titan_server_host + ":" + titan_port + grepresult
+
+        Execute(cmd,
+                tries     = 40,
+                try_sleep = 5,
+                path      = format('{titan_bin_dir}:/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'),
+                user      = params.smokeuser,
+                logoutput = True
+                )
+
+        Execute(format("gremlin {tmp_dir}/titanSmoke.groovy"),
+                tries     = 3,
+                try_sleep = 5,
+                path      = format('{titan_bin_dir}:/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'),
+                user      = params.smokeuser,
+                logoutput = True
+                )
+
+if __name__ == "__main__":
+    # print "Track service check status"
+    TitanServiceCheck().execute()
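For illustration, with SSL disabled, no SimpleAuthenticator configured, and hypothetical host and port values, the assembled smoke-test command is roughly:

    # hypothetical expansion, for illustration only
    cmd = ("curl -XPOST -Hcontent-type:application/json -d '{\"gremlin\":\"100-1\"}' "
           "http://titan-host.example.com:8182 | grep 99")
    # i.e. POST the Gremlin expression 100-1 to the server and expect 99 in the response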

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/titan.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/titan.py b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/titan.py
new file mode 100755
index 0000000..43dcb2d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/titan.py
@@ -0,0 +1,143 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import os
+from resource_management import *
+from resource_management.core.source import InlineTemplate, StaticFile
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def titan(type = None, upgrade_type=None):
+    import params
+    import params_server
+    if type == 'server':
+        File(format("{params.titan_server_conf_dir}/gremlin-server.yaml"),
+             mode=0644,
+             group=params.user_group,
+             owner=params.titan_user,
+             content=InlineTemplate(params.gremlin_server_configs)
+             )
+        credentials_file = format("{params.titan_data_dir}/credentials.kryo")
+        if not os.path.isfile(credentials_file):
+             File(credentials_file,
+                  mode=0644,
+                  group=params.user_group,
+                  owner=params.titan_user,
+                  content=""
+                  )
+        credentials_property_file = format("{params.titan_conf_dir}/tinkergraph-empty.properties")
+        if not os.path.isfile(credentials_property_file):
+             File(credentials_property_file,
+                  mode=0644,
+                  group=params.user_group,
+                  owner=params.titan_user,
+                  content=StaticFile("tinkergraph-empty.properties")
+                  )
+        Directory(params.titan_log_dir,
+                  create_parents=True,
+                  owner=params.titan_user,
+                  group=params.user_group,
+                  mode=0775
+                  )
+        Directory(params_server.titan_pid_dir,
+                  create_parents=True,
+                  owner=params.titan_user,
+                  group=params.user_group,
+                  mode=0775
+                  )
+        File(format("{params.titan_bin_dir}/gremlin-server-script.sh"),
+             mode=0755,
+             group='root',
+             owner='root',
+             content = StaticFile("gremlin-server-script.sh")
+             )
+
+    Directory(params.titan_conf_dir,
+              create_parents = True,
+              owner=params.titan_user,
+              group=params.user_group
+              )
+
+    File(format("{params.titan_conf_dir}/titan-env.sh"),
+             mode=0644,
+             group=params.user_group,
+             owner=params.titan_user,
+             content=InlineTemplate(params.titan_env_props)
+             )
+    jaas_client_file = format('{titan_solr_client_jaas_file}')
+
+    if not os.path.isfile(jaas_client_file) and params.security_enabled:
+        File(jaas_client_file,
+             owner   = params.titan_user,
+             group   = params.user_group,
+             mode    = 0644,
+             content = Template('titan_solr_client_jaas.conf.j2')
+             )
+
+# SparkGraphComputer
+    Directory(params.titan_conf_hadoop_graph_dir,
+              create_parents = True,
+              owner=params.titan_user,
+              group=params.user_group
+              )
+
+    File(format("{params.titan_conf_hadoop_graph_dir}/hadoop-gryo.properties"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.titan_user,
+         content=InlineTemplate(params.titan_hadoop_gryo_props)
+         )
+
+    File(format("{params.titan_conf_hadoop_graph_dir}/hadoop-hbase-read.properties"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.titan_user,
+         content=InlineTemplate(params.hadoop_hbase_read_props)
+         )
+
+    # titan-hbase-solr_properties is always set to a default even if it's not in the payload
+    File(format("{params.titan_conf_dir}/titan-hbase-solr.properties"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.titan_user,
+         content=InlineTemplate(params.titan_hbase_solr_props)
+         )
+
+    if params.log4j_console_props is not None:
+        File(format("{params.titan_conf_dir}/log4j-console.properties"),
+             mode=0644,
+             group=params.user_group,
+             owner=params.titan_user,
+             content=InlineTemplate(params.log4j_console_props)
+             )
+    elif os.path.exists(format("{params.titan_conf_dir}/log4j-console.properties")):
+        File(format("{params.titan_conf_dir}/log4j-console.properties"),
+             mode=0644,
+             group=params.user_group,
+             owner=params.titan_user
+             )
+    # Change titan ext directory for multiple user access
+    Directory(params.titan_ext_dir,
+              create_parents = True,
+              owner=params.titan_user,
+              group=params.user_group,
+              mode=0775
+              )

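The File and Directory resources above are declarative, and the credentials.kryo
handling additionally guards on os.path.isfile so an existing file is never
overwritten on a restart. A minimal standalone sketch of that "seed once"
pattern (plain Python, not Ambari's resource DSL; the path is only an example):

import os

def seed_file_once(path, mode=0o644):
    """Create an empty file with the given mode only if it does not exist."""
    if os.path.isfile(path):
        return False  # already seeded; leave any runtime contents alone
    with open(path, "w"):
        pass  # create the empty file
    os.chmod(path, mode)
    return True

if __name__ == "__main__":
    print(seed_file_once("/tmp/credentials.kryo"))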
http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/titan_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/titan_client.py b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/titan_client.py
new file mode 100755
index 0000000..9bb1aad
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/titan_client.py
@@ -0,0 +1,61 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+import os
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+import titan
+
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+class TitanClient(Script):
+    def get_component_name(self):
+        return "titan-client"
+
+    def configure(self, env):
+        import params
+        env.set_params(params)
+        titan.titan()
+
+    def status(self, env):
+        raise ClientComponentHasNoStatus()
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class TitanClientLinux(TitanClient):
+
+    def pre_rolling_restart(self, env):
+        import params
+        env.set_params(params)
+
+        if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+            conf_select.select(params.stack_name, "titan", params.version)
+            stack_select.select("titan-client", params.version)
+
+    def install(self, env):
+        self.install_packages(env)
+        self.configure(env)
+
+if __name__ == "__main__":
+    TitanClient().execute()

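Script subclasses such as TitanClient map Ambari commands (install, configure,
status) onto methods of the same name, and Script.execute() performs the
dispatch. A rough standalone sketch of that dispatch style (an assumption
about the shape of the mechanism, not Ambari's actual internals):

class MiniScript(object):
    def execute(self, command):
        # Ambari's Script.execute() reads the command from the agent;
        # here it is passed in directly for illustration.
        getattr(self, command)(env=None)

class MiniTitanClient(MiniScript):
    def install(self, env):
        print("install: would install packages, then call configure")
    def configure(self, env):
        print("configure: would render titan-env.sh and client configs")

if __name__ == "__main__":
    MiniTitanClient().execute("install")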
http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/titan_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/titan_server.py b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/titan_server.py
new file mode 100755
index 0000000..5dcc7e9
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/titan_server.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.check_process_status import check_process_status
+from titan_service import titan_service
+import titan
+
+class TitanServer(Script):
+  def get_component_name(self):
+    return "titan-server"
+
+  def install(self, env):
+    self.install_packages(env)
+
+  def configure(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    titan.titan(type='server', upgrade_type=upgrade_type)
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      stack_select.select("titan-server", params.version)
+      conf_select.select(params.stack_name, "titan", params.version)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    titan_service(action = 'start')
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    titan_service(action = 'stop')
+
+  def status(self, env, upgrade_type=None):
+    import params_server
+    check_process_status(params_server.titan_pid_file)
+
+if __name__ == "__main__":
+  TitanServer().execute()

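status() delegates to check_process_status with the pid file from
params_server. A standalone sketch of a pid-file liveness probe in the same
spirit (hedged: the real helper raises ComponentIsNotRunning rather than
returning a boolean, and the path below is only an example):

import os

def pid_file_alive(pid_file):
    if not os.path.isfile(pid_file):
        return False
    try:
        with open(pid_file) as f:
            pid = int(f.read().strip())
        os.kill(pid, 0)  # signal 0 checks process existence without killing
        return True
    except (ValueError, OSError):
        return False

if __name__ == "__main__":
    print(pid_file_alive("/var/run/titan/titan.pid"))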
http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/titan_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/titan_service.py b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/titan_service.py
new file mode 100755
index 0000000..f958599
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/titan_service.py
@@ -0,0 +1,150 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import os
+from resource_management import *
+from resource_management.libraries.functions.validate import call_and_match_output
+from resource_management.libraries.functions import solr_cloud_util
+from resource_management.libraries.resources.xml_config import XmlConfig
+
+def titan_service(action='start'):
+  import params
+  import params_server
+  cmd = format("{titan_bin_dir}/gremlin-server-script.sh")
+  cmd_params = params_server.titan_pid_file + " " + params.titan_log_file + " " + params.titan_err_file + " " + params.titan_bin_dir + " " + params.titan_server_conf_dir + " " + params.titan_log_dir
+  if action == 'start':
+    if params.security_enabled:
+      kinit_cmd = format("{kinit_path_local} -kt {titan_keytab_path} {titan_jaas_princ};")
+      Execute(kinit_cmd,
+              user=params.titan_user
+              )
+    XmlConfig("hbase-site.xml",
+              not_if = params.hbase_master_installed,
+              conf_dir=params.titan_conf_dir,
+              configurations=params.config['configurations']['hbase-site'],
+              configuration_attributes=params.config['configuration_attributes']['hbase-site'],
+              group=params.user_group,
+              owner=params.titan_user,
+              mode=0644
+              )
+
+    # For SparkGraphComputer: prepare /user/titan/data on HDFS and upload Spark jars to /user/spark/share/lib/spark (used as spark.yarn.jars by Spark on YARN).
+    # create hdfs dir /user/titan/data
+    titan_create_data_dir_command = format("hadoop fs -mkdir -p {titan_hdfs_data_dir}; hadoop fs -chown -R titan:hdfs /user/titan")
+    titan_data_exist_command = format("hadoop fs -test -e {titan_hdfs_data_dir}>/dev/null 2>&1")
+    Execute(titan_create_data_dir_command,
+            not_if = titan_data_exist_command,
+            logoutput=True,user=params.hdfs_user)
+
+    #create spark plugin dir for spark jars
+    titan_create_spark_plugin_dir_command = format("mkdir -p {titan_ext_spark_plugin_dir}")
+    titan_ext_spark_plugin_dir_exist_command = format("ls {titan_ext_spark_plugin_dir}>/dev/null 2>&1")
+    Execute(titan_create_spark_plugin_dir_command,
+            not_if = titan_ext_spark_plugin_dir_exist_command,
+            logoutput=True,user=params.titan_user)
+
+    # get spark archive from hdfs
+    titan_get_spark_tar_command = format("hadoop fs -get {titan_spark2_archive_dir}/{titan_spark2_archive_file} {titan_ext_spark_plugin_dir}")
+    titan_sparktargz_exist_command = format("ls {titan_ext_spark_plugin_dir}/{titan_spark2_archive_file}>/dev/null 2>&1")
+    Execute(titan_get_spark_tar_command,
+            not_if = titan_sparktargz_exist_command,
+            logoutput=True,user=params.titan_user)
+
+    #extract spark targz
+    titan_x_spark_targz_command = format("tar -xzvf {titan_ext_spark_plugin_dir}/{titan_spark2_archive_file} -C {titan_ext_spark_plugin_dir}/>/dev/null 2>&1")
+    titan_sparkjars_exist_command = format("ls {titan_ext_spark_plugin_dir}/*.jar>/dev/null 2>&1")
+    Execute(titan_x_spark_targz_command,
+            not_if = titan_sparkjars_exist_command,
+            logoutput=True,user=params.titan_user)
+
+    #create hdfs dir /user/spark/share/lib/spark
+    titan_create_spark_dir_command = format("hadoop fs -mkdir -p {titan_hdfs_spark_lib_dir}")
+    titan_spark_exist_command = format("hadoop fs -test -e {titan_hdfs_spark_lib_dir}>/dev/null 2>&1")
+    Execute(titan_create_spark_dir_command,
+            not_if = titan_spark_exist_command,
+            logoutput=True,user=params.hdfs_user)
+
+    #upload spark jars to hdfs /user/spark/share/lib/spark
+    titan_put_spark_jar_command = format("hadoop fs -put -f {titan_ext_spark_plugin_dir}/* {titan_hdfs_spark_lib_dir}; hadoop fs -rm -r {titan_hdfs_spark_lib_dir}/guava*.jar; hadoop fs -put -f {titan_home_dir}/lib/guava*.jar {titan_hdfs_spark_lib_dir}")
+    titan_sparkjar_exist_command = format("hadoop fs -test -e {titan_hdfs_spark_lib_dir}/*.jar>/dev/null 2>&1")
+    Execute(titan_put_spark_jar_command,
+            not_if = titan_sparkjar_exist_command,
+            logoutput=True,user=params.hdfs_user)
+
+    # remove guava*.jar, slf4j-log4j12*.jar and spark-core*.jar to avoid classpath conflicts
+    titan_rm_conflict_jars_command = format("rm -rf {titan_ext_spark_plugin_dir}/guava*.jar; rm -rf {titan_ext_spark_plugin_dir}/slf4j-log4j12*.jar; rm -rf {titan_ext_spark_plugin_dir}/spark-core*.jar; ")
+    titan_guava_exist_command = format("ls {titan_ext_spark_plugin_dir}/guava*.jar>/dev/null 2>&1")
+    Execute(titan_rm_conflict_jars_command,
+            only_if = titan_guava_exist_command,
+            logoutput=True,user=params.titan_user)
+
+    #generate yarn-site.xml in Titan conf if no yarn-client installed
+    XmlConfig("yarn-site.xml",
+              not_if = params.yarn_client_installed,
+              conf_dir=params.titan_conf_dir,
+              configurations=params.config['configurations']['yarn-site'],
+              configuration_attributes=params.config['configuration_attributes']['yarn-site'],
+              group=params.user_group,
+              owner=params.titan_user,
+              mode=0644
+              )
+
+    #create jaas file for solr when security enabled
+    jaas_file = format('{titan_solr_jaas_file}')
+    if not os.path.isfile(jaas_file) and params.security_enabled:
+      File(jaas_file,
+           owner   = params.titan_user,
+           group   = params.user_group,
+           mode    = 0644,
+           content = Template('titan_solr_jaas.conf.j2')
+           )
+    #upload config to zookeeper
+    solr_cloud_util.upload_configuration_to_zk(
+        zookeeper_quorum = params.zookeeper_quorum,
+        solr_znode = params.solr_znode,
+        config_set = params.titan_solr_configset,
+        config_set_dir = params.titan_solr_conf_dir,
+        tmp_dir = params.tmp_dir,
+        java64_home = params.java64_home,
+        jaas_file=jaas_file,
+        retry=30, interval=5)
+
+    #create solr collection
+    solr_cloud_util.create_collection(
+        zookeeper_quorum = params.zookeeper_quorum,
+        solr_znode = params.solr_znode,
+        collection = params.titan_solr_collection_name,
+        config_set = params.titan_solr_configset,
+        java64_home = params.java64_home,
+        shards = params.titan_solr_shards,
+        replication_factor = int(params.infra_solr_replication_factor),
+        jaas_file = jaas_file)
+
+    daemon_cmd = format(cmd + " start " + cmd_params)
+    no_op_test = format("ls {params_server.titan_pid_file} >/dev/null 2>&1 && ps `cat {params_server.titan_pid_file}` >/dev/null 2>&1")
+    Execute(daemon_cmd,
+            not_if=no_op_test,
+            user=params.titan_user
+    )
+
+  elif action == 'stop':
+    daemon_cmd = format("{titan_bin_dir}/gremlin-server-script.sh stop " + params_server.titan_pid_file)
+    Execute(daemon_cmd, user=params.titan_user)

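Every Execute above is guarded with a not_if or only_if shell test, so a
repeated START converges instead of redoing work. A standalone sketch of that
guard idiom (plain subprocess calls, not the resource_management Execute
resource):

import subprocess

def execute(cmd, not_if=None, only_if=None):
    if not_if is not None and subprocess.call(not_if, shell=True) == 0:
        return  # guard passed: the work is already done, skip the command
    if only_if is not None and subprocess.call(only_if, shell=True) != 0:
        return  # precondition failed: nothing to do
    subprocess.check_call(cmd, shell=True)

if __name__ == "__main__":
    # the mkdir runs at most once; the guard skips it on later calls
    execute("mkdir /tmp/titan-demo", not_if="test -d /tmp/titan-demo")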
http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/templates/titan_solr_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/templates/titan_solr_client_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/templates/titan_solr_client_jaas.conf.j2
new file mode 100755
index 0000000..cd4b53e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/templates/titan_solr_client_jaas.conf.j2
@@ -0,0 +1,23 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+ com.sun.security.auth.module.Krb5LoginModule required
+ useKeyTab=false
+ useTicketCache=true;
+};
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/templates/titan_solr_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/templates/titan_solr_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/templates/titan_solr_jaas.conf.j2
new file mode 100755
index 0000000..bf562f8
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/templates/titan_solr_jaas.conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+ com.sun.security.auth.module.Krb5LoginModule required
+ useKeyTab=true
+ storeKey=true
+ useTicketCache=false
+ keyTab="{{titan_keytab_path}}"
+ principal="{{titan_jaas_princ}}";
+};
\ No newline at end of file

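Ambari's Template resource fills the {{titan_keytab_path}} and
{{titan_jaas_princ}} placeholders above from params. A standalone Jinja2
sketch of that rendering step (the keytab path and principal below are
example values, not taken from this patch):

from jinja2 import Template

tmpl = Template(u'keyTab="{{ titan_keytab_path }}"\n'
                u'principal="{{ titan_jaas_princ }}";')
print(tmpl.render(titan_keytab_path="/etc/security/keytabs/titan.service.keytab",
                  titan_jaas_princ="titan/_HOST@EXAMPLE.COM"))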
http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/role_command_order.json b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/role_command_order.json
index 35fc0d8..dc4811b 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/role_command_order.json
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/role_command_order.json
@@ -4,14 +4,22 @@
   "general_deps" : {
     "_comment" : "dependencies for all cases",
     "HIVE_SERVER_INTERACTIVE-START": ["RESOURCEMANAGER-START", "NODEMANAGER-START", "MYSQL_SERVER-START"],
-    "RESOURCEMANAGER-STOP": ["HIVE_SERVER_INTERACTIVE-STOP", "SPARK2_THRIFTSERVER-STOP"],
+    "RESOURCEMANAGER-STOP": ["HIVE_SERVER_INTERACTIVE-STOP", "SPARK2_THRIFTSERVER-STOP", "KERNEL_GATEWAY-STOP" ],
     "NODEMANAGER-STOP": ["HIVE_SERVER_INTERACTIVE-STOP", "KERNEL_GATEWAY-STOP" ],
     "NAMENODE-STOP": ["HIVE_SERVER_INTERACTIVE-STOP"],
     "HIVE_SERVER_INTERACTIVE-RESTART": ["NODEMANAGER-RESTART", "MYSQL_SERVER-RESTART"],
     "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START", "WEBHCAT_SERVER-START", "HIVE_SERVER_INTERACTIVE-START"],
     "RANGER_ADMIN-START": ["ZOOKEEPER_SERVER-START", "INFRA_SOLR-START"],
     "SPARK2_SERVICE_CHECK-SERVICE_CHECK" : ["SPARK2_JOBHISTORYSERVER-START", "APP_TIMELINE_SERVER-START"],
-    "HBASE_REST_SERVER-START": ["HBASE_MASTER-START"]
+    "HBASE_REST_SERVER-START": ["HBASE_MASTER-START"],
+    "TITAN_SERVER-START" : ["HBASE_SERVICE_CHECK-SERVICE_CHECK", "SOLR-START"],
+    "TITAN_SERVICE_CHECK-SERVICE_CHECK": ["TITAN_SERVER-START"],
+    "KERNEL_GATEWAY-INSTALL": ["SPARK2_CLIENT-INSTALL"],
+    "PYTHON_CLIENT-INSTALL": ["KERNEL_GATEWAY-INSTALL"],
+    "KERNEL_GATEWAY-START": ["NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START", "SPARK2_JOBHISTORYSERVER-START"],
+    "JNBG_SERVICE_CHECK-SERVICE_CHECK": ["KERNEL_GATEWAY-START"],
+    "R4ML-INSTALL": ["SPARK2_CLIENT-INSTALL", "SYSTEMML-INSTALL"],
+    "R4ML_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START", "SPARK2_JOBHISTORYSERVER-START"]
   },
   "_comment" : "Dependencies that are used when GLUSTERFS is not present in cluster",
   "optional_no_glusterfs": {

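In role_command_order.json each key is a ROLE-COMMAND that must wait for every
ROLE-COMMAND in its value list. A standalone sketch that prints the ordering
edges added for TITAN above (my reading of the format, not Ambari's actual
scheduler):

import json

sample = json.loads("""{
  "TITAN_SERVER-START": ["HBASE_SERVICE_CHECK-SERVICE_CHECK", "SOLR-START"],
  "TITAN_SERVICE_CHECK-SERVICE_CHECK": ["TITAN_SERVER-START"]
}""")

for node, prereqs in sample.items():
    for prereq in prereqs:
        print("%s waits for %s" % (node, prereq))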
http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/JNBG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/JNBG/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/JNBG/metainfo.xml
new file mode 100755
index 0000000..3520a32
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/JNBG/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>JNBG</name>
+      <extends>common-services/JNBG/0.2.0</extends>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/R4ML/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/R4ML/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/R4ML/metainfo.xml
new file mode 100755
index 0000000..d1c708d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/R4ML/metainfo.xml
@@ -0,0 +1,37 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>R4ML</name>
+      <version>0.8.0</version>
+      <extends>common-services/R4ML/0.8.0</extends>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>r4ml_4_2_5_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/SYSTEMML/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/SYSTEMML/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/SYSTEMML/metainfo.xml
new file mode 100755
index 0000000..7a0e125
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/SYSTEMML/metainfo.xml
@@ -0,0 +1,37 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SYSTEMML</name>
+      <version>0.13.0</version>
+      <extends>common-services/SYSTEMML/0.10.0</extends>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>*systemml_4_2_5_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/TITAN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/TITAN/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/TITAN/metainfo.xml
new file mode 100755
index 0000000..d00e707
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/TITAN/metainfo.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>TITAN</name>
+      <version>1.0.0</version>
+      <extends>common-services/TITAN/1.0.0</extends>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>titan_4_2_5_*</name>
+            </package>
+            <package>
+              <name>ambari-infra-solr-client-*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/stack_advisor.py
index 1caa307..8883f57 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/stack_advisor.py
@@ -26,7 +26,9 @@ class BigInsights425StackAdvisor(BigInsights42StackAdvisor):
     parentRecommendConfDict = super(BigInsights425StackAdvisor, self).getServiceConfigurationRecommenderDict()
     childRecommendConfDict = {
       "HDFS": self.recommendHDFSConfigurations,
+      "JNBG": self.recommendJNBGConfigurations,
       "SOLR": self.recommendSolrConfigurations,
+      "TITAN": self.recommendTitanConfigurations,
       "RANGER": self.recommendRangerConfigurations
     }
     parentRecommendConfDict.update(childRecommendConfDict)
@@ -35,11 +37,55 @@ class BigInsights425StackAdvisor(BigInsights42StackAdvisor):
   def getServiceConfigurationValidators(self):
     parentValidators = super(BigInsights425StackAdvisor, self).getServiceConfigurationValidators()
     childValidators = {
+      "JNBG": {"jnbg-env": self.validateJNBGConfigurations},
       "SOLR": {"ranger-solr-plugin-properties": self.validateSolrRangerPluginConfigurations}
     }
     self.mergeValidators(parentValidators, childValidators)
     return parentValidators
 
+  def recommendJNBGConfigurations(self, configurations, clusterData, services, hosts):
+    putJNBGEnvProperty = self.putProperty(configurations, "jnbg-env", services)
+    putJNBGEnvPropertyAttribute = self.putPropertyAttribute(configurations, "jnbg-env")
+   
+    distro_version = platform.linux_distribution()[1]
+    # On RHEL 6.x default path does not point to a Python 2.7
+    # so empty out the field and force user to update the path
+    if distro_version < "7.0":
+      putJNBGEnvProperty('python_interpreter_path', "")
+
+  def validateJNBGConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = []
+    jnbg_env = getSiteProperties(configurations, "jnbg-env")
+    py_exec = jnbg_env.get("python_interpreter_path") if jnbg_env and "python_interpreter_path" in jnbg_env else ""
+
+    # Test that it is a valid executable path before proceeding
+    if not os.path.isfile(py_exec) or not os.access(py_exec, os.X_OK):
+      validationItems.append({"config-name": "python_interpreter_path",
+                              "item": self.getErrorItem("Invalid Python interpreter path specified")})
+      return self.toConfigurationValidationProblems(validationItems, "jnbg-env")
+
+    distro_version = platform.linux_distribution()[1]
+    if distro_version < "7.0" and (py_exec == "/opt/rh/python27/root/usr/bin/python" or py_exec == "/opt/rh/python27/root/usr/bin/python2" or py_exec == "/opt/rh/python27/root/usr/bin/python2.7"):
+      # Special handling for RHSCL Python 2.7
+      proc = Popen(['/usr/bin/scl', 'enable', 'python27', '/opt/rh/python27/root/usr/bin/python -V'], stderr=PIPE)
+    else:
+      proc = Popen([py_exec, '-V'], stderr=PIPE)
+    py_string = proc.communicate()[1]
+
+    if "Python" not in py_string:
+      validationItems.append({"config-name": "python_interpreter_path",
+                              "item": self.getErrorItem("Path specified does not appear to be a Python interpreter")})
+      return self.toConfigurationValidationProblems(validationItems, "jnbg-env")
+
+    # Validate that the specified Python is 2.7.x (>= 2.7 and < 3.0)
+    py_version = py_string.split()[1]
+    major, minor = py_version.split('.')[0], py_version.split('.')[1]
+    if major != '2' or int(minor) < 7:
+      validationItems.append({"config-name": "python_interpreter_path",
+                              "item": self.getErrorItem("Specified Python interpreter must be version >= 2.7 and < 3.0")})
+      return self.toConfigurationValidationProblems(validationItems, "jnbg-env")
+
+    return self.toConfigurationValidationProblems(validationItems, "jnbg-env")
+
   def recommendRangerConfigurations(self, configurations, clusterData, services, hosts):
     super(BigInsights425StackAdvisor, self).recommendRangerConfigurations(configurations, clusterData, services, hosts)
     putRangerAdminProperty = self.putProperty(configurations, "ranger-admin-site", services)
@@ -50,6 +96,13 @@ class BigInsights425StackAdvisor(BigInsights42StackAdvisor):
     zookeeper_host_port = ",".join(zookeeper_host_port)
     ranger_audit_zk_port = '{0}/{1}'.format(zookeeper_host_port, 'solr')
     putRangerAdminProperty('ranger.audit.solr.zookeepers', ranger_audit_zk_port)
+
+  def recommendTitanConfigurations(self, configurations, clusterData, services, hosts):
+    putTitanPropertyAttribute = self.putPropertyAttribute(configurations, "titan-env")
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    knox_enabled = "KNOX" in servicesList
+    if knox_enabled:
+      putTitanPropertyAttribute("SimpleAuthenticator", "visible", "false")
  
   def recommendSolrConfigurations(self, configurations, clusterData, services, hosts):
     super(BigInsights425StackAdvisor, self).recommendSolrConfigurations(configurations, clusterData, services, hosts)

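The JNBG validator above boils down to "the interpreter must report Python
2.7.x". A standalone restatement of just that version test, using integer
comparison, with assertions as usage examples:

def is_python_27(version_string):
    parts = version_string.split(".")
    try:
        major, minor = int(parts[0]), int(parts[1])
    except (ValueError, IndexError):
        return False
    return major == 2 and minor >= 7

assert is_python_27("2.7.5")
assert not is_python_27("2.6.6")
assert not is_python_27("3.6.1")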
http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
index f3c73a0..2c82cb3 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
@@ -107,7 +107,7 @@
       <execute-stage service="HBASE" component="HBASE_MASTER" title="Snapshot HBASE">
         <task xsi:type="execute" hosts="master">
           <script>scripts/hbase_upgrade.py</script>
-          <function>snapshot</function>
+          <function>take_snapshot</function>
         </task>
       </execute-stage>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/role_command_order.json b/ambari-server/src/main/resources/stacks/BigInsights/4.2/role_command_order.json
index cc45213..5ee4b32 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/role_command_order.json
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/role_command_order.json
@@ -19,7 +19,8 @@
     "ATLAS_SERVICE_CHECK-SERVICE_CHECK": ["ATLAS_SERVER-START"],
     "PHOENIX_QUERY_SERVER-START": ["HBASE_MASTER-START"],
     "OOZIE_SERVER-START": ["FALCON_SERVER-START"],
-    "FALCON_SERVICE_CHECK-SERVICE_CHECK": ["FALCON_SERVER-START"]
+    "FALCON_SERVICE_CHECK-SERVICE_CHECK": ["FALCON_SERVER-START"],
+    "TITAN_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_SERVICE_CHECK-SERVICE_CHECK"]
   },
   "_comment" : "Dependencies that are used when GLUSTERFS is not present in cluster",
   "optional_no_glusterfs": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/metainfo.xml
new file mode 100755
index 0000000..b73e31e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/metainfo.xml
@@ -0,0 +1,77 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SYSTEMML</name>
+      <displayName>SystemML</displayName>
+      <comment>Apache SystemML is a distributed and declarative machine learning platform.</comment>
+      <version>0.10.0.4.2</version>
+      <components>
+        <component>
+          <name>SYSTEMML</name>
+          <displayName>SystemML</displayName>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/systemml_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configFiles>
+          </configFiles>          
+        </component>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>apache_systemml*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>HDFS</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+      </configuration-dependencies>
+
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/__init__.py
new file mode 100755
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/params.py
new file mode 100755
index 0000000..dd7e46c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/params.py
@@ -0,0 +1,40 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.script.script import Script
+
+# server configurations
+config = Script.get_config()
+stack_root = Script.get_stack_root()
+
+systemml_home_dir = format("{stack_root}/current/systemml-client")
+systemml_lib_dir = format("{systemml_home_dir}/lib")
+systemml_scripts_dir = format("{systemml_home_dir}/scripts")
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+stack_version = format_stack_version(stack_version_unformatted)
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+version = default("/commandParams/version", None)
+stack_name = default("/hostLevelParams/stack_name", None)
+
+java_home = config['hostLevelParams']['java_home']

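Lookups like default("/commandParams/version", None) walk the command JSON one
path segment at a time and fall back to the default when a key is missing. A
standalone sketch of that behavior (my approximation of the helper, not its
actual implementation):

def default_lookup(config, path, fallback=None):
    node = config
    for key in path.strip("/").split("/"):
        if not isinstance(node, dict) or key not in node:
            return fallback
        node = node[key]
    return node

cmd = {"commandParams": {"version": "4.2.5.0"}}
print(default_lookup(cmd, "/commandParams/version"))              # 4.2.5.0
print(default_lookup(cmd, "/hostLevelParams/stack_name", "HDP"))  # HDP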
http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/service_check.py
new file mode 100755
index 0000000..c15b907
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/service_check.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions.format import format
+import subprocess
+import os
+
+class SystemMLServiceCheck(Script):
+    def service_check(self, env):
+        import params
+        env.set_params(params)
+        
+        if os.path.exists(params.systemml_lib_dir):
+            cp = format("{params.stack_root}/current/hadoop-client/*:{params.stack_root}/current/hadoop-mapreduce-client/*:{params.stack_root}/current/hadoop-client/lib/*:{params.systemml_lib_dir}/systemml.jar")
+            java = format("{params.java_home}/bin/java")
+            command = [java, "-cp", cp, "org.apache.sysml.api.DMLScript", "-s", "print('Apache SystemML');"]
+            process = subprocess.Popen(command, stdout=subprocess.PIPE)
+            output = process.communicate()[0]
+            print output
+        
+            if 'Apache SystemML' not in output:
+                raise Fail("Expected output Apache SystemML not found.")
+
+if __name__ == "__main__":
+    SystemMLServiceCheck().execute()

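The service check above only inspects stdout; a variant that also checks the
JVM's exit code would catch failures that print nothing. A standalone sketch
under that assumption (the java path and classpath are placeholders supplied
by the caller):

import subprocess

def run_systemml_smoke(java, classpath):
    cmd = [java, "-cp", classpath, "org.apache.sysml.api.DMLScript",
           "-s", "print('Apache SystemML');"]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    out = proc.communicate()[0]
    # succeed only when the JVM exited cleanly and printed the marker
    return proc.returncode == 0 and "Apache SystemML" in out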
http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/systemml_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/systemml_client.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/systemml_client.py
new file mode 100755
index 0000000..2d45b68
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/systemml_client.py
@@ -0,0 +1,49 @@
+#!/usr/bin/python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+#from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+
+
+class SystemMLClient(Script):
+
+  def get_component_name(self):
+    return "systemml-client"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      #conf_select.select(params.stack_name, "systemml", params.version)
+      stack_select.select("systemml-client", params.version)
+
+  def install(self, env):
+    self.install_packages(env)
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+
+if __name__ == "__main__":
+  SystemMLClient().execute()
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-env.xml
new file mode 100755
index 0000000..86e09f1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-env.xml
@@ -0,0 +1,46 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false" supports_do_not_extend="true">
+
+  <property>
+    <name>titan_user</name>
+    <description>User to run Titan as</description>
+    <property-type>USER</property-type>
+    <value>titan</value>
+  </property>
+
+  <property>
+    <name>content</name>
+    <description>This is the template for titan-env.sh file</description>
+    <value>
+# Set JAVA HOME
+export JAVA_HOME={{java64_home}}
+
+# Add hadoop and hbase configuration directories into classpath
+export HADOOP_CONF_DIR={{hadoop_config_dir}}
+export HBASE_CONF_DIR={{hbase_config_dir}}
+CLASSPATH=$HADOOP_CONF_DIR:$HBASE_CONF_DIR:$CLASSPATH
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-hbase-solr.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-hbase-solr.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-hbase-solr.xml
new file mode 100755
index 0000000..0ca6807
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-hbase-solr.xml
@@ -0,0 +1,66 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false" supports_adding_forbidden="true" supports_do_not_extend="true">
+
+  <property>
+    <name>content</name>
+    <description>Describe the configurations for Solr</description>
+    <value># Titan configuration sample: HBase and Solr
+# ATTENTION: If you would like to use this property, do manually execute titan-solr-connection.sh before build index.
+
+# This file connects to HBase using a Zookeeper quorum
+# (storage.hostname) consisting solely of localhost. It also
+# connects to Solr running on localhost using Solr's HTTP API.
+# Zookeeper, the HBase services, and Solr must already be
+# running and available before starting Titan with this file.
+storage.backend=hbase
+storage.hostname={{storage_host}}
+storage.hbase.table=titan_solr
+storage.hbase.ext.zookeeper.znode.parent={{hbase_zookeeper_parent}}
+
+cache.db-cache = true
+cache.db-cache-clean-wait = 20
+cache.db-cache-time = 180000
+cache.db-cache-size = 0.5
+
+# The indexing backend used to extend and optimize Titan's query
+# functionality. This setting is optional. Titan can use multiple
+# heterogeneous index backends. Hence, this option can appear more than
+# once, so long as the user-defined name between "index" and "backend" is
+# unique among appearances. Similar to the storage backend, this should be
+# set to one of Titan's built-in shorthand names for its standard index
+# backends (shorthands: lucene, elasticsearch, es, solr) or to the full
+# package and classname of a custom/third-party IndexProvider
+# implementation.
+
+index.search.backend=solr
+index.search.solr.mode=cloud
+index.search.solr.zookeeper-url={{solr_server_host}}/solr
+index.search.solr.configset=titan
+    </value>
+    <value-attributes>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-log4j.xml
new file mode 100755
index 0000000..3363d81
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-log4j.xml
@@ -0,0 +1,65 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false" supports_do_not_extend="true">
+
+  <property>
+    <name>content</name>
+    <description>Custom log4j-console.properties</description>
+    <value>
+      # Used by gremlin.sh
+
+      log4j.appender.A2=org.apache.log4j.ConsoleAppender
+      log4j.appender.A2.Threshold=TRACE
+      log4j.appender.A2.layout=org.apache.log4j.PatternLayout
+      log4j.appender.A2.layout.ConversionPattern=%d{HH:mm:ss} %-5p %c %x - %m%n
+
+      log4j.rootLogger=${gremlin.log4j.level}, A2
+
+      #log4j.logger.com.thinkaurelius.titan.graphdb.database.idassigner.placement=DEBUG
+      #log4j.logger.com.thinkaurelius.titan.diskstorage.hbase.HBaseStoreManager=DEBUG
+
+      # Disable spurious Hadoop config deprecation warnings under 2.2.0.
+      #
+      # See https://issues.apache.org/jira/browse/HADOOP-10178
+      #
+      # This can and should be deleted when we upgrade our Hadoop 2.2.0
+      # dependency to 2.3.0 or 3.0.0.
+      log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=OFF
+
+      # Configure MR at its own loglevel. We usually want MR at INFO,
+      # even if the rest of the loggers are at WARN or ERROR or FATAL,
+      # because job progress information is at INFO.
+      log4j.logger.org.apache.hadoop.mapred=${gremlin.mr.log4j.level}
+      log4j.logger.org.apache.hadoop.mapreduce=${gremlin.mr.log4j.level}
+
+      # This generates 3 INFO lines per jar on the classpath -- usually more
+      # noise than desirable in the REPL. Switching it to the default
+      # log4j level means it will be at WARN by default, which is ideal.
+      log4j.logger.org.apache.hadoop.mapred.LocalDistributedCacheManager=${gremlin.log4j.level}
+    </value>
+    <value-attributes>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/kerberos.json b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/kerberos.json
new file mode 100755
index 0000000..ccabbf0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/kerberos.json
@@ -0,0 +1,17 @@
+{
+  "services": [
+    {
+      "name": "TITAN",
+      "components": [
+        {
+          "name": "TITAN",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
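
The single identity above is a cross-reference: names beginning with "/" point
at identities defined elsewhere in the stack's Kerberos descriptors (here the
HDFS identity) rather than declaring a new principal. A minimal stdlib sketch
of reading such a descriptor and classifying its identities; the file name and
output line are illustrative:

    import json

    def list_identities(path):
        with open(path) as f:
            descriptor = json.load(f)
        for service in descriptor.get("services", []):
            for component in service.get("components", []):
                for identity in component.get("identities", []):
                    name = identity["name"]
                    kind = "reference" if name.startswith("/") else "local"
                    print("%s/%s: %s identity %s" %
                          (service["name"], component["name"], kind, name))

    # list_identities("kerberos.json")
    # -> TITAN/TITAN: reference identity /HDFS/NAMENODE/hdfs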

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/metainfo.xml
new file mode 100755
index 0000000..73f4635
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/metainfo.xml
@@ -0,0 +1,88 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>TITAN</name>
+      <displayName>Titan</displayName>
+      <comment>Titan is a scalable graph database optimized for storing and querying graphs containing hundreds of
+        billions of vertices and edges distributed across a multi-machine cluster.</comment>
+      <version>1.0.0</version>
+      <components>
+        <component>
+          <name>TITAN</name>
+          <displayName>Titan</displayName>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/titan_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configFiles>
+            <configFile>
+               <type>env</type>
+               <fileName>titan-env.sh</fileName>
+               <dictionaryName>titan-env</dictionaryName>
+            </configFile>
+            <configFile>
+                <type>env</type>
+                <fileName>log4j-console.properties</fileName>
+                <dictionaryName>titan-log4j</dictionaryName>
+            </configFile>
+            <configFile>
+                <type>env</type>
+                <fileName>titan-hbase-solr.properties</fileName>
+                <dictionaryName>titan-hbase-solr</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>titan_4_2_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>HDFS</service>
+        <service>HBASE</service>
+        <service>SOLR</service>
+      </requiredServices>
+      
+      <configuration-dependencies>
+        <config-type>titan-env</config-type>
+        <config-type>titan-hbase-solr</config-type>
+        <config-type>titan-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>
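
The <configFiles> entries map each client-side file Ambari generates to the
config dictionary it is rendered from. A quick sketch, independent of Ambari
itself, that lists those mappings from a metainfo.xml using only the standard
library:

    import xml.etree.ElementTree as ET

    def list_config_files(metainfo_path):
        root = ET.parse(metainfo_path).getroot()
        for cf in root.iter("configFile"):
            # e.g. "env -> titan-env.sh (dictionary: titan-env)"
            print("%s -> %s (dictionary: %s)" % (
                cf.findtext("type"),
                cf.findtext("fileName"),
                cf.findtext("dictionaryName")))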

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/files/titanSmoke.groovy
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/files/titanSmoke.groovy b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/files/titanSmoke.groovy
new file mode 100755
index 0000000..79438be
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/files/titanSmoke.groovy
@@ -0,0 +1,20 @@
+/*Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License */
+
+import com.thinkaurelius.titan.core.TitanFactory;
+
+graph = TitanFactory.open("/etc/titan/conf/titan-hbase-solr.properties")
+g = graph.traversal()
+l = g.V().values('name').toList()


[39/50] [abbrv] ambari git commit: AMBARI-21474. HBase REST Server is missing after Ambari upgrade

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/phoenix_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/phoenix_service.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/phoenix_service.py
new file mode 100644
index 0000000..0d6d50d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/phoenix_service.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import errno
+from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Execute
+from resource_management.core.resources.system import File
+from resource_management.libraries.functions import check_process_status, format
+
+# Note: Phoenix Query Server is only applicable to stack versions that include Phoenix.
+def phoenix_service(action = 'start'): # 'start', 'stop', 'status'
+    # Note: params/status_params should already be imported before calling phoenix_service()
+    pid_file = format("{pid_dir}/phoenix-{hbase_user}-server.pid")
+    no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1")
+
+    if action == "status":
+      check_process_status(pid_file)
+    else:
+      env = {'JAVA_HOME': format("{java64_home}"), 'HBASE_CONF_DIR': format("{hbase_conf_dir}")}
+      daemon_cmd = format("{phx_daemon_script} {action}")
+      if action == 'start':
+        Execute(daemon_cmd,
+                user=format("{hbase_user}"),
+                environment=env)
+  
+      elif action == 'stop':
+        Execute(daemon_cmd,
+                user=format("{hbase_user}"),
+                environment=env
+        )
+        try:
+          File(pid_file, action = "delete")
+        except OSError as exc:
+          # OSError: [Errno 2] No such file or directory
+          if exc.errno == errno.ENOENT:
+            Logger.info("Did not remove '{0}' as it did not exist".format(pid_file))
+          else:
+            raise
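
check_process_status() raises ComponentIsNotRunning when the pid file is
missing or names a dead process, which the agent reports as "stopped". A
stand-alone sketch of the same check using only the standard library
(POSIX-only, since it probes the process with signal 0):

    import os

    class ComponentIsNotRunning(Exception):
        pass

    def check_pid_file(pid_file):
        try:
            with open(pid_file) as f:
                pid = int(f.read().strip())
            os.kill(pid, 0)  # signal 0 checks existence without killing
        except (IOError, ValueError, OSError):
            raise ComponentIsNotRunning("no live process for %s" % pid_file)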

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/service_check.py
new file mode 100644
index 0000000..a440c10
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/service_check.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions.format import format
+import functions
+from ambari_commons import OSCheck, OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+
+class HbaseServiceCheck(Script):
+  pass
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HbaseServiceCheckWindows(HbaseServiceCheck):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    smoke_cmd = os.path.join(params.stack_root, "Run-SmokeTests.cmd")
+    service = "HBASE"
+    Execute(format("cmd /C {smoke_cmd} {service}"), user=params.hbase_user, logoutput=True)
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HbaseServiceCheckDefault(HbaseServiceCheck):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    
+    output_file = "/apps/hbase/data/ambarismoketest"
+    smokeuser_kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};") if params.security_enabled else ""
+    hbase_servicecheck_file = format("{exec_tmp_dir}/hbase-smoke.sh")
+    hbase_servicecheck_cleanup_file = format("{exec_tmp_dir}/hbase-smoke-cleanup.sh")
+
+    File( format("{exec_tmp_dir}/hbaseSmokeVerify.sh"),
+      content = StaticFile("hbaseSmokeVerify.sh"),
+      mode = 0755
+    )
+
+    File(hbase_servicecheck_cleanup_file,
+      content = StaticFile("hbase-smoke-cleanup.sh"),
+      mode = 0755
+    )
+  
+    File( hbase_servicecheck_file,
+      mode = 0755,
+      content = Template('hbase-smoke.sh.j2')
+    )
+    
+    if params.security_enabled:    
+      hbase_grant_premissions_file = format("{exec_tmp_dir}/hbase_grant_permissions.sh")
+      grantprivelegecmd = format("{kinit_cmd} {hbase_cmd} shell {hbase_grant_premissions_file}")
+  
+      File( hbase_grant_premissions_file,
+        owner   = params.hbase_user,
+        group   = params.user_group,
+        mode    = 0644,
+        content = Template('hbase_grant_permissions.j2')
+      )
+      
+      Execute( grantprivelegecmd,
+        user = params.hbase_user,
+      )
+
+    servicecheckcmd = format("{smokeuser_kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} shell {hbase_servicecheck_file}")
+    smokeverifycmd = format("{exec_tmp_dir}/hbaseSmokeVerify.sh {hbase_conf_dir} {service_check_data} {hbase_cmd}")
+    cleanupCmd = format("{smokeuser_kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} shell {hbase_servicecheck_cleanup_file}")
+    Execute(format("{servicecheckcmd} && {smokeverifycmd} && {cleanupCmd}"),
+      tries     = 6,
+      try_sleep = 5,
+      user = params.smoke_test_user,
+      logoutput = True
+    )
+
+if __name__ == "__main__":
+  HbaseServiceCheck().execute()
+  
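
The final Execute chains the smoke, verify, and cleanup scripts with "&&" and
retries the whole pipeline up to six times with a five-second pause between
attempts. A hedged stdlib sketch of that retry-around-a-pipeline pattern (the
script names are placeholders):

    import subprocess
    import time

    def run_with_retries(cmd, tries=6, try_sleep=5):
        for attempt in range(1, tries + 1):
            if subprocess.call(cmd, shell=True) == 0:
                return
            if attempt < tries:
                time.sleep(try_sleep)
        raise RuntimeError("failed after %d tries: %s" % (tries, cmd))

    # run_with_retries("smoke.sh && verify.sh && cleanup.sh")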

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/setup_ranger_hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/setup_ranger_hbase.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/setup_ranger_hbase.py
new file mode 100644
index 0000000..0d73e39
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/setup_ranger_hbase.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.core.logger import Logger
+
+def setup_ranger_hbase(upgrade_type=None, service_name="hbase-master"):
+  import params
+
+  if params.has_ranger_admin:
+
+    stack_version = None
+
+    if upgrade_type is not None:
+      stack_version = params.version
+
+    if params.retryAble:
+      Logger.info("HBase: Setup Ranger: command retry is enabled, so the command will be retried if Ranger admin is down.")
+    else:
+      Logger.info("HBase: Setup Ranger: command retry is not enabled, so the command will be skipped if Ranger admin is down.")
+
+    if params.xml_configurations_supported and params.enable_ranger_hbase and params.xa_audit_hdfs_is_enabled and service_name == 'hbase-master':
+      params.HdfsResource("/ranger/audit",
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.hdfs_user,
+                         group=params.hdfs_user,
+                         mode=0755,
+                         recursive_chmod=True
+      )
+      params.HdfsResource("/ranger/audit/hbaseMaster",
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.hbase_user,
+                         group=params.hbase_user,
+                         mode=0700,
+                         recursive_chmod=True
+      )
+      params.HdfsResource("/ranger/audit/hbaseRegional",
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.hbase_user,
+                         group=params.hbase_user,
+                         mode=0700,
+                         recursive_chmod=True
+      )
+      params.HdfsResource(None, action="execute")
+
+    if params.xml_configurations_supported:
+      api_version=None
+      if params.stack_supports_ranger_kerberos:
+        api_version='v2'
+      from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin
+      setup_ranger_plugin('hbase-client', 'hbase', params.previous_jdbc_jar, params.downloaded_custom_connector,
+                          params.driver_curl_source, params.driver_curl_target, params.java64_home,
+                          params.repo_name, params.hbase_ranger_plugin_repo,
+                          params.ranger_env, params.ranger_plugin_properties,
+                          params.policy_user, params.policymgr_mgr_url,
+                          params.enable_ranger_hbase, conf_dict=params.hbase_conf_dir,
+                          component_user=params.hbase_user, component_group=params.user_group, cache_service_list=['hbaseMaster', 'hbaseRegional'],
+                          plugin_audit_properties=params.config['configurations']['ranger-hbase-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-hbase-audit'],
+                          plugin_security_properties=params.config['configurations']['ranger-hbase-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-hbase-security'],
+                          plugin_policymgr_ssl_properties=params.config['configurations']['ranger-hbase-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-hbase-policymgr-ssl'],
+                          component_list=['hbase-client', 'hbase-master', 'hbase-regionserver'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
+                          credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password,
+                          ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
+                          stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble, api_version=api_version,
+                          is_security_enabled = params.security_enabled,
+                          is_stack_supports_ranger_kerberos = params.stack_supports_ranger_kerberos if params.security_enabled else None,
+                          component_user_principal=params.ranger_hbase_principal if params.security_enabled else None,
+                          component_user_keytab=params.ranger_hbase_keytab if params.security_enabled else None)
+
+    else:
+      from resource_management.libraries.functions.setup_ranger_plugin import setup_ranger_plugin
+      setup_ranger_plugin('hbase-client', 'hbase', params.previous_jdbc_jar,
+                        params.downloaded_custom_connector, params.driver_curl_source,
+                        params.driver_curl_target, params.java64_home,
+                        params.repo_name, params.hbase_ranger_plugin_repo,
+                        params.ranger_env, params.ranger_plugin_properties,
+                        params.policy_user, params.policymgr_mgr_url,
+                        params.enable_ranger_hbase, conf_dict=params.hbase_conf_dir,
+                        component_user=params.hbase_user, component_group=params.user_group, cache_service_list=['hbaseMaster', 'hbaseRegional'],
+                        plugin_audit_properties=params.config['configurations']['ranger-hbase-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-hbase-audit'],
+                        plugin_security_properties=params.config['configurations']['ranger-hbase-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-hbase-security'],
+                        plugin_policymgr_ssl_properties=params.config['configurations']['ranger-hbase-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-hbase-policymgr-ssl'],
+                        component_list=['hbase-client', 'hbase-master', 'hbase-regionserver'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
+                        credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password,
+                        ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
+                        stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble)
+  else:
+    Logger.info('Ranger admin not installed')
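
The three HdfsResource calls lay out the audit tree in HDFS: /ranger/audit
with mode 0755 for the HDFS user, and per-role subdirectories with mode 0700
for the HBase user. A local-filesystem analogue of the same mode layout (the
base path is illustrative; 0o755 is the portable spelling of the script's
Python 2 literal 0755):

    import os

    def make_audit_dirs(base="/tmp/ranger/audit"):
        layout = [("", 0o755), ("hbaseMaster", 0o700), ("hbaseRegional", 0o700)]
        for name, mode in layout:
            path = os.path.join(base, name) if name else base
            if not os.path.isdir(path):
                os.makedirs(path)
            os.chmod(path, mode)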

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/status_params.py
new file mode 100644
index 0000000..579b998
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/status_params.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+from ambari_commons.os_check import OSCheck
+
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+
+# a map of the Ambari role to the component name
+# for use with <stack-root>/current/<component>
+SERVER_ROLE_DIRECTORY_MAP = {
+  'HBASE_MASTER' : 'hbase-master',
+  'HBASE_REGIONSERVER' : 'hbase-regionserver',
+  'HBASE_REST_SERVER' : 'hbase-restserver',
+  'HBASE_CLIENT' : 'hbase-client'
+}
+
+component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "HBASE_CLIENT")
+
+config = Script.get_config()
+
+if OSCheck.is_windows_family():
+  hbase_master_win_service_name = "master"
+  hbase_regionserver_win_service_name = "regionserver"
+else:
+  pid_dir = config['configurations']['hbase-env']['hbase_pid_dir']
+  hbase_user = config['configurations']['hbase-env']['hbase_user']
+
+  # Security related/required params
+  hostname = config['hostname']
+  security_enabled = config['configurations']['cluster-env']['security_enabled']
+  kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+  tmp_dir = Script.get_tmp_dir()
+  
+  stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+  stack_version_formatted = format_stack_version(stack_version_unformatted)
+  stack_root = Script.get_stack_root()
+
+  hbase_conf_dir = "/etc/hbase/conf"
+  limits_conf_dir = "/etc/security/limits.d"
+  if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
+    hbase_conf_dir = format("{stack_root}/current/{component_directory}/conf")
+    if not os.path.exists(hbase_conf_dir):
+      hbase_conf_dir = format("{stack_root}/current/hbase-client/conf")
+
+stack_name = default("/hostLevelParams/stack_name", None)
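
The conf-dir logic prefers <stack-root>/current/<component>/conf when the
stack supports rolling upgrade, falls back to the generic hbase-client conf
dir if the component-specific one does not exist, and otherwise stays on
/etc/hbase/conf. Stripped of the stack-feature machinery, the same resolution
reads:

    import os

    def resolve_hbase_conf_dir(stack_root, component_directory, rolling_upgrade):
        if rolling_upgrade:
            candidate = os.path.join(stack_root, "current", component_directory, "conf")
            if os.path.exists(candidate):
                return candidate
            return os.path.join(stack_root, "current", "hbase-client", "conf")
        return "/etc/hbase/conf"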

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/upgrade.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/upgrade.py
new file mode 100644
index 0000000..b1a19e6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/upgrade.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import re
+from resource_management import *
+from resource_management.core import shell
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.decorator import retry
+from resource_management.libraries.functions import check_process_status
+
+def prestart(env, stack_component):
+  import params
+
+  if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+    conf_select.select(params.stack_name, "hbase", params.version)
+    stack_select.select(stack_component, params.version)
+
+def post_regionserver(env):
+  import params
+  env.set_params(params)
+
+  check_cmd = "echo 'status \"simple\"' | {0} shell".format(params.hbase_cmd)
+
+  exec_cmd = "{0} {1}".format(params.kinit_cmd, check_cmd)
+  call_and_match(exec_cmd, params.hbase_user, params.hostname + ":", re.IGNORECASE)
+
+
+def is_region_server_process_running():
+  try:
+    pid_file = format("{pid_dir}/hbase-{hbase_user}-regionserver.pid")
+    check_process_status(pid_file)
+    return True
+  except ComponentIsNotRunning:
+    return False
+
+@retry(times=30, sleep_time=30, err_class=Fail) # keep trying for 15 mins
+def call_and_match(cmd, user, regex, regex_search_flags):
+
+  if not is_region_server_process_running():
+    Logger.info("RegionServer process is not running")
+    raise Fail("RegionServer process is not running")
+
+  code, out = shell.call(cmd, user=user)
+
+  if not (out and re.search(regex, out, regex_search_flags)):
+    raise Fail("Could not verify RS available")
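
@retry here is Ambari's decorator; with times=30 and sleep_time=30 it allows
roughly fifteen minutes for the RegionServer to report in. A minimal
equivalent under the same assumed semantics (re-raise after the last attempt,
fixed sleep between tries):

    import functools
    import time

    def retry(times, sleep_time, err_class):
        def decorator(fn):
            @functools.wraps(fn)
            def wrapper(*args, **kwargs):
                for attempt in range(times):
                    try:
                        return fn(*args, **kwargs)
                    except err_class:
                        if attempt == times - 1:
                            raise
                        time.sleep(sleep_time)
            return wrapper
        return decorator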

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
new file mode 100644
index 0000000..7763bdd
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
@@ -0,0 +1,117 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+#
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is a hardcoded name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+{% if has_metric_collector %}
+
+*.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
+*.sink.timeline.slave.host.name={{hostname}}
+hbase.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+hbase.period={{metrics_collection_period}}
+hbase.collector={{metric_collector_host}}:{{metric_collector_port}}
+
+jvm.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+jvm.period={{metrics_collection_period}}
+jvm.collector={{metric_collector_host}}:{{metric_collector_port}}
+
+rpc.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+rpc.period={{metrics_collection_period}}
+rpc.collector={{metric_collector_host}}:{{metric_collector_port}}
+
+hbase.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+hbase.sink.timeline.period={{metrics_collection_period}}
+hbase.sink.timeline.sendInterval={{metrics_report_interval}}000
+hbase.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+
+# HTTPS properties
+hbase.sink.timeline.truststore.path = {{metric_truststore_path}}
+hbase.sink.timeline.truststore.type = {{metric_truststore_type}}
+hbase.sink.timeline.truststore.password = {{metric_truststore_password}}
+
+{% endif %}
+
+{% if has_ganglia_server %}
+
+# Configuration of the "hbase" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+hbase.period=10
+hbase.servers={{ganglia_server_host}}:8663
+
+# Configuration of the "jvm" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+jvm.period=10
+jvm.servers={{ganglia_server_host}}:8663
+
+# Configuration of the "rpc" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+rpc.period=10
+rpc.servers={{ganglia_server_host}}:8663
+
+# Ganglia sink, following the hadoop example
+hbase.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+hbase.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+hbase.sink.ganglia.servers={{ganglia_server_host}}:8663
+
+{% endif %}
+
+# Disable HBase metrics for regions/tables/regionservers by default.
+*.source.filter.class=org.apache.hadoop.metrics2.filter.RegexFilter
+hbase.*.source.filter.exclude=.*(Regions|Users|Tables).*
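
One subtlety above: hbase.sink.timeline.sendInterval={{metrics_report_interval}}000
converts a value in seconds to milliseconds by appending three literal zeros
at render time. The same arithmetic, done explicitly (value illustrative):

    metrics_report_interval = 60  # seconds, as the parameter arrives
    send_interval_ms = int("%d000" % metrics_report_interval)  # the template's trick
    assert send_interval_ms == metrics_report_interval * 1000  # 60000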

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
new file mode 100644
index 0000000..dcec3d4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
@@ -0,0 +1,116 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+#
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is a hardcoded name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+{% if has_metric_collector %}
+
+*.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
+*.sink.timeline.slave.host.name={{hostname}}
+hbase.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+hbase.period={{metrics_collection_period}}
+hbase.collector={{metric_collector_host}}:{{metric_collector_port}}
+
+jvm.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+jvm.period={{metrics_collection_period}}
+jvm.collector={{metric_collector_host}}:{{metric_collector_port}}
+
+rpc.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+rpc.period={{metrics_collection_period}}
+rpc.collector={{metric_collector_host}}:{{metric_collector_port}}
+
+hbase.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+hbase.sink.timeline.period={{metrics_collection_period}}
+hbase.sink.timeline.sendInterval={{metrics_report_interval}}000
+hbase.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+
+# HTTPS properties
+hbase.sink.timeline.truststore.path = {{metric_truststore_path}}
+hbase.sink.timeline.truststore.type = {{metric_truststore_type}}
+hbase.sink.timeline.truststore.password = {{metric_truststore_password}}
+
+{% endif %}
+
+{% if has_ganglia_server %}
+
+# Configuration of the "hbase" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+hbase.period=10
+hbase.servers={{ganglia_server_host}}:8656
+
+# Configuration of the "jvm" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+jvm.period=10
+jvm.servers={{ganglia_server_host}}:8656
+
+# Configuration of the "rpc" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+rpc.period=10
+rpc.servers={{ganglia_server_host}}:8656
+
+# Ganglia sink, following the hadoop example
+hbase.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+hbase.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+hbase.sink.ganglia.servers={{ganglia_server_host}}:8656
+
+{% endif %}
+
+# Disable HBase metrics for regions/tables/regionservers by default.
+*.source.filter.class=org.apache.hadoop.metrics2.filter.RegexFilter
+hbase.*.source.filter.exclude=.*(Regions|Users|Tables).*

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hbase-smoke.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hbase-smoke.sh.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hbase-smoke.sh.j2
new file mode 100644
index 0000000..458da95
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hbase-smoke.sh.j2
@@ -0,0 +1,44 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+disable 'ambarismoketest'
+drop 'ambarismoketest'
+create 'ambarismoketest','family'
+put 'ambarismoketest','row01','family:col01','{{service_check_data}}'
+scan 'ambarismoketest'
+exit
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hbase.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hbase.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hbase.conf.j2
new file mode 100644
index 0000000..3580db0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hbase.conf.j2
@@ -0,0 +1,35 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{hbase_user}}   - nofile   {{hbase_user_nofile_limit}}
+{{hbase_user}}   - nproc    {{hbase_user_nproc_limit}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hbase_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hbase_client_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hbase_client_jaas.conf.j2
new file mode 100644
index 0000000..38f9721
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hbase_client_jaas.conf.j2
@@ -0,0 +1,23 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=false
+useTicketCache=true;
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hbase_grant_permissions.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hbase_grant_permissions.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hbase_grant_permissions.j2
new file mode 100644
index 0000000..21acfd6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hbase_grant_permissions.j2
@@ -0,0 +1,40 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+grant '{{smoke_test_user}}', '{{smokeuser_permissions}}'
+grant '{{titan_user}}', '{{titan_user_permissions}}', '{{titan_user_hbase_namespace}}'
+exit

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hbase_master_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hbase_master_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hbase_master_jaas.conf.j2
new file mode 100644
index 0000000..a93c36c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hbase_master_jaas.conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{master_keytab_path}}"
+principal="{{master_jaas_princ}}";
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hbase_queryserver_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hbase_queryserver_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hbase_queryserver_jaas.conf.j2
new file mode 100644
index 0000000..c5a6c3f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hbase_queryserver_jaas.conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{queryserver_keytab_path}}"
+principal="{{queryserver_jaas_princ}}";
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
new file mode 100644
index 0000000..7097481
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{regionserver_keytab_path}}"
+principal="{{regionserver_jaas_princ}}";
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hbase_rest_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hbase_rest_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hbase_rest_jaas.conf.j2
new file mode 100644
index 0000000..2dc6988
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/hbase_rest_jaas.conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{rest_server_keytab_path}}"
+principal="{{rest_server_jaas_princ}}";
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/regionservers.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/regionservers.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/regionservers.j2
new file mode 100644
index 0000000..fc6cc37
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/templates/regionservers.j2
@@ -0,0 +1,20 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in rs_hosts %}{{host}}
+{% endfor %}
\ No newline at end of file

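[Editor's note] A quick sketch (with made-up hostnames) of the one-host-per-line output the regionservers.j2 loop above produces; Ambari supplies rs_hosts from the cluster topology.

    from jinja2 import Template

    rs_template = Template("{% for host in rs_hosts %}{{host}}\n{% endfor %}")
    print(rs_template.render(rs_hosts=["c6401.ambari.apache.org",
                                       "c6402.ambari.apache.org"]))
    # c6401.ambari.apache.org
    # c6402.ambari.apache.org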

[06/50] [abbrv] ambari git commit: Revert "AMBARI-21420 HBase master crashed during/post EU on an IOP4.2.5/HDP2.6 migrated cluster (dili)"

Posted by jo...@apache.org.
Revert "AMBARI-21420 HBase master crashed during/post EU on an IOP4.2.5/HDP2.6 migrated cluster (dili)"

This reverts commit cb86bf06f878efeccdb38ec87eb160eac2e6ed57.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c2b2210b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c2b2210b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c2b2210b

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: c2b2210b3635e800c17621dbdbadec7761a988c1
Parents: da44c5c
Author: Di Li <di...@apache.org>
Authored: Wed Jul 12 14:50:55 2017 -0400
Committer: Di Li <di...@apache.org>
Committed: Wed Jul 12 14:50:55 2017 -0400

----------------------------------------------------------------------
 .../0.96.0.2.0/package/scripts/hbase_service.py    | 17 -----------------
 .../0.96.0.2.0/package/scripts/params_linux.py     |  9 ---------
 .../BigInsights/4.2.5/upgrades/config-upgrade.xml  | 11 -----------
 .../upgrades/nonrolling-upgrade-to-hdp-2.6.xml     |  5 -----
 .../BigInsights/4.2/upgrades/config-upgrade.xml    | 11 -----------
 .../4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml |  7 +------
 6 files changed, 1 insertion(+), 59 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c2b2210b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py
index 2e2fa10..a1003dc 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py
@@ -19,7 +19,6 @@ limitations under the License.
 """
 
 from resource_management import *
-from resource_management.core.logger import Logger
 
 def hbase_service(
   name,
@@ -33,22 +32,6 @@ def hbase_service(
     pid_expression = as_sudo(["cat", pid_file])
     no_op_test = as_sudo(["test", "-f", pid_file]) + format(" && ps -p `{pid_expression}` >/dev/null 2>&1")
     
-    # delete wal log if HBase version has moved down
-    if params.to_backup_wal_dir:
-      wal_directory = params.wal_directory
-      timestamp = datetime.datetime.now()
-      format = '%Y%m%d%H%M%S'
-      wal_directory_backup = '%s_%s' % (wal_directory, timestamp.strftime(format))
-
-      rm_cmd = format("hadoop fs -mv {wal_directory} {wal_directory_backup}")
-      try:
-        Execute ( rm_cmd,
-          user = params.hbase_user
-        )
-      except Exception, e:
-        #Should still allow HBase Start/Stop to proceed
-        Logger.error("Failed to backup HBase WAL directory, command: {0} . Exception: {1}".format(rm_cmd, e.message))
-
     if action == 'start':
       daemon_cmd = format("{cmd} start {role}")
       

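[Editor's note] For context, the block removed above moved HBase's MasterProcWALs directory aside in HDFS under a timestamped name before a start during an upgrade. Note that as written it also shadowed the imported format helper with the strftime pattern string, so the later format(...) call could not have succeeded. A standalone sketch of the intended rename, with the path taken from the accompanying params_linux.py change:

    import datetime

    def wal_backup_command(wal_directory="/apps/hbase/data/MasterProcWALs"):
        # Append a timestamp so repeated runs do not collide, then move the
        # directory aside in HDFS rather than deleting it outright.
        suffix = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        return "hadoop fs -mv %s %s_%s" % (wal_directory, wal_directory, suffix)

    print(wal_backup_command())
    # e.g. hadoop fs -mv /apps/hbase/data/MasterProcWALs /apps/hbase/data/MasterProcWALs_20170712145055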
http://git-wip-us.apache.org/repos/asf/ambari/blob/c2b2210b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
index 6617a80..1ee5248 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
@@ -44,7 +44,6 @@ from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions.expect import expect
 from ambari_commons.ambari_metrics_helper import select_metric_collector_hosts_from_hostnames
 from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs, generate_ranger_service_config
-from resource_management.libraries.functions.constants import Direction
 
 # server configurations
 config = Script.get_config()
@@ -441,11 +440,3 @@ if has_atlas:
   atlas_with_managed_hbase = len(zk_hosts_matches) > 0
 else:
   atlas_with_managed_hbase = False
-
-wal_directory = "/apps/hbase/data/MasterProcWALs"
-
-backup_wal_dir = default('/configurations/hbase-env/backup_wal_dir', False)
-
-#Need to make sure not to keep removing WAL logs once EU is finalized.
-upgrade_direction = default("/commandParams/upgrade_direction", None)
-to_backup_wal_dir = upgrade_direction is not None and upgrade_direction == Direction.UPGRADE and backup_wal_dir

http://git-wip-us.apache.org/repos/asf/ambari/blob/c2b2210b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
index b51a744..42999b2 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
@@ -52,17 +52,6 @@
       </component>
     </service>
     
-    <service name="HBASE">
-      <component name="HBASE_MASTER">
-        <changes>
-          <definition xsi:type="configure" id="biginsights_4_2_hbase_env_config" summary="Update HBase configurations">
-            <type>hbase-env</type>
-            <set key="backup_wal_dir" value="true"/>
-          </definition>
-        </changes>
-      </component>
-    </service>
-
     <service name="HIVE">
       <component name="HIVE_SERVER">
         <changes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c2b2210b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
index f3c73a0..a96ede9 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
@@ -182,11 +182,6 @@
         <task xsi:type="configure" id="biginsights_4_2_mapreduce_application_framework_patch" />
       </execute-stage>
       
-      <!-- HBASE -->
-      <execute-stage service="HBASE" component="HBASE_MASTER" title="Apply config changes for HBase">
-        <task xsi:type="configure" id="biginsights_4_2_hbase_env_config" />
-      </execute-stage>
-
       <!-- HIVE -->
       <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server">
         <task xsi:type="configure" id="biginsights_4_2_0_0_hive_env_configure"/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c2b2210b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
index b46f476..f9e3e15 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
@@ -52,17 +52,6 @@
       </component>
     </service>
     
-    <service name="HBASE">
-      <component name="HBASE_MASTER">
-        <changes>
-          <definition xsi:type="configure" id="biginsights_4_2_hbase_env_config" summary="Update HBase configurations">
-            <type>hbase-env</type>
-            <set key="backup_wal_dir" value="true"/>
-          </definition>
-        </changes>
-      </component>
-    </service>
-
     <service name="HIVE">
       <component name="HIVE_SERVER">
         <changes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c2b2210b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
index 4867626..a96ede9 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
@@ -181,12 +181,7 @@
       <execute-stage service="MAPREDUCE2" component="HISTORYSERVER" title="Apply config changes for HistoryServer">
         <task xsi:type="configure" id="biginsights_4_2_mapreduce_application_framework_patch" />
       </execute-stage>
-
-      <!-- HBASE -->
-      <execute-stage service="HBASE" component="HBASE_MASTER" title="Apply config changes for HBase">
-        <task xsi:type="configure" id="biginsights_4_2_hbase_env_config" />
-      </execute-stage>
-
+      
       <!-- HIVE -->
       <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server">
         <task xsi:type="configure" id="biginsights_4_2_0_0_hive_env_configure"/>


[24/50] [abbrv] ambari git commit: AMBARI-21471. ATS going down due to missing org.apache.spark.deploy.history.yarn.plugin.SparkATSPlugin (smohanty)

Posted by jo...@apache.org.
AMBARI-21471. ATS going down due to missing org.apache.spark.deploy.history.yarn.plugin.SparkATSPlugin (smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/afea7bb7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/afea7bb7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/afea7bb7

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: afea7bb72475fa3d6938aa049dd2db99f3adc133
Parents: d2c6d53
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Thu Jul 13 22:35:28 2017 -0700
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Thu Jul 13 22:35:28 2017 -0700

----------------------------------------------------------------------
 .../4.0/stack-advisor/stack_advisor_25.py       |   4 +-
 .../stacks/HDP/2.5/services/stack_advisor.py    |   5 +-
 .../src/main/resources/stacks/stack_advisor.py  |  18 +++
 .../stacks/2.5/common/test_stack_advisor.py     | 150 +++++++++++--------
 4 files changed, 106 insertions(+), 71 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/afea7bb7/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_25.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_25.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_25.py
index 1f0ae18..eb7d370 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_25.py
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_25.py
@@ -734,9 +734,7 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
         self.checkAndStopLlapQueue(services, configurations, LLAP_QUEUE_NAME)
 
     putYarnSiteProperty = self.putProperty(configurations, "yarn-site", services)
-    stack_root = "/usr/hdp"
-    if "cluster-env" in services["configurations"] and "stack_root" in services["configurations"]["cluster-env"]["properties"]:
-      stack_root = services["configurations"]["cluster-env"]["properties"]["stack_root"]
+    stack_root = self.getStackRoot(services)
 
     timeline_plugin_classes_values = []
     timeline_plugin_classpath_values = []

http://git-wip-us.apache.org/repos/asf/ambari/blob/afea7bb7/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
index 638e79a..97c49f3 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
@@ -19,6 +19,7 @@ limitations under the License.
 
 import math
 
+
 from ambari_commons.str_utils import string_set_equals
 from resource_management.core.logger import Logger
 from resource_management.core.exceptions import Fail
@@ -775,9 +776,7 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
         self.checkAndStopLlapQueue(services, configurations, LLAP_QUEUE_NAME)
 
     putYarnSiteProperty = self.putProperty(configurations, "yarn-site", services)
-    stack_root = "/usr/hdp"
-    if cluster_env and "stack_root" in cluster_env:
-      stack_root = cluster_env["stack_root"]
+    stack_root = self.getStackRoot(services)
 
     timeline_plugin_classes_values = []
     timeline_plugin_classpath_values = []

http://git-wip-us.apache.org/repos/asf/ambari/blob/afea7bb7/ambari-server/src/main/resources/stacks/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/stack_advisor.py b/ambari-server/src/main/resources/stacks/stack_advisor.py
index e393b9c..1e0d83a 100644
--- a/ambari-server/src/main/resources/stacks/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/stack_advisor.py
@@ -22,6 +22,7 @@ import os
 import re
 import socket
 import traceback
+import json
 
 class StackAdvisor(object):
   """
@@ -1144,6 +1145,23 @@ class DefaultStackAdvisor(StackAdvisor):
 
     return mount_points
 
+  def getStackRoot(self, services):
+    """
+    Gets the stack root associated with the stack
+    :param services: the services structure containing the current configurations
+    :return: the stack root as specified in the config or /usr/hdp
+    """
+    cluster_env = self.getServicesSiteProperties(services, "cluster-env")
+    stack_root = "/usr/hdp"
+    if cluster_env and "stack_root" in cluster_env:
+      stack_root_as_str = cluster_env["stack_root"]
+      stack_roots = json.loads(stack_root_as_str)
+      stack_name = cluster_env["stack_name"]
+      if stack_name in stack_roots:
+        stack_root = stack_roots[stack_name]
+
+    return stack_root
+
   def isSecurityEnabled(self, services):
     """
     Determines if security is enabled by testing the value of cluster-env/security enabled.

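[Editor's note] Since getStackRoot now expects cluster-env/stack_root to hold a JSON map keyed by stack name (as the updated test below configures), a standalone approximation of the logic, outside the advisor class hierarchy, behaves like this:

    import json

    def get_stack_root(cluster_env, default="/usr/hdp"):
        # Mirrors DefaultStackAdvisor.getStackRoot above: stack_root is a
        # JSON string mapping stack names to install roots.
        stack_root = default
        if cluster_env and "stack_root" in cluster_env:
            stack_roots = json.loads(cluster_env["stack_root"])
            stack_name = cluster_env["stack_name"]
            if stack_name in stack_roots:
                stack_root = stack_roots[stack_name]
        return stack_root

    print(get_stack_root({"stack_root": '{"HDP": "/usr/hdp"}',
                          "stack_name": "HDP"}))  # /usr/hdp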
http://git-wip-us.apache.org/repos/asf/ambari/blob/afea7bb7/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
index 7bc9272..222f100 100644
--- a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
@@ -825,70 +825,80 @@ class TestHDP25StackAdvisor(TestCase):
 
     services = {
       "services": [{
-        "StackServices": {
-          "service_name": "YARN",
-        },
-        "Versions": {
-          "stack_version": "2.5"
-        },
-        "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "NODEMANAGER",
-              "hostnames": ["c6401.ambari.apache.org"]
-            }
-          }
-        ]
-      }, {
-        "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE",
-        "StackServices": {
-          "service_name": "HIVE",
-          "service_version": "1.2.1.2.5",
-          "stack_name": "HDP",
-          "stack_version": "2.5"
-        },
-        "components": [
-          {
-            "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE/components/HIVE_SERVER_INTERACTIVE",
-            "StackServiceComponents": {
-              "advertise_version": "true",
-              "bulk_commands_display_name": "",
-              "bulk_commands_master_component_name": "",
-              "cardinality": "0-1",
-              "component_category": "MASTER",
-              "component_name": "HIVE_SERVER_INTERACTIVE",
-              "custom_commands": ["RESTART_LLAP"],
-              "decommission_allowed": "false",
-              "display_name": "HiveServer2 Interactive",
-              "has_bulk_commands_definition": "false",
-              "is_client": "false",
-              "is_master": "true",
-              "reassign_allowed": "false",
-              "recovery_enabled": "false",
-              "service_name": "HIVE",
-              "stack_name": "HDP",
-              "stack_version": "2.5",
-              "hostnames": ["c6401.ambari.apache.org"]
-            },
-            "dependencies": []
-          },
-          {
-            "StackServiceComponents": {
-              "advertise_version": "true",
-              "cardinality": "1+",
-              "component_category": "SLAVE",
-              "component_name": "NODEMANAGER",
-              "display_name": "NodeManager",
-              "is_client": "false",
-              "is_master": "false",
-              "hostnames": [
-                "c6403.ambari.apache.org"
-              ]
-            },
-            "dependencies": []
-          },
-        ]
-      }
+                     "StackServices": {
+                       "service_name": "TEZ"
+                     }
+                   },
+                   {
+                     "StackServices": {
+                       "service_name": "SPARK"
+                     }
+                   },
+                   {
+                     "StackServices": {
+                       "service_name": "YARN",
+                     },
+                     "Versions": {
+                       "stack_version": "2.5"
+                     },
+                     "components": [
+                       {
+                         "StackServiceComponents": {
+                           "component_name": "NODEMANAGER",
+                           "hostnames": ["c6401.ambari.apache.org"]
+                         }
+                       }
+                     ]
+                   }, {
+                     "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE",
+                     "StackServices": {
+                       "service_name": "HIVE",
+                       "service_version": "1.2.1.2.5",
+                       "stack_name": "HDP",
+                       "stack_version": "2.5"
+                     },
+                     "components": [
+                       {
+                         "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE/components/HIVE_SERVER_INTERACTIVE",
+                         "StackServiceComponents": {
+                           "advertise_version": "true",
+                           "bulk_commands_display_name": "",
+                           "bulk_commands_master_component_name": "",
+                           "cardinality": "0-1",
+                           "component_category": "MASTER",
+                           "component_name": "HIVE_SERVER_INTERACTIVE",
+                           "custom_commands": ["RESTART_LLAP"],
+                           "decommission_allowed": "false",
+                           "display_name": "HiveServer2 Interactive",
+                           "has_bulk_commands_definition": "false",
+                           "is_client": "false",
+                           "is_master": "true",
+                           "reassign_allowed": "false",
+                           "recovery_enabled": "false",
+                           "service_name": "HIVE",
+                           "stack_name": "HDP",
+                           "stack_version": "2.5",
+                           "hostnames": ["c6401.ambari.apache.org"]
+                         },
+                         "dependencies": []
+                       },
+                       {
+                         "StackServiceComponents": {
+                           "advertise_version": "true",
+                           "cardinality": "1+",
+                           "component_category": "SLAVE",
+                           "component_name": "NODEMANAGER",
+                           "display_name": "NodeManager",
+                           "is_client": "false",
+                           "is_master": "false",
+                           "hostnames": [
+                             "c6403.ambari.apache.org"
+                           ]
+                         },
+                         "dependencies": []
+                       },
+                     ]
+                   }
       ],
       "changed-configurations": [
         {
@@ -898,6 +908,12 @@ class TestHDP25StackAdvisor(TestCase):
         }
       ],
       "configurations": {
+        "cluster-env": {
+          "properties": {
+            "stack_root": "{\"HDP\":\"/usr/hdp\"}",
+            "stack_name": "HDP"
+          },
+        },
         "capacity-scheduler": {
           "properties": {
             "capacity-scheduler": 'yarn.scheduler.capacity.root.default.maximum-capacity=60\n'
@@ -960,7 +976,8 @@ class TestHDP25StackAdvisor(TestCase):
             "tez.am.resource.memory.mb": "341"
           }
         }
-      }
+      },
+      "ambari-server-properties": {"ambari-server.user":"ambari_user"}
     }
 
     clusterData = {
@@ -990,6 +1007,9 @@ class TestHDP25StackAdvisor(TestCase):
 
     self.assertEquals(configurations['hive-interactive-site']['properties']['hive.server2.tez.default.queues'], 'default')
     self.assertEquals(configurations['hive-interactive-site']['properties']['hive.llap.daemon.queue.name'], 'default')
+    self.assertEquals(configurations['yarn-site']['properties']['yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes'],
+                      'org.apache.tez.dag.history.logging.ats.TimelineCachePluginImpl,org.apache.spark.deploy.history.yarn.plugin.SparkATSPlugin')
+    self.assertEquals(configurations['yarn-site']['properties']['yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath'], '/usr/hdp/${hdp.version}/spark/hdpLib/*')
     self.assertTrue('hive-interactive-env' not in configurations)
     self.assertTrue('property_attributes' not in configurations)
 


[02/50] [abbrv] ambari git commit: AMBARI-21455. Remove unnecessary services from BigInsights stack (alejandro)

Posted by jo...@apache.org.
AMBARI-21455. Remove unnecessary services from BigInsights stack (alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/08f48c1e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/08f48c1e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/08f48c1e

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 08f48c1eb85a3763891584b835977809936f3a19
Parents: 4bbdd0e
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Wed Jul 12 10:22:27 2017 -0700
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Wed Jul 12 11:31:17 2017 -0700

----------------------------------------------------------------------
 .../BigInsights/4.2.5/role_command_order.json   |  12 +-
 .../BigInsights/4.2.5/services/stack_advisor.py |  53 --------
 .../BigInsights/4.2/role_command_order.json     |   3 +-
 .../4.2/services/SYSTEMML/metainfo.xml          |  77 -----------
 .../SYSTEMML/package/scripts/__init__.py        |  19 ---
 .../services/SYSTEMML/package/scripts/params.py |  40 ------
 .../SYSTEMML/package/scripts/service_check.py   |  43 -------
 .../SYSTEMML/package/scripts/systemml_client.py |  49 -------
 .../services/TITAN/configuration/titan-env.xml  |  48 -------
 .../TITAN/configuration/titan-hbase-solr.xml    |  67 ----------
 .../TITAN/configuration/titan-log4j.xml         |  66 ----------
 .../4.2/services/TITAN/kerberos.json            |  17 ---
 .../BigInsights/4.2/services/TITAN/metainfo.xml |  88 -------------
 .../TITAN/package/files/titanSmoke.groovy       |  20 ---
 .../services/TITAN/package/scripts/params.py    | 128 -------------------
 .../TITAN/package/scripts/service_check.py      |  64 ----------
 .../4.2/services/TITAN/package/scripts/titan.py |  70 ----------
 .../TITAN/package/scripts/titan_client.py       |  58 ---------
 18 files changed, 3 insertions(+), 919 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/role_command_order.json b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/role_command_order.json
index dc4811b..35fc0d8 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/role_command_order.json
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/role_command_order.json
@@ -4,22 +4,14 @@
   "general_deps" : {
     "_comment" : "dependencies for all cases",
     "HIVE_SERVER_INTERACTIVE-START": ["RESOURCEMANAGER-START", "NODEMANAGER-START", "MYSQL_SERVER-START"],
-    "RESOURCEMANAGER-STOP": ["HIVE_SERVER_INTERACTIVE-STOP", "SPARK2_THRIFTSERVER-STOP", "KERNEL_GATEWAY-STOP" ],
+    "RESOURCEMANAGER-STOP": ["HIVE_SERVER_INTERACTIVE-STOP", "SPARK2_THRIFTSERVER-STOP"],
     "NODEMANAGER-STOP": ["HIVE_SERVER_INTERACTIVE-STOP", "KERNEL_GATEWAY-STOP" ],
     "NAMENODE-STOP": ["HIVE_SERVER_INTERACTIVE-STOP"],
     "HIVE_SERVER_INTERACTIVE-RESTART": ["NODEMANAGER-RESTART", "MYSQL_SERVER-RESTART"],
     "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START", "WEBHCAT_SERVER-START", "HIVE_SERVER_INTERACTIVE-START"],
     "RANGER_ADMIN-START": ["ZOOKEEPER_SERVER-START", "INFRA_SOLR-START"],
     "SPARK2_SERVICE_CHECK-SERVICE_CHECK" : ["SPARK2_JOBHISTORYSERVER-START", "APP_TIMELINE_SERVER-START"],
-    "HBASE_REST_SERVER-START": ["HBASE_MASTER-START"],
-    "TITAN_SERVER-START" : ["HBASE_SERVICE_CHECK-SERVICE_CHECK", "SOLR-START"],
-    "TITAN_SERVICE_CHECK-SERVICE_CHECK": ["TITAN_SERVER-START"],
-    "KERNEL_GATEWAY-INSTALL": ["SPARK2_CLIENT-INSTALL"],
-    "PYTHON_CLIENT-INSTALL": ["KERNEL_GATEWAY-INSTALL"],
-    "KERNEL_GATEWAY-START": ["NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START", "SPARK2_JOBHISTORYSERVER-START"],
-    "JNBG_SERVICE_CHECK-SERVICE_CHECK": ["KERNEL_GATEWAY-START"],
-    "R4ML-INSTALL": ["SPARK2_CLIENT-INSTALL", "SYSTEMML-INSTALL"],
-    "R4ML_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START", "SPARK2_JOBHISTORYSERVER-START"]
+    "HBASE_REST_SERVER-START": ["HBASE_MASTER-START"]
   },
   "_comment" : "Dependencies that are used when GLUSTERFS is not present in cluster",
   "optional_no_glusterfs": {

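[Editor's note] A toy illustration (not Ambari's actual scheduler) of how entries in role_command_order.json read: the keyed command waits until every command in its list has completed.

    def blocked_by(dependencies, command):
        # The key command cannot run until all listed commands finish.
        return dependencies.get(command, [])

    deps = {"HBASE_REST_SERVER-START": ["HBASE_MASTER-START"]}
    print(blocked_by(deps, "HBASE_REST_SERVER-START"))  # ['HBASE_MASTER-START']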
http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/stack_advisor.py
index 8883f57..1caa307 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/stack_advisor.py
@@ -26,9 +26,7 @@ class BigInsights425StackAdvisor(BigInsights42StackAdvisor):
     parentRecommendConfDict = super(BigInsights425StackAdvisor, self).getServiceConfigurationRecommenderDict()
     childRecommendConfDict = {
       "HDFS": self.recommendHDFSConfigurations,
-      "JNBG": self.recommendJNBGConfigurations,
       "SOLR": self.recommendSolrConfigurations,
-      "TITAN": self.recommendTitanConfigurations,
       "RANGER": self.recommendRangerConfigurations
     }
     parentRecommendConfDict.update(childRecommendConfDict)
@@ -37,55 +35,11 @@ class BigInsights425StackAdvisor(BigInsights42StackAdvisor):
   def getServiceConfigurationValidators(self):
     parentValidators = super(BigInsights425StackAdvisor, self).getServiceConfigurationValidators()
     childValidators = {
-      "JNBG": {"jnbg-env": self.validateJNBGConfigurations},
       "SOLR": {"ranger-solr-plugin-properties": self.validateSolrRangerPluginConfigurations}
     }
     self.mergeValidators(parentValidators, childValidators)
     return parentValidators
 
-  def recommendJNBGConfigurations(self, configurations, clusterData, services, hosts):
-    putJNBGEnvProperty = self.putProperty(configurations, "jnbg-env", services)
-    putJNBGEnvPropertyAttribute = self.putPropertyAttribute(configurations, "jnbg-env")
-   
-    distro_version = platform.linux_distribution()[1]
-    # On RHEL 6.x default path does not point to a Python 2.7
-    # so empty out the field and force user to update the path
-    if distro_version < "7.0":
-      putJNBGEnvProperty('python_interpreter_path', "")
-
-  def validateJNBGConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
-    validationItems = []
-    jnbg_env = getSiteProperties(configurations, "jnbg-env")
-    py_exec = jnbg_env.get("python_interpreter_path") if jnbg_env and "python_interpreter_path" in jnbg_env else []
-
-    # Test that it is a valid executable path before proceeding
-    if not os.path.isfile(py_exec) and not os.access(py_exec, os.X_OK):
-      validationItems.append({"config-name": "python_interpreter_path",
-                              "item": self.getErrorItem("Invalid Python interpreter path specified")})
-      return self.toConfigurationValidationProblems(validationItems, "jnbg-env")
-
-    distro_version = platform.linux_distribution()[1]
-    if distro_version < "7.0" and (py_exec == "/opt/rh/python27/root/usr/bin/python" or py_exec == "/opt/rh/python27/root/usr/bin/python2" or py_exec == "/opt/rh/python27/root/usr/bin/python2.7"):
-      # Special handling for RHSCL Python 2.7
-      proc = Popen(['/usr/bin/scl', 'enable', 'python27', '/opt/rh/python27/root/usr/bin/python' ' -V'], stderr=PIPE)
-    else:
-      proc = Popen([py_exec, '-V'], stderr=PIPE)
-    py_string = proc.communicate()[1]
-    py_version = py_string.split()[1]
-
-    if "Python" not in py_string:
-      validationItems.append({"config-name": "python_interpreter_path",
-                              "item": self.getErrorItem("Path specified does not appear to be a Python interpreter")})
-      return self.toConfigurationValidationProblems(validationItems, "jnbg-env")
-
-    # Validate that the specified python is 2.7.x (not > 2.x.x and not < 2.7)
-    if not py_version.split('.')[0] == '2' or (py_version.split('.')[0] == '2' and py_version.split('.')[1] < '7'):
-      validationItems.append({"config-name": "python_interpreter_path",
-                              "item": self.getErrorItem("Specified Python interpreter must be version >= 2.7 and < 3.0")})
-      return self.toConfigurationValidationProblems(validationItems, "jnbg-env")
-
-    return self.toConfigurationValidationProblems(validationItems, "jnbg-env")
-
   def recommendRangerConfigurations(self, configurations, clusterData, services, hosts):
     super(BigInsights425StackAdvisor, self).recommendRangerConfigurations(configurations, clusterData, services, hosts)
     putRangerAdminProperty = self.putProperty(configurations, "ranger-admin-site", services)
@@ -96,13 +50,6 @@ class BigInsights425StackAdvisor(BigInsights42StackAdvisor):
     zookeeper_host_port = ",".join(zookeeper_host_port)
     ranger_audit_zk_port = '{0}/{1}'.format(zookeeper_host_port, 'solr')
     putRangerAdminProperty('ranger.audit.solr.zookeepers', ranger_audit_zk_port)
-
-  def recommendTitanConfigurations(self, configurations, clusterData, services, hosts):
-    putTitanPropertyAttribute = self.putPropertyAttribute(configurations, "titan-env")
-    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
-    knox_enabled = "KNOX" in servicesList
-    if knox_enabled:
-      putTitanPropertyAttribute("SimpleAuthenticator", "visible", "false")
  
   def recommendSolrConfigurations(self, configurations, clusterData, services, hosts):
     super(BigInsights425StackAdvisor, self).recommendSolrConfigurations(configurations, clusterData, services, hosts)

http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/role_command_order.json b/ambari-server/src/main/resources/stacks/BigInsights/4.2/role_command_order.json
index 5ee4b32..cc45213 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/role_command_order.json
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/role_command_order.json
@@ -19,8 +19,7 @@
     "ATLAS_SERVICE_CHECK-SERVICE_CHECK": ["ATLAS_SERVER-START"],
     "PHOENIX_QUERY_SERVER-START": ["HBASE_MASTER-START"],
     "OOZIE_SERVER-START": ["FALCON_SERVER-START"],
-    "FALCON_SERVICE_CHECK-SERVICE_CHECK": ["FALCON_SERVER-START"],
-    "TITAN_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_SERVICE_CHECK-SERVICE_CHECK"]
+    "FALCON_SERVICE_CHECK-SERVICE_CHECK": ["FALCON_SERVER-START"]
   },
   "_comment" : "Dependencies that are used when GLUSTERFS is not present in cluster",
   "optional_no_glusterfs": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/metainfo.xml
deleted file mode 100755
index b73e31e..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/metainfo.xml
+++ /dev/null
@@ -1,77 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>SYSTEMML</name>
-      <displayName>SystemML</displayName>
-      <comment>Apache SystemML is a distributed and declarative machine learning platform.</comment>
-      <version>0.10.0.4.2</version>
-      <components>
-        <component>
-          <name>SYSTEMML</name>
-          <displayName>SystemML</displayName>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/systemml_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <configFiles>
-          </configFiles>          
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>apache_systemml*</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <requiredServices>
-        <service>HDFS</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-      </configuration-dependencies>
-
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/__init__.py
deleted file mode 100755
index 5561e10..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/params.py
deleted file mode 100755
index dd7e46c..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/params.py
+++ /dev/null
@@ -1,40 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.version import format_stack_version
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.script.script import Script
-
-# server configurations
-config = Script.get_config()
-stack_root = Script.get_stack_root()
-
-systemml_home_dir = format("{stack_root}/current/systemml-client")
-systemml_lib_dir = format("{systemml_home_dir}/lib")
-systemml_scripts_dir = format("{systemml_home_dir}/scripts")
-
-stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-stack_version = format_stack_version(stack_version_unformatted)
-
-# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
-version = default("/commandParams/version", None)
-stack_name = default("/hostLevelParams/stack_name", None)
-
-java_home = config['hostLevelParams']['java_home']

http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/service_check.py
deleted file mode 100755
index c15b907..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/service_check.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.libraries.functions.format import format
-import subprocess
-import os
-
-class SystemMLServiceCheck(Script):
-    def service_check(self, env):
-        import params
-        env.set_params(params)
-        
-        if os.path.exists(params.systemml_lib_dir):
-            cp = format("{params.stack_root}/current/hadoop-client/*:{params.stack_root}/current/hadoop-mapreduce-client/*:{params.stack_root}/current/hadoop-client/lib/*:{params.systemml_lib_dir}/systemml.jar")
-            java = format("{params.java_home}/bin/java")
-            command = [java, "-cp", cp, "org.apache.sysml.api.DMLScript", "-s", "print('Apache SystemML');"]
-            process = subprocess.Popen(command, stdout=subprocess.PIPE)
-            output = process.communicate()[0]
-            print output
-        
-            if 'Apache SystemML' not in output:
-                raise Fail("Expected output Apache SystemML not found.")
-
-if __name__ == "__main__":
-    SystemMLServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/systemml_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/systemml_client.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/systemml_client.py
deleted file mode 100755
index 2d45b68..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/systemml_client.py
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/usr/bin/python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-#from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-
-
-class SystemMLClient(Script):
-
-  def get_component_name(self):
-    return "systemml-client"
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    import params
-
-    env.set_params(params)
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      #conf_select.select(params.stack_name, "systemml", params.version)
-      stack_select.select("systemml-client", params.version)
-
-  def install(self, env):
-    self.install_packages(env)
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-
-if __name__ == "__main__":
-  SystemMLClient().execute()
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-env.xml
deleted file mode 100755
index 4f80ea1..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-env.xml
+++ /dev/null
@@ -1,48 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false" supports_do_not_extend="true">
-
-  <property>
-    <name>titan_user</name>
-    <description>User to run Titan as</description>
-    <on-ambari-upgrade add="true"/>
-    <property-type>USER</property-type>
-    <value>titan</value>
-  </property>
-
-  <property>
-    <name>content</name>
-    <description>This is the template for titan-env.sh file</description>
-    <value>
-# Set JAVA HOME
-export JAVA_HOME={{java64_home}}
-
-# Add hadoop and hbase configuration directories into classpath
-export HADOOP_CONF_DIR={{hadoop_config_dir}}
-export HBASE_CONF_DIR={{hbase_config_dir}}
-CLASSPATH=$HADOOP_CONF_DIR:$HBASE_CONF_DIR:$CLASSPATH
-    </value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-hbase-solr.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-hbase-solr.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-hbase-solr.xml
deleted file mode 100755
index dd45141..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-hbase-solr.xml
+++ /dev/null
@@ -1,67 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false" supports_adding_forbidden="true" supports_do_not_extend="true">
-
-  <property>
-    <name>content</name>
-    <description>Describe the configurations for Solr</description>
-    <value># Titan configuration sample: HBase and Solr
-# ATTENTION: If you would like to use this property, manually execute titan-solr-connection.sh before building the index.
-
-# This file connects to HBase using a Zookeeper quorum
-# (storage.hostname) consisting solely of localhost. It also
-# connects to Solr running on localhost using Solr's HTTP API.
-# Zookeeper, the HBase services, and Solr must already be
-# running and available before starting Titan with this file.
-storage.backend=hbase
-storage.hostname={{storage_host}}
-storage.hbase.table=titan_solr
-storage.hbase.ext.zookeeper.znode.parent={{hbase_zookeeper_parent}}
-
-cache.db-cache = true
-cache.db-cache-clean-wait = 20
-cache.db-cache-time = 180000
-cache.db-cache-size = 0.5
-
-# The indexing backend used to extend and optimize Titan's query
-# functionality. This setting is optional. Titan can use multiple
-# heterogeneous index backends. Hence, this option can appear more than
-# once, so long as the user-defined name between "index" and "backend" is
-# unique among appearances.Similar to the storage backend, this should be
-# set to one of Titan's built-in shorthand names for its standard index
-# backends (shorthands: lucene, elasticsearch, es, solr) or to the full
-# package and classname of a custom/third-party IndexProvider
-# implementation.
-
-index.search.backend=solr
-index.search.solr.mode=cloud
-index.search.solr.zookeeper-url={{solr_server_host}}/solr
-index.search.solr.configset=titan
-    </value>
-    <on-ambari-upgrade add="true"/>
-    <value-attributes>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-log4j.xml
deleted file mode 100755
index f61a479..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-log4j.xml
+++ /dev/null
@@ -1,66 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false" supports_do_not_extend="true">
-
-  <property>
-    <name>content</name>
-    <description>Custom log4j-console.properties</description>
-    <value>
-      # Used by gremlin.sh
-
-      log4j.appender.A2=org.apache.log4j.ConsoleAppender
-      log4j.appender.A2.Threshold=TRACE
-      log4j.appender.A2.layout=org.apache.log4j.PatternLayout
-      log4j.appender.A2.layout.ConversionPattern=%d{HH:mm:ss} %-5p %c %x - %m%n
-
-      log4j.rootLogger=${gremlin.log4j.level}, A2
-
-      #log4j.logger.com.thinkaurelius.titan.graphdb.database.idassigner.placement=DEBUG
-      #log4j.logger.com.thinkaurelius.titan.diskstorage.hbase.HBaseStoreManager=DEBUG
-
-      # Disable spurious Hadoop config deprecation warnings under 2.2.0.
-      #
-      # See https://issues.apache.org/jira/browse/HADOOP-10178
-      #
-      # This can and should be deleted when we upgrade our Hadoop 2.2.0
-      # dependency to 2.3.0 or 3.0.0.
-      log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=OFF
-
-      # Configure MR at its own loglevel. We usually want MR at INFO,
-      # even if the rest of the loggers are at WARN or ERROR or FATAL,
-      # because job progress information is at INFO.
-      log4j.logger.org.apache.hadoop.mapred=${gremlin.mr.log4j.level}
-      log4j.logger.org.apache.hadoop.mapreduce=${gremlin.mr.log4j.level}
-
-      # This generates 3 INFO lines per jar on the classpath -- usually more
-      # noise than desirable in the REPL. Switching it to the default
-      # log4j level means it will be at WARN by default, which is ideal.
-      log4j.logger.org.apache.hadoop.mapred.LocalDistributedCacheManager=${gremlin.log4j.level}
-    </value>
-    <on-ambari-upgrade add="true"/>
-    <value-attributes>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/kerberos.json b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/kerberos.json
deleted file mode 100755
index ccabbf0..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/kerberos.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
-  "services": [
-    {
-      "name": "TITAN",
-      "components": [
-        {
-          "name": "TITAN",
-          "identities": [
-            {
-              "name": "/HDFS/NAMENODE/hdfs"
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/metainfo.xml
deleted file mode 100755
index 73f4635..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/metainfo.xml
+++ /dev/null
@@ -1,88 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>TITAN</name>
-      <displayName>Titan</displayName>
-      <comment>Titan is a scalable graph database optimized for storing and querying graphs containing hundreds of
-        billions of vertices and edges distributed across a multi-machine cluster.</comment>
-      <version>1.0.0</version>
-      <components>
-        <component>
-          <name>TITAN</name>
-          <displayName>Titan</displayName>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <commandScript>
-            <script>scripts/titan_client.py</script>
-            <scriptType>PYTHON</scriptType>
-	        <timeout>600</timeout>
-          </commandScript>
-          <configFiles>
-            <configFile>
-               <type>env</type>
-               <fileName>titan-env.sh</fileName>
-               <dictionaryName>titan-env</dictionaryName>
-            </configFile>
-            <configFile>
-                <type>env</type>
-                <fileName>log4j-console.properties</fileName>
-                <dictionaryName>titan-log4j</dictionaryName>
-            </configFile>
-            <configFile>
-                <type>env</type>
-                <fileName>titan-hbase-solr.properties</fileName>
-                <dictionaryName>titan-hbase-solr</dictionaryName>
-            </configFile>
-          </configFiles>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>titan_4_2_*</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <requiredServices>		
-        <service>HDFS</service>
-        <service>HBASE</service>
-        <service>SOLR</service>
-      </requiredServices>
-      
-      <configuration-dependencies>
-        <config-type>titan-env</config-type>
-        <config-type>titan-hbase-solr</config-type>
-        <config-type>titan-log4j</config-type>
-      </configuration-dependencies>
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/files/titanSmoke.groovy
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/files/titanSmoke.groovy b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/files/titanSmoke.groovy
deleted file mode 100755
index 79438be..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/files/titanSmoke.groovy
+++ /dev/null
@@ -1,20 +0,0 @@
-/*Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License */
-
-import com.thinkaurelius.titan.core.TitanFactory;
-
-graph = TitanFactory.open("/etc/titan/conf/titan-hbase-solr.properties")
-g = graph.traversal()
-l = g.V().values('name').toList()

http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/params.py
deleted file mode 100755
index 3cb7aef..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/params.py
+++ /dev/null
@@ -1,128 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.resources.hdfs_resource import HdfsResource
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.version import format_stack_version
-from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions import get_kinit_path
-
-# server configurations
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-stack_name = default("/hostLevelParams/stack_name", None)
-
-stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-iop_stack_version = format_stack_version(stack_version_unformatted)
-
-# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
-version = default("/commandParams/version", None)
-
-titan_user = config['configurations']['titan-env']['titan_user']
-user_group = config['configurations']['cluster-env']['user_group']
-titan_bin_dir = '/usr/iop/current/titan-client/bin'
-
-
-smokeuser = config['configurations']['cluster-env']['smokeuser']
-smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
-
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-
-# titan configurations
-titan_conf_dir = "/usr/iop/current/titan-client/conf"
-titan_hbase_solr_props = config['configurations']['titan-hbase-solr']['content']
-titan_env_props = config['configurations']['titan-env']['content']
-log4j_console_props = config['configurations']['titan-log4j']['content']
-
-# not supporting 32 bit jdk.
-java64_home = config['hostLevelParams']['java_home']
-hadoop_config_dir = '/etc/hadoop/conf'
-hbase_config_dir = '/etc/hbase/conf'
-
-# Titan requires 'storage.hostname', which is the HBase cluster in IOP 4.2.
-# The host name should be the ZooKeeper quorum.
-storage_hosts = config['clusterHostInfo']['zookeeper_hosts']
-storage_host_list = []
-for hostname in storage_hosts:
-  storage_host_list.append(hostname)
-storage_host = ",".join(storage_host_list)
-hbase_zookeeper_parent = config['configurations']['hbase-site']['zookeeper.znode.parent']
-
-# Solr cloud host
-solr_hosts = config['clusterHostInfo']['solr_hosts']
-solr_host_list = []
-for hostname in solr_hosts:
-  solr_host_list.append(hostname)
-solr_host = ",".join(solr_host_list)
-solr_server_host = solr_hosts[0]
-
-# The Titan client does not work yet: there is no 'titan_host' in 'clusterHostInfo',
-# so the lookup fails with "Configuration parameter 'titan_host' was not found in configurations dictionary!"
-# Known issue (task 118900); for now, install Titan and Solr on the same node.
-# titan_host = config['clusterHostInfo']['titan_host']
-titan_host = solr_server_host
-
-# The conf directory and jar should be copied to the Solr site.
-titan_dir = format('/usr/iop/current/titan-client')
-titan_ext_dir = format('/usr/iop/current/titan-client/ext')
-titan_solr_conf_dir = format('/usr/iop/current/titan-client/conf/solr')
-titan_solr_jar_file = format('/usr/iop/current/titan-client/lib/jts-1.13.jar')
-
-titan_solr_hdfs_dir = "/apps/titan"
-titan_solr_hdfs_conf_dir = "/apps/titan/conf"
-titan_solr_hdfs_jar = "/apps/titan/jts-1.13.jar"
-titan_tmp_dir = format('{tmp_dir}/titan')
-titan_solr_dir = format('{titan_tmp_dir}/solr_installed')
-configuration_tags = config['configurationTags']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-titan_hdfs_mode = 0775
-
-#for create_hdfs_directory
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-kinit_path_local = get_kinit_path()
-hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
-hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-hdfs_site = config['configurations']['hdfs-site']
-hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', 'missing_principal').replace("_HOST", hostname)
-default_fs = config['configurations']['core-site']['fs.defaultFS']
-
-import functools
-# To create HDFS directories we call params.HdfsResource in code.
-HdfsResource = functools.partial(
-  HdfsResource,
-  user = hdfs_user,
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local,
-  hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir,
-  principal_name = hdfs_principal_name,
-  hdfs_site = hdfs_site,
-  default_fs = default_fs
-)
-

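A side note on the idiom in the params.py removed above: functools.partial pre-binds the
cluster-wide HdfsResource defaults once, so later call sites pass only what varies per
resource. A minimal, self-contained sketch of the pattern follows; the hdfs_resource
stand-in is hypothetical, not the real resource_management class.

    import functools

    # Hypothetical stand-in for resource_management's HdfsResource; it only
    # echoes its arguments so the partial-application idiom is visible.
    def hdfs_resource(target, user=None, security_enabled=False, **kwargs):
        print("managing %s as user=%s secure=%s extra=%s"
              % (target, user, security_enabled, sorted(kwargs)))

    # Bind the cluster-wide defaults once, as params.py does...
    HdfsResource = functools.partial(hdfs_resource,
                                     user="hdfs",
                                     security_enabled=False)

    # ...so call sites name only what varies per resource.
    HdfsResource("/apps/titan", type="directory", action="create_on_execute")
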
http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/service_check.py
deleted file mode 100755
index 3c011a1..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/service_check.py
+++ /dev/null
@@ -1,64 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import os
-from resource_management import *
-from resource_management.core.resources.system import Execute, File
-from resource_management.core.source import InlineTemplate, StaticFile
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.version import compare_versions
-from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions.validate import call_and_match_output
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-
-class TitanServiceCheck(Script):
-    pass
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class TitanServiceCheckLinux(TitanServiceCheck):
-    def service_check(self, env):
-        import params
-        env.set_params(params)
-
-        File( format("{tmp_dir}/titanSmoke.groovy"),
-              content = StaticFile("titanSmoke.groovy"),
-              mode = 0755
-              )
-
-        if params.iop_stack_version != "" and compare_versions(params.iop_stack_version, '4.2') >= 0:
-            if params.security_enabled:
-                kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
-                Execute(kinit_cmd,
-                        user=params.smokeuser
-                        )
-
-            Execute(format("gremlin {tmp_dir}/titanSmoke.groovy"),
-                    tries     = 3,
-                    try_sleep = 5,
-                    path      = format('{titan_bin_dir}:/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'),
-                    user      = params.smokeuser,
-                    logoutput = True
-                    )
-
-if __name__ == "__main__":
-    # print "Track service check status"
-    TitanServiceCheckLinux().execute()

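The service check above kinits as the smoke user when security is enabled, then runs the
gremlin smoke script with tries=3 and try_sleep=5. The retry shape, reduced to plain
Python (the command below is a placeholder, not the real check):

    import subprocess
    import time

    def run_with_retries(cmd, tries=3, try_sleep=5):
        # Re-run the smoke command a few times before declaring failure,
        # mirroring Execute(tries=3, try_sleep=5) in the service check above.
        for attempt in range(1, tries + 1):
            if subprocess.call(cmd, shell=True) == 0:
                return True
            if attempt < tries:
                time.sleep(try_sleep)
        return False

    run_with_retries("echo gremlin smoke")
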
http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/titan.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/titan.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/titan.py
deleted file mode 100755
index fd94c82..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/titan.py
+++ /dev/null
@@ -1,70 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import os
-from resource_management import *
-from resource_management.core.source import InlineTemplate
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def titan():
-    import params
-
-    Directory(params.titan_conf_dir,
-               create_parents = True,
-               owner=params.titan_user,
-               group=params.user_group
-               )
-
-    File(format("{params.titan_conf_dir}/titan-env.sh"),
-             mode=0644,
-             group=params.user_group,
-             owner=params.titan_user,
-             content=InlineTemplate(params.titan_env_props)
-             )
-
-    # titan_hbase_solr_props is always set to a default even if it's not in the payload
-    File(format("{params.titan_conf_dir}/titan-hbase-solr.properties"),
-         mode=0644,
-         group=params.user_group,
-         owner=params.titan_user,
-         content=InlineTemplate(params.titan_hbase_solr_props)
-         )
-
-    if (params.log4j_console_props != None):
-        File(format("{params.titan_conf_dir}/log4j-console.properties"),
-             mode=0644,
-             group=params.user_group,
-             owner=params.titan_user,
-             content=InlineTemplate(params.log4j_console_props)
-             )
-    elif (os.path.exists(format("{params.titan_conf_dir}/log4j-console.properties"))):
-        File(format("{params.titan_conf_dir}/log4j-console.properties"),
-             mode=0644,
-             group=params.user_group,
-             owner=params.titan_user
-             )
-    # Change titan ext directory for multiple user access
-    Directory(params.titan_ext_dir,
-               create_parents = True,
-               owner=params.titan_user,
-               group=params.user_group,
-               mode=0775
-               )

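titan() above renders log4j-console.properties only when the payload carries content, and
otherwise just re-asserts the mode and ownership of a file that already exists. The same
None-guard in standard-library form (path and content are illustrative):

    import os

    def write_optional_config(path, content=None, mode=0o644):
        # Render the file when content is provided; otherwise only normalize
        # the mode of an already-existing file, as the guard in titan() does.
        if content is not None:
            with open(path, "w") as f:
                f.write(content)
            os.chmod(path, mode)
        elif os.path.exists(path):
            os.chmod(path, mode)

    write_optional_config("/tmp/log4j-console.properties",
                          "log4j.rootLogger=INFO, A2\n")
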
http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/titan_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/titan_client.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/titan_client.py
deleted file mode 100755
index d54ccee..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/titan_client.py
+++ /dev/null
@@ -1,58 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-import os
-from resource_management import *
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from titan import titan
-
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-
-class TitanClient(Script):
-    def configure(self, env):
-        import params
-        env.set_params(params)
-        titan()
-
-    def status(self, env):
-        raise ClientComponentHasNoStatus()
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class TitanClientLinux(TitanClient):
-    def get_component_name(self):
-        return "titan-client"
-
-    def pre_rolling_restart(self, env):
-        import params
-        env.set_params(params)
-
-        if params.version and compare_versions(format_stack_version(params.version), '4.2.0.0') >= 0:
-            conf_select.select(params.stack_name, "titan", params.version)
-            stack_select.select("titan-client", params.version)
-
-    def install(self, env):
-        self.install_packages(env)
-        self.configure(env)
-
-if __name__ == "__main__":
-    TitanClient().execute()

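pre_rolling_restart above re-points the conf and binary links only when the target version
is at least 4.2.0.0, via compare_versions. A rough stand-in for that dotted-version
comparison (illustrative only, not the resource_management implementation):

    def version_tuple(v):
        # "4.2.0.0" -> (4, 2, 0, 0), so tuples compare numerically.
        return tuple(int(part) for part in v.split("."))

    def at_least(version, minimum):
        return version_tuple(version) >= version_tuple(minimum)

    if at_least("4.2.0.0", "4.2.0.0"):
        print("select titan-client for the new stack version")
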

[50/50] [abbrv] ambari git commit: Merge branch 'branch-2.5' into branch-feature-AMBARI-21450

Posted by jo...@apache.org.
Merge branch 'branch-2.5' into branch-feature-AMBARI-21450


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/5cdcd070
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/5cdcd070
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/5cdcd070

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 5cdcd0701aa0003587c1c94a68af0167ddf4df64
Parents: ae3ce90 d8a5bad
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Tue Jul 18 11:02:00 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Tue Jul 18 11:02:00 2017 -0400

----------------------------------------------------------------------
 .../controllers/groups/GroupsEditCtrl.js        |   3 +
 .../stackVersions/StackVersionsCreateCtrl.js    |   3 +-
 .../resource_management/TestPackageResource.py  |   4 +-
 .../core/providers/package/yumrpm.py            |   2 +-
 .../core/providers/package/zypper.py            |   2 +-
 .../libraries/functions/stack_features.py       |  28 +-
 .../libraries/functions/stack_tools.py          |  15 +-
 .../kafka/KafkaTimelineMetricsReporter.java     |   6 +-
 ambari-server/pom.xml                           |   2 +-
 ambari-server/sbin/ambari-server                |   6 +-
 .../ambari/server/agent/ExecutionCommand.java   |   2 +
 .../ambari/server/checks/CheckDescription.java  |  38 +-
 .../checks/ComponentsExistInRepoCheck.java      | 142 ++++++
 .../controller/ActionExecutionContext.java      |  28 ++
 .../controller/AmbariActionExecutionHelper.java |  26 +-
 .../AmbariCustomCommandExecutionHelper.java     |  22 +-
 .../AmbariManagementControllerImpl.java         | 122 +++--
 .../server/controller/ServiceRequest.java       |  26 +-
 .../AbstractControllerResourceProvider.java     |  23 +
 .../ClusterStackVersionResourceProvider.java    |   2 +
 .../internal/HostResourceProvider.java          |   1 +
 .../internal/ServiceResourceProvider.java       |  64 ++-
 .../controller/utilities/PropertyHelper.java    |   8 +
 .../apache/ambari/server/orm/DBAccessor.java    |  14 +
 .../ambari/server/orm/DBAccessorImpl.java       |  24 +
 .../LdapToPamMigrationHelper.java               |  73 +++
 .../server/security/authorization/Users.java    |   4 +
 .../upgrades/ChangeStackReferencesAction.java   |   4 +-
 .../upgrades/FinalizeUpgradeAction.java         |   1 +
 .../upgrades/UpgradeUserKerberosDescriptor.java |  41 +-
 .../org/apache/ambari/server/state/Host.java    |   4 +-
 .../ambari/server/state/PropertyInfo.java       |   2 +
 .../ambari/server/state/UpgradeContext.java     |   3 +-
 .../ambari/server/state/host/HostImpl.java      |  17 +-
 .../KerberosDescriptorUpdateHelper.java         |   9 +-
 .../ambari/server/topology/AmbariContext.java   |  17 +-
 .../server/upgrade/UpgradeCatalog252.java       | 110 ++++-
 ambari-server/src/main/python/ambari-server.py  |  10 +-
 .../main/python/ambari_server/setupActions.py   |   1 +
 .../main/python/ambari_server/setupSecurity.py  | 123 ++++-
 .../0.96.0.2.0/package/scripts/hbase_master.py  |  10 +-
 .../0.96.0.2.0/package/scripts/hbase_service.py |  40 +-
 .../0.96.0.2.0/package/scripts/params_linux.py  |   9 +
 .../HDFS/2.1.0.2.0/configuration/hdfs-site.xml  |   6 +
 .../HDFS/2.1.0.2.0/package/scripts/hdfs.py      |   8 +
 .../2.1.0.2.0/package/scripts/hdfs_namenode.py  |  26 +-
 .../2.1.0.2.0/package/scripts/params_linux.py   |  11 +-
 .../2.1.0.2.0/package/scripts/params_windows.py |   7 +
 .../package/templates/include_hosts_list.j2     |  21 +
 .../0.12.0.2.0/package/scripts/params_linux.py  |   4 +
 .../0.12.0.2.0/package/scripts/service_check.py |   3 +-
 .../common-services/JNBG/0.2.0/alerts.json      |  32 ++
 .../JNBG/0.2.0/configuration/jnbg-env.xml       | 209 +++++++++
 .../common-services/JNBG/0.2.0/kerberos.json    |  59 +++
 .../common-services/JNBG/0.2.0/metainfo.xml     | 108 +++++
 .../JNBG/0.2.0/package/files/jkg_install.sh     | 169 +++++++
 .../JNBG/0.2.0/package/files/jkg_start.sh       |  84 ++++
 .../JNBG/0.2.0/package/files/log4j_setup.sh     |  79 ++++
 .../0.2.0/package/files/pyspark_configure.sh    | 104 +++++
 .../JNBG/0.2.0/package/files/pythonenv_setup.sh | 138 ++++++
 .../JNBG/0.2.0/package/files/toree_configure.sh | 151 +++++++
 .../JNBG/0.2.0/package/files/toree_install.sh   | 176 ++++++++
 .../JNBG/0.2.0/package/scripts/jkg_toree.py     | 134 ++++++
 .../0.2.0/package/scripts/jkg_toree_params.py   | 177 ++++++++
 .../JNBG/0.2.0/package/scripts/jnbg_helpers.py  |  81 ++++
 .../JNBG/0.2.0/package/scripts/jnbg_params.py   |  66 +++
 .../JNBG/0.2.0/package/scripts/py_client.py     |  63 +++
 .../0.2.0/package/scripts/py_client_params.py   |  39 ++
 .../JNBG/0.2.0/package/scripts/service_check.py |  44 ++
 .../JNBG/0.2.0/package/scripts/status_params.py |  26 ++
 .../0.10.0/configuration/ranger-kafka-audit.xml |  58 +++
 .../common-services/KAFKA/0.10.0/kerberos.json  |  79 ++++
 .../common-services/KAFKA/0.10.0/metainfo.xml   |  28 ++
 .../KAFKA/0.8.1/package/scripts/kafka.py        |  12 +
 .../KAFKA/0.8.1/package/scripts/params.py       |   3 +
 .../KAFKA/0.9.0/configuration/kafka-broker.xml  |   2 +-
 .../0.5.0.2.2/package/scripts/params_linux.py   |   8 +
 .../KNOX/0.5.0.2.2/package/scripts/upgrade.py   |   2 +-
 .../OOZIE/4.0.0.2.0/package/scripts/oozie.py    |  26 +-
 .../4.0.0.2.0/package/scripts/oozie_server.py   |   4 +-
 .../package/scripts/oozie_server_upgrade.py     |  15 +-
 .../4.0.0.2.0/package/scripts/params_linux.py   |  15 +-
 .../R4ML/0.8.0/configuration/r4ml-env.xml       |  48 ++
 .../common-services/R4ML/0.8.0/metainfo.xml     |  92 ++++
 .../R4ML/0.8.0/package/files/Install.R          |  25 ++
 .../R4ML/0.8.0/package/files/ServiceCheck.R     |  28 ++
 .../R4ML/0.8.0/package/files/localr.repo        |  22 +
 .../R4ML/0.8.0/package/scripts/__init__.py      |  19 +
 .../R4ML/0.8.0/package/scripts/params.py        |  80 ++++
 .../R4ML/0.8.0/package/scripts/r4ml_client.py   | 201 +++++++++
 .../R4ML/0.8.0/package/scripts/service_check.py |  45 ++
 .../SPARK/1.2.1/package/scripts/params.py       |  11 +-
 .../SPARK/1.2.1/package/scripts/setup_spark.py  |   6 +-
 .../1.2.1/package/scripts/spark_service.py      |   6 +-
 .../SYSTEMML/0.10.0/metainfo.xml                |  77 ++++
 .../SYSTEMML/0.10.0/package/scripts/__init__.py |  19 +
 .../SYSTEMML/0.10.0/package/scripts/params.py   |  40 ++
 .../0.10.0/package/scripts/service_check.py     |  43 ++
 .../0.10.0/package/scripts/systemml_client.py   |  49 ++
 .../common-services/TITAN/1.0.0/alerts.json     |  33 ++
 .../1.0.0/configuration/gremlin-server.xml      |  85 ++++
 .../TITAN/1.0.0/configuration/hadoop-gryo.xml   |  94 ++++
 .../1.0.0/configuration/hadoop-hbase-read.xml   | 102 +++++
 .../TITAN/1.0.0/configuration/titan-env.xml     | 157 +++++++
 .../1.0.0/configuration/titan-hbase-solr.xml    |  69 +++
 .../TITAN/1.0.0/configuration/titan-log4j.xml   |  65 +++
 .../common-services/TITAN/1.0.0/kerberos.json   |  52 +++
 .../common-services/TITAN/1.0.0/metainfo.xml    | 124 +++++
 .../package/alerts/alert_check_titan_server.py  |  65 +++
 .../package/files/gremlin-server-script.sh      |  86 ++++
 .../package/files/tinkergraph-empty.properties  |  18 +
 .../TITAN/1.0.0/package/files/titanSmoke.groovy |  20 +
 .../TITAN/1.0.0/package/scripts/params.py       | 202 +++++++++
 .../1.0.0/package/scripts/params_server.py      |  37 ++
 .../1.0.0/package/scripts/service_check.py      |  88 ++++
 .../TITAN/1.0.0/package/scripts/titan.py        | 143 ++++++
 .../TITAN/1.0.0/package/scripts/titan_client.py |  61 +++
 .../TITAN/1.0.0/package/scripts/titan_server.py |  67 +++
 .../1.0.0/package/scripts/titan_service.py      | 150 +++++++
 .../templates/titan_solr_client_jaas.conf.j2    |  23 +
 .../package/templates/titan_solr_jaas.conf.j2   |  26 ++
 .../YARN/2.1.0.2.0/configuration/yarn-site.xml  |   6 +
 .../2.1.0.2.0/package/scripts/params_linux.py   |  12 +-
 .../2.1.0.2.0/package/scripts/params_windows.py |  10 +-
 .../package/scripts/resourcemanager.py          |  18 +-
 .../package/templates/include_hosts_list.j2     |  21 +
 .../0.6.0.2.5/configuration/zeppelin-env.xml    |   2 +-
 .../services/HDFS/configuration/hdfs-site.xml   |   6 +
 .../HDFS/package/scripts/hdfs_namenode.py       |  12 +-
 .../0.8/services/HDFS/package/scripts/params.py |  11 +-
 .../package/templates/include_hosts_list.j2     |  21 +
 .../services/YARN/configuration/yarn-site.xml   |   6 +
 .../0.8/services/YARN/package/scripts/params.py |  10 +-
 .../YARN/package/scripts/resourcemanager.py     |   9 +-
 .../package/templates/include_hosts_list.j2     |  21 +
 .../BigInsights/4.0/services/HIVE/metainfo.xml  |   2 +-
 .../configuration/spark-javaopts-properties.xml |   3 +
 .../BigInsights/4.0/services/SPARK/metainfo.xml |   2 +-
 .../package/scripts/spark_thrift_server.py      | 125 ++++++
 .../SPARK/package/scripts/thrift_server.py      | 125 ------
 .../4.0/stack-advisor/stack_advisor_25.py       |   5 +-
 .../stacks/BigInsights/4.2.5/metainfo.xml       |   2 +-
 .../HBASE/package/files/draining_servers.rb     | 164 +++++++
 .../HBASE/package/files/hbase-smoke-cleanup.sh  |  23 +
 .../HBASE/package/files/hbaseSmokeVerify.sh     |  34 ++
 .../services/HBASE/package/scripts/__init__.py  |  19 +
 .../services/HBASE/package/scripts/functions.py |  54 +++
 .../services/HBASE/package/scripts/hbase.py     | 234 ++++++++++
 .../HBASE/package/scripts/hbase_client.py       |  82 ++++
 .../HBASE/package/scripts/hbase_decommission.py |  93 ++++
 .../HBASE/package/scripts/hbase_master.py       | 163 +++++++
 .../HBASE/package/scripts/hbase_regionserver.py | 166 +++++++
 .../package/scripts/hbase_restgatewayserver.py  |  83 ++++
 .../HBASE/package/scripts/hbase_service.py      |  93 ++++
 .../HBASE/package/scripts/hbase_upgrade.py      |  41 ++
 .../services/HBASE/package/scripts/params.py    |  29 ++
 .../HBASE/package/scripts/params_linux.py       | 447 +++++++++++++++++++
 .../HBASE/package/scripts/params_windows.py     |  43 ++
 .../package/scripts/phoenix_queryserver.py      |  88 ++++
 .../HBASE/package/scripts/phoenix_service.py    |  55 +++
 .../HBASE/package/scripts/service_check.py      |  95 ++++
 .../HBASE/package/scripts/setup_ranger_hbase.py | 106 +++++
 .../HBASE/package/scripts/status_params.py      |  68 +++
 .../services/HBASE/package/scripts/upgrade.py   |  65 +++
 ...-metrics2-hbase.properties-GANGLIA-MASTER.j2 | 117 +++++
 ...doop-metrics2-hbase.properties-GANGLIA-RS.j2 | 116 +++++
 .../HBASE/package/templates/hbase-smoke.sh.j2   |  44 ++
 .../HBASE/package/templates/hbase.conf.j2       |  35 ++
 .../package/templates/hbase_client_jaas.conf.j2 |  23 +
 .../templates/hbase_grant_permissions.j2        |  40 ++
 .../package/templates/hbase_master_jaas.conf.j2 |  26 ++
 .../templates/hbase_queryserver_jaas.conf.j2    |  26 ++
 .../templates/hbase_regionserver_jaas.conf.j2   |  26 ++
 .../package/templates/hbase_rest_jaas.conf.j2   |  26 ++
 .../HBASE/package/templates/regionservers.j2    |  20 +
 .../4.2.5/services/HIVE/metainfo.xml            |   2 +-
 .../4.2.5/services/JNBG/metainfo.xml            |  26 ++
 .../4.2.5/services/R4ML/metainfo.xml            |  37 ++
 .../spark2-javaopts-properties.xml              |   5 +-
 .../4.2.5/services/SPARK2/metainfo.xml          |   2 +-
 .../4.2.5/services/SQOOP/metainfo.xml           |   2 +-
 .../4.2.5/services/SYSTEMML/metainfo.xml        |  37 ++
 .../4.2.5/services/TITAN/metainfo.xml           |  40 ++
 .../4.2.5/upgrades/config-upgrade.xml           | 101 ++++-
 .../upgrades/nonrolling-upgrade-to-hdp-2.6.xml  | 229 +++++++++-
 .../services/HBASE/package/scripts/params.py    |   4 +-
 .../BigInsights/4.2/services/HIVE/metainfo.xml  |   2 +-
 .../BigInsights/4.2/services/KNOX/kerberos.json |   6 -
 .../RANGER/configuration/ranger-admin-site.xml  |  14 +
 .../configuration/spark-javaopts-properties.xml |   3 +
 .../BigInsights/4.2/services/SPARK/metainfo.xml |   2 +-
 .../services/SPARK/package/scripts/params.py    |   5 +-
 .../package/scripts/spark_thrift_server.py      | 119 +++++
 .../SPARK/package/scripts/thrift_server.py      | 119 -----
 .../services/TITAN/configuration/titan-env.xml  |   4 +-
 .../TITAN/configuration/titan-hbase-solr.xml    |   2 +-
 .../TITAN/configuration/titan-log4j.xml         |   2 +-
 .../BigInsights/4.2/upgrades/config-upgrade.xml | 116 ++++-
 .../upgrades/nonrolling-upgrade-to-hdp-2.6.xml  | 206 ++++++++-
 .../services/YARN/configuration/yarn-site.xml   |   6 +
 .../services/YARN/package/scripts/params.py     |   9 +-
 .../YARN/package/scripts/resourcemanager.py     |   9 +-
 .../package/templates/exclude_hosts_list.j2     |  21 +
 .../package/templates/include_hosts_list.j2     |  21 +
 .../hooks/before-ANY/files/changeToSecureUid.sh |  13 +-
 .../before-ANY/scripts/shared_initialization.py |  45 +-
 .../services/YARN/configuration/yarn-site.xml   |   6 +
 .../services/YARN/package/scripts/params.py     |  10 +-
 .../YARN/package/scripts/resourcemanager.py     |   9 +-
 .../package/templates/include_hosts_list.j2     |  21 +
 .../stacks/HDP/2.5/services/KAFKA/metainfo.xml  |   1 +
 .../stacks/HDP/2.5/services/stack_advisor.py    |  11 +-
 .../stacks/HDP/2.5/upgrades/config-upgrade.xml  |   4 +
 .../HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml |   6 +-
 .../stacks/HDP/2.5/upgrades/upgrade-2.6.xml     |   1 +
 .../configuration/application-properties.xml    |  17 +
 .../services/HIVE/configuration/hive-env.xml    |  78 ++--
 .../HIVE/configuration/hive-interactive-env.xml |  62 +--
 .../stacks/HDP/2.6/upgrades/config-upgrade.xml  |   4 +
 .../HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml |   4 +
 .../stacks/HDP/2.6/upgrades/upgrade-2.6.xml     |   1 +
 .../src/main/resources/stacks/stack_advisor.py  |  18 +
 .../checks/ComponentExistsInRepoCheckTest.java  | 329 ++++++++++++++
 .../AmbariManagementControllerTest.java         |   8 +-
 .../server/orm/dao/ClusterVersionDAOTest.java   | 264 -----------
 .../ChangeStackReferencesActionTest.java        |   1 +
 .../upgrades/UpgradeActionTest.java             |   2 -
 .../UpgradeUserKerberosDescriptorTest.java      |  19 +-
 .../KerberosDescriptorUpdateHelperTest.java     |  70 +++
 .../src/test/python/TestAmbariServer.py         |  13 +-
 .../src/test/python/TestStackFeature.py         |  44 +-
 .../python/custom_actions/test_ru_set_all.py    |   6 +-
 .../python/stacks/2.0.6/HDFS/test_namenode.py   |   2 +-
 .../stacks/2.0.6/configs/altfs_plus_hdfs.json   |   2 +-
 .../python/stacks/2.0.6/configs/default.json    |   2 +-
 .../2.0.6/configs/default_ams_embedded.json     |   2 +-
 .../2.0.6/configs/default_hive_nn_ha.json       |   2 +-
 .../2.0.6/configs/default_hive_nn_ha_2.json     |   2 +-
 .../2.0.6/configs/default_hive_non_hdfs.json    |   2 +-
 .../2.0.6/configs/default_no_install.json       |   2 +-
 .../2.0.6/configs/default_oozie_mysql.json      |   2 +-
 .../default_update_exclude_file_only.json       |   2 +-
 .../2.0.6/configs/default_with_bucket.json      |   2 +-
 .../python/stacks/2.0.6/configs/flume_22.json   |   2 +-
 .../python/stacks/2.0.6/configs/flume_only.json |   2 +-
 .../stacks/2.0.6/configs/hbase_no_phx.json      |   2 +-
 .../stacks/2.0.6/configs/hbase_with_phx.json    |   2 +-
 .../test/python/stacks/2.0.6/configs/nn_eu.json |   2 +-
 .../stacks/2.0.6/configs/nn_eu_standby.json     |   2 +-
 .../2.0.6/configs/oozie_existing_sqla.json      |   2 +-
 .../python/stacks/2.0.6/configs/secured.json    |   2 +-
 .../2.0.6/hooks/before-ANY/test_before_any.py   | 294 +++++++-----
 .../2.1/configs/hive-metastore-upgrade.json     |   2 +-
 .../python/stacks/2.2/configs/knox_upgrade.json |   2 +-
 .../test/python/stacks/2.3/configs/ats_1_5.json |   2 +-
 .../stacks/2.5/common/test_stack_advisor.py     | 150 ++++---
 .../python/stacks/2.5/configs/hsi_default.json  |   2 +-
 .../2.5/configs/hsi_default_for_restart.json    |   2 +-
 .../test/python/stacks/2.5/configs/hsi_ha.json  |   2 +-
 .../main/admin/stack_and_upgrade_controller.js  |  22 +
 .../app/controllers/wizard/step7_controller.js  |  67 +++
 .../configs/stack_config_properties_mapper.js   |  14 +-
 ambari-web/app/messages.js                      |   1 +
 ambari-web/app/styles/application.less          |  15 +
 .../main/admin/stack_upgrade/versions.hbs       |   9 +-
 ...ontrols_service_config_usergroup_with_id.hbs |  27 ++
 ambari-web/app/utils/ajax/ajax.js               |   2 +-
 ambari-web/app/utils/config.js                  |   3 +
 .../configs/service_configs_by_category_view.js |   6 +
 ambari-web/app/views/common/controls_view.js    |  39 ++
 .../stack_upgrade/upgrade_version_box_view.js   |  11 +-
 .../main/admin/stack_upgrade/versions_view.js   |  16 -
 .../admin/stack_and_upgrade_controller_test.js  |  19 +
 .../admin/stack_upgrade/version_view_test.js    |  42 --
 .../services/YARN/configuration/yarn-site.xml   |   6 +
 .../YARN/package/scripts/params_linux.py        |   9 +-
 .../YARN/package/scripts/params_windows.py      |  10 +-
 .../YARN/package/scripts/resourcemanager.py     |  18 +-
 .../package/templates/include_hosts_list.j2     |  21 +
 279 files changed, 10995 insertions(+), 1193 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
index 441c047,7948d30..9b6b2f5
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
@@@ -452,6 -433,9 +452,8 @@@ public class ExecutionCommand extends A
      String GROUP_LIST = "group_list";
      String USER_GROUPS = "user_groups";
      String NOT_MANAGED_HDFS_PATH_LIST = "not_managed_hdfs_path_list";
 -    String VERSION = "version";
+     String SOURCE_STACK = "source_stack";
+     String TARGET_STACK = "target_stack";
      String REFRESH_TOPOLOGY = "refresh_topology";
      String HOST_SYS_PREPPED = "host_sys_prepped";
      String MAX_DURATION_OF_RETRIES = "max_duration_for_retries";

http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
index 42a95c0,af506f2..9939ce7
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
@@@ -24,9 -23,9 +24,10 @@@ import java.util.List
  import java.util.Map;
  
  import org.apache.ambari.server.actionmanager.TargetHostType;
 +import org.apache.ambari.server.agent.ExecutionCommand;
  import org.apache.ambari.server.controller.internal.RequestOperationLevel;
  import org.apache.ambari.server.controller.internal.RequestResourceFilter;
+ import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
  
  /**
   * The context required to create tasks and stages for a custom action
@@@ -43,8 -42,7 +44,9 @@@ public class ActionExecutionContext 
    private String expectedComponentName;
    private boolean hostsInMaintenanceModeExcluded = true;
    private boolean allowRetry = false;
 +
 +  private List<ExecutionCommandVisitor> m_visitors = new ArrayList<>();
+   private RepositoryVersionEntity repositoryVersion;
  
    /**
     * {@code true} if slave/client component failures should be automatically
@@@ -173,15 -171,31 +175,41 @@@
    }
  
    /**
+    * Gets the stack/version to use for generating stack-associated values for a
+    * command. In some cases the cluster's stack is not the correct one to use,
+    * such as when distributing a repository.
+    *
+    * @return the repository for the stack/version to use when generating
+    *         stack-specific content for the command.
+    */
+   public RepositoryVersionEntity getRepositoryVersion() {
+     return repositoryVersion;
+   }
+ 
+   /**
+    * Sets the stack/version to use for generating stack-associated values for a
+    * command. In some cases the cluster's stack is not the correct one to use,
+    * such as when distributing a repository.
+    *
+    * @param repositoryVersion
+    *          the repository version to use for stack-based properties on the command.
+    */
+   public void setRepositoryVersion(RepositoryVersionEntity repositoryVersion) {
+     this.repositoryVersion = repositoryVersion;
+   }
+ 
++  /**
 +   * Adds a command visitor that will be invoked after a command is created.  Provides access
 +   * to the command.
 +   *
 +   * @param visitor the visitor
 +   */
 +  public void addVisitor(ExecutionCommandVisitor visitor) {
 +    m_visitors.add(visitor);
 +  }
 +
    @Override
    public String toString() {
      return "ActionExecutionContext{" +

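Alongside the new repositoryVersion field, the merged ActionExecutionContext accumulates
ExecutionCommandVisitor callbacks that run after a command is built. The callback-list
idea, reduced to a toy Python analogue (all names hypothetical):

    class ActionContext:
        def __init__(self):
            self._visitors = []

        def add_visitor(self, visitor):
            # Each visitor gets to adjust the command after it is created.
            self._visitors.append(visitor)

        def build_command(self, name):
            command = {"name": name}
            for visit in self._visitors:
                visit(command)
            return command

    ctx = ActionContext()
    ctx.add_visitor(lambda cmd: cmd.setdefault("timeout", 600))
    print(ctx.build_command("INSTALL_PACKAGES"))
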
http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
index 1b0e0e0,0638910..04f1cb3
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
@@@ -461,11 -459,7 +462,12 @@@ public class AmbariActionExecutionHelpe
          hostLevelParams.put(previousDBConnectorName.getKey(), previousDBConnectorName.getValue());
        }
  
 -      addRepoInfoToHostLevelParams(cluster, actionContext, hostLevelParams, hostName);
 +      if (StringUtils.isNotBlank(serviceName)) {
 +        Service service = cluster.getService(serviceName);
-         addRepoInfoToHostLevelParams(service.getDesiredRepositoryVersion(), hostLevelParams, hostName);
++        addRepoInfoToHostLevelParams(actionContext, service.getDesiredRepositoryVersion(),
++            hostLevelParams, hostName);
 +      }
 +
  
        Map<String, String> roleParams = execCmd.getRoleParams();
        if (roleParams == null) {
@@@ -527,10 -519,10 +529,25 @@@
    *
    * */
  
-   private void addRepoInfoToHostLevelParams(RepositoryVersionEntity repositoryVersion,
 -  private void addRepoInfoToHostLevelParams(Cluster cluster, ActionExecutionContext actionContext,
--      Map<String, String> hostLevelParams, String hostName) throws AmbariException {
 -    if (null == cluster) {
++  private void addRepoInfoToHostLevelParams(ActionExecutionContext actionContext,
++      RepositoryVersionEntity repositoryVersion, Map<String, String> hostLevelParams,
++      String hostName) throws AmbariException {
++
++    // if the repo is null, see if any values from the context should go on the
++    // host params and then return
 +    if (null == repositoryVersion) {
++      // see if the action context has a repository set to use for the command
++      if (null != actionContext.getRepositoryVersion()) {
++        StackId stackId = actionContext.getRepositoryVersion().getStackId();
++        hostLevelParams.put(STACK_NAME, stackId.getStackName());
++        hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
++      }
++
        return;
++    } else {
++      StackId stackId = repositoryVersion.getStackId();
++      hostLevelParams.put(STACK_NAME, stackId.getStackName());
++      hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
      }
  
      JsonObject rootJsonObject = new JsonObject();
@@@ -554,8 -550,18 +571,5 @@@
      }
  
      hostLevelParams.put(REPO_INFO, rootJsonObject.toString());
--
-     hostLevelParams.put(STACK_NAME, repositoryVersion.getStackName());
-     hostLevelParams.put(STACK_VERSION, repositoryVersion.getStackVersion());
 -    // set the host level params if not already set by whoever is creating this command
 -    if (!hostLevelParams.containsKey(STACK_NAME) || !hostLevelParams.containsKey(STACK_VERSION)) {
 -      // see if the action context has a repository set to use for the command, otherwise use the
 -      // cluster's current stack ID
 -      StackId stackId = cluster.getCurrentStackVersion();
 -      if (null != actionContext.getRepositoryVersion()) {
 -        stackId = actionContext.getRepositoryVersion().getStackId();
 -      }
 -
 -      hostLevelParams.put(STACK_NAME, stackId.getStackName());
 -      hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
 -    }
    }
  }

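The reworked addRepoInfoToHostLevelParams above prefers the repository resolved for the
service and falls back to the one set on the action context (as when distributing a
repository), writing STACK_NAME/STACK_VERSION either way. That precedence in a compact
Python sketch (names illustrative):

    from collections import namedtuple

    StackId = namedtuple("StackId", "stack_name stack_version")

    def stack_host_params(service_repo, context_repo):
        # Prefer the service's desired repository; otherwise fall back to
        # the repository carried on the action context, if any.
        repo = service_repo if service_repo is not None else context_repo
        if repo is None:
            return {}
        return {"STACK_NAME": repo.stack_name,
                "STACK_VERSION": repo.stack_version}

    print(stack_host_params(None, StackId("HDP", "2.6")))
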
http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
index 6360a04,e321559..011ebfd
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
@@@ -1044,8 -1010,8 +1050,8 @@@ public class AmbariCustomCommandExecuti
        }
  
        if (!serviceName.equals(Service.Type.HBASE.name()) || hostName.equals(primaryCandidate)) {
-         commandParams.put(UPDATE_EXCLUDE_FILE_ONLY, "false");
+         commandParams.put(UPDATE_FILES_ONLY, "false");
 -        addCustomCommandAction(commandContext, commandFilter, stage, commandParams, commandDetail.toString());
 +        addCustomCommandAction(commandContext, commandFilter, stage, commandParams, commandDetail.toString(), null);
        }
      }
    }
@@@ -1467,14 -1318,18 +1473,18 @@@
          hostParamsStageJson);
    }
  
 -  Map<String, String> createDefaultHostParams(Cluster cluster) throws AmbariException {
 -    StackId stackId = cluster.getDesiredStackVersion();
 -    return createDefaultHostParams(cluster, stackId);
 +  Map<String, String> createDefaultHostParams(Cluster cluster, RepositoryVersionEntity repositoryVersion) throws AmbariException {
 +    return createDefaultHostParams(cluster, repositoryVersion.getStackId());
    }
  
 -  Map<String, String> createDefaultHostParams(Cluster cluster, StackId stackId) throws AmbariException{
 +  Map<String, String> createDefaultHostParams(Cluster cluster, StackId stackId) throws AmbariException {
 +
      TreeMap<String, String> hostLevelParams = new TreeMap<>();
      hostLevelParams.put(JDK_LOCATION, managementController.getJdkResourceUrl());
+     hostLevelParams.put(JAVA_HOME, managementController.getJavaHome());
+     hostLevelParams.put(JAVA_VERSION, String.valueOf(configs.getJavaVersion()));
+     hostLevelParams.put(JDK_NAME, managementController.getJDKName());
+     hostLevelParams.put(JCE_NAME, managementController.getJCEName());
      hostLevelParams.put(STACK_NAME, stackId.getStackName());
      hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
      hostLevelParams.put(DB_NAME, managementController.getServerDB());

http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceRequest.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceRequest.java
index 0fbb63f,a8e6315..1a6a040
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceRequest.java
@@@ -28,25 -28,17 +28,27 @@@ public class ServiceRequest 
    private String credentialStoreEnabled; // CREATE/UPDATE/GET
    private String credentialStoreSupported; //GET
  
-   private Long desiredRepositoryVersionId;
 -  public ServiceRequest(String clusterName, String serviceName,
 -                        String desiredState) {
 -    this(clusterName, serviceName, desiredState, null);
++  private String desiredStack;
++  private String desiredRepositoryVersion;
 +  /**
 +   * Short-lived object that gets set while validating a request
 +   */
 +  private RepositoryVersionEntity resolvedRepository;
 +
-   public ServiceRequest(String clusterName, String serviceName,
-       Long desiredRepositoryVersionId, String desiredState) {
-     this(clusterName, serviceName, desiredRepositoryVersionId, desiredState, null);
++  public ServiceRequest(String clusterName, String serviceName, String desiredStack,
++      String desiredRepositoryVersion, String desiredState) {
++    this(clusterName, serviceName, desiredStack, desiredRepositoryVersion, desiredState, null);
    }
  
--  public ServiceRequest(String clusterName, String serviceName,
-       Long desiredRepositoryVersionId, String desiredState, String credentialStoreEnabled) {
 -                        String desiredState,
 -                        String credentialStoreEnabled) {
++  public ServiceRequest(String clusterName, String serviceName, String desiredStack,
++      String desiredRepositoryVersion, String desiredState, String credentialStoreEnabled) {
      this.clusterName = clusterName;
      this.serviceName = serviceName;
      this.desiredState = desiredState;
 +
-     this.desiredRepositoryVersionId = desiredRepositoryVersionId;
++    this.desiredStack = desiredStack;
++    this.desiredRepositoryVersion = desiredRepositoryVersion;
 +
      this.credentialStoreEnabled = credentialStoreEnabled;
      // Credential store supported cannot be changed after
      // creation since it comes from the stack definition.
@@@ -81,10 -73,6 +83,14 @@@
      this.desiredState = desiredState;
    }
  
-   public Long getDesiredRepositoryVersionId() {
-     return desiredRepositoryVersionId;
++  public String getDesiredStack() {
++    return desiredStack;
++  }
++
++  public String getDesiredRepositoryVersion() {
++    return desiredRepositoryVersion;
 +  }
 +
    /**
     * @return the clusterName
     */
@@@ -142,25 -130,13 +148,25 @@@
      this.credentialStoreSupported = credentialStoreSupported;
    }
  
 +  @Override
    public String toString() {
      StringBuilder sb = new StringBuilder();
 -    sb.append("clusterName=" + clusterName
 -        + ", serviceName=" + serviceName
 -        + ", desiredState=" + desiredState
 -        + ", credentialStoreEnabled=" + credentialStoreEnabled
 -        + ", credentialStoreSupported=" + credentialStoreSupported);
 +    sb.append("clusterName=").append(clusterName)
 +      .append(", serviceName=").append(serviceName)
 +      .append(", desiredState=").append(desiredState)
 +      .append(", credentialStoreEnabled=").append(credentialStoreEnabled)
 +      .append(", credentialStoreSupported=").append(credentialStoreSupported);
      return sb.toString();
    }
 -}
 +
 +  /**
 +   * @param repositoryVersion
 +   */
 +  public void setResolvedRepository(RepositoryVersionEntity repositoryVersion) {
 +    resolvedRepository = repositoryVersion;
 +  }
 +
 +  public RepositoryVersionEntity getResolvedRepository() {
 +    return resolvedRepository;
 +  }
- }
++}

http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractControllerResourceProvider.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractControllerResourceProvider.java
index eba1816,b26814a..a9d234d
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractControllerResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractControllerResourceProvider.java
@@@ -27,6 -27,6 +27,7 @@@ import org.apache.ambari.server.control
  import org.apache.ambari.server.controller.spi.Resource;
  import org.apache.ambari.server.controller.spi.ResourceProvider;
  import org.apache.ambari.server.controller.utilities.ClusterControllerHelper;
++import org.apache.ambari.server.controller.utilities.PropertyHelper;
  import org.apache.ambari.server.state.Cluster;
  
  /**
@@@ -56,6 -56,6 +57,28 @@@ public abstract class AbstractControlle
      super(propertyIds, keyPropertyIds);
      this.managementController = managementController;
    }
++  
++  /**
++   * Create a new resource provider for the given management controller. This
++   * constructor will initialize the specified {@link Resource.Type} with the
++   * provided keys. It should be used in cases where the provider declares its
++   * own keys instead of reading them from a JSON file.
++   *
++   * @param type
++   *          the type to set the properties for (not {@code null}).
++   * @param propertyIds
++   *          the property ids
++   * @param keyPropertyIds
++   *          the key property ids
++   * @param managementController
++   *          the management controller
++   */
++  AbstractControllerResourceProvider(Resource.Type type, Set<String> propertyIds,
++      Map<Resource.Type, String> keyPropertyIds, AmbariManagementController managementController) {
++    this(propertyIds, keyPropertyIds, managementController);
++    PropertyHelper.setPropertyIds(type, propertyIds);
++    PropertyHelper.setKeyPropertyIds(type, keyPropertyIds);
++  }
  
    public static void init(ResourceProviderFactory factory) {
      resourceProviderFactory = factory;

http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
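The package-private constructor added above lets a provider declare its property and
key-property ids in code, registering them through PropertyHelper instead of reading them
from a JSON file. The registration shape as a toy registry (hypothetical, not Ambari's
API):

    _PROPERTY_IDS = {}
    _KEY_PROPERTY_IDS = {}

    class ToyResourceProvider:
        def __init__(self, resource_type, property_ids, key_property_ids):
            # Register this provider's ids at construction time, the way the
            # new constructor calls PropertyHelper.setPropertyIds and
            # PropertyHelper.setKeyPropertyIds.
            _PROPERTY_IDS[resource_type] = set(property_ids)
            _KEY_PROPERTY_IDS[resource_type] = dict(key_property_ids)

    ToyResourceProvider("Service",
                        {"ServiceInfo/service_name", "ServiceInfo/state"},
                        {"Service": "ServiceInfo/service_name"})
    print(sorted(_PROPERTY_IDS["Service"]))
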
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
index 93c02be,633fe8c..d87d7a4
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
@@@ -600,16 -710,13 +600,18 @@@ public class ClusterStackVersionResourc
      RequestResourceFilter filter = new RequestResourceFilter(null, null,
              Collections.singletonList(host.getHostName()));
  
 -    ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
 -        INSTALL_PACKAGES_ACTION, Collections.singletonList(filter), params);
 +    ActionExecutionContext actionContext = new ActionExecutionContext(
 +            cluster.getClusterName(), INSTALL_PACKAGES_ACTION,
 +            Collections.singletonList(filter),
 +            roleParams);
+ 
+     actionContext.setRepositoryVersion(repoVersion);
      actionContext.setTimeout(Short.valueOf(configuration.getDefaultAgentTaskTimeout(true)));
  
 +    repoVersionHelper.addCommandRepository(actionContext, osFamily, repoVersion, repoInfo);
 +
      return actionContext;
 +
    }
  
  

http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
index 387e0dc,0cc1745..dcaaad9
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
@@@ -74,6 -72,6 +74,7 @@@ import org.apache.ambari.server.state.S
  import org.apache.ambari.server.state.ServiceInfo;
  import org.apache.ambari.server.state.StackId;
  import org.apache.ambari.server.state.State;
++import org.apache.commons.collections.CollectionUtils;
  import org.apache.commons.lang.StringUtils;
  import org.apache.commons.lang.Validate;
  
@@@ -85,76 -83,37 +86,76 @@@ import com.google.inject.assistedinject
   * Resource provider for service resources.
   */
  public class ServiceResourceProvider extends AbstractControllerResourceProvider {
 +  public static final String SERVICE_CLUSTER_NAME_PROPERTY_ID = PropertyHelper.getPropertyId(
 +      "ServiceInfo", "cluster_name");
  
 +  public static final String SERVICE_SERVICE_NAME_PROPERTY_ID = PropertyHelper.getPropertyId(
 +      "ServiceInfo", "service_name");
  
 -  // ----- Property ID constants ---------------------------------------------
 +  public static final String SERVICE_SERVICE_STATE_PROPERTY_ID = PropertyHelper.getPropertyId(
 +      "ServiceInfo", "state");
  
 -  // Services
 -  public static final String SERVICE_CLUSTER_NAME_PROPERTY_ID    = PropertyHelper.getPropertyId("ServiceInfo", "cluster_name");
 -  public static final String SERVICE_SERVICE_NAME_PROPERTY_ID    = PropertyHelper.getPropertyId("ServiceInfo", "service_name");
 -  public static final String SERVICE_SERVICE_STATE_PROPERTY_ID   = PropertyHelper.getPropertyId("ServiceInfo", "state");
 -  public static final String SERVICE_MAINTENANCE_STATE_PROPERTY_ID = PropertyHelper.getPropertyId("ServiceInfo", "maintenance_state");
 -  public static final String SERVICE_CREDENTIAL_STORE_SUPPORTED_PROPERTY_ID =
 -    PropertyHelper.getPropertyId("ServiceInfo", "credential_store_supported");
 -  public static final String SERVICE_CREDENTIAL_STORE_ENABLED_PROPERTY_ID =
 -    PropertyHelper.getPropertyId("ServiceInfo", "credential_store_enabled");
 +  public static final String SERVICE_MAINTENANCE_STATE_PROPERTY_ID = PropertyHelper.getPropertyId(
 +      "ServiceInfo", "maintenance_state");
  
 -  public static final String SERVICE_ATTRIBUTES_PROPERTY_ID = PropertyHelper.getPropertyId("Services", "attributes");
 +  public static final String SERVICE_CREDENTIAL_STORE_SUPPORTED_PROPERTY_ID = PropertyHelper.getPropertyId(
 +      "ServiceInfo", "credential_store_supported");
  
 -  //Parameters from the predicate
 -  private static final String QUERY_PARAMETERS_RUN_SMOKE_TEST_ID =
 -    "params/run_smoke_test";
 +  public static final String SERVICE_CREDENTIAL_STORE_ENABLED_PROPERTY_ID = PropertyHelper.getPropertyId(
 +      "ServiceInfo", "credential_store_enabled");
 +
 +  public static final String SERVICE_ATTRIBUTES_PROPERTY_ID = PropertyHelper.getPropertyId(
 +      "Services", "attributes");
 +
 +  public static final String SERVICE_DESIRED_STACK_PROPERTY_ID = PropertyHelper.getPropertyId(
 +      "ServiceInfo", "desired_stack");
  
-   public static final String SERVICE_DESIRED_REPO_VERSION_ID_PROPERTY_ID = PropertyHelper.getPropertyId(
-       "ServiceInfo", "desired_repository_version_id");
 -  private static final String QUERY_PARAMETERS_RECONFIGURE_CLIENT =
 -    "params/reconfigure_client";
++  public static final String SERVICE_DESIRED_REPO_VERSION_PROPERTY_ID = PropertyHelper.getPropertyId(
++      "ServiceInfo", "desired_repository_version");
  
 -  private static final String QUERY_PARAMETERS_START_DEPENDENCIES =
 -    "params/start_dependencies";
 +  protected static final String SERVICE_REPOSITORY_STATE = "ServiceInfo/repository_state";
 +
 +  //Parameters from the predicate
 +  private static final String QUERY_PARAMETERS_RUN_SMOKE_TEST_ID = "params/run_smoke_test";
 +  private static final String QUERY_PARAMETERS_RECONFIGURE_CLIENT = "params/reconfigure_client";
 +  private static final String QUERY_PARAMETERS_START_DEPENDENCIES = "params/start_dependencies";
  
    private static Set<String> pkPropertyIds =
 -      new HashSet<String>(Arrays.asList(new String[]{
 -          SERVICE_CLUSTER_NAME_PROPERTY_ID,
 -          SERVICE_SERVICE_NAME_PROPERTY_ID}));
 +    new HashSet<>(Arrays.asList(new String[]{
 +      SERVICE_CLUSTER_NAME_PROPERTY_ID,
 +      SERVICE_SERVICE_NAME_PROPERTY_ID}));
  
 +  /**
 +   * The property ids for a service resource.
 +   */
 +  private static final Set<String> PROPERTY_IDS = new HashSet<>();
 +
 +  /**
 +   * The key property ids for a service resource.
 +   */
 +  private static final Map<Resource.Type, String> KEY_PROPERTY_IDS = new HashMap<>();
 +
 +  static {
 +    // properties
 +    PROPERTY_IDS.add(SERVICE_CLUSTER_NAME_PROPERTY_ID);
 +    PROPERTY_IDS.add(SERVICE_SERVICE_NAME_PROPERTY_ID);
 +    PROPERTY_IDS.add(SERVICE_SERVICE_STATE_PROPERTY_ID);
 +    PROPERTY_IDS.add(SERVICE_MAINTENANCE_STATE_PROPERTY_ID);
 +    PROPERTY_IDS.add(SERVICE_CREDENTIAL_STORE_SUPPORTED_PROPERTY_ID);
 +    PROPERTY_IDS.add(SERVICE_CREDENTIAL_STORE_ENABLED_PROPERTY_ID);
 +    PROPERTY_IDS.add(SERVICE_ATTRIBUTES_PROPERTY_ID);
 +    PROPERTY_IDS.add(SERVICE_DESIRED_STACK_PROPERTY_ID);
-     PROPERTY_IDS.add(SERVICE_DESIRED_REPO_VERSION_ID_PROPERTY_ID);
++    PROPERTY_IDS.add(SERVICE_DESIRED_REPO_VERSION_PROPERTY_ID);
 +    PROPERTY_IDS.add(SERVICE_REPOSITORY_STATE);
 +
 +    PROPERTY_IDS.add(QUERY_PARAMETERS_RUN_SMOKE_TEST_ID);
 +    PROPERTY_IDS.add(QUERY_PARAMETERS_RECONFIGURE_CLIENT);
 +    PROPERTY_IDS.add(QUERY_PARAMETERS_START_DEPENDENCIES);
 +
 +    // keys
 +    KEY_PROPERTY_IDS.put(Resource.Type.Service, SERVICE_SERVICE_NAME_PROPERTY_ID);
 +    KEY_PROPERTY_IDS.put(Resource.Type.Cluster, SERVICE_CLUSTER_NAME_PROPERTY_ID);
 +  }
  
    private MaintenanceStateHelper maintenanceStateHelper;
  
@@@ -177,12 -131,14 +178,12 @@@
     * @param managementController  the management controller
     */
    @AssistedInject
 -  public ServiceResourceProvider(@Assisted Set<String> propertyIds,
 -                          @Assisted Map<Resource.Type, String> keyPropertyIds,
 -                          @Assisted AmbariManagementController managementController,
 -                          MaintenanceStateHelper maintenanceStateHelper) {
 -    super(propertyIds, keyPropertyIds, managementController);
 +  public ServiceResourceProvider(
 +      @Assisted AmbariManagementController managementController,
 +      MaintenanceStateHelper maintenanceStateHelper, RepositoryVersionDAO repositoryVersionDAO) {
-     super(PROPERTY_IDS, KEY_PROPERTY_IDS, managementController);
++    super(Resource.Type.Service, PROPERTY_IDS, KEY_PROPERTY_IDS, managementController);
      this.maintenanceStateHelper = maintenanceStateHelper;
 +    this.repositoryVersionDAO = repositoryVersionDAO;
  
      setRequiredCreateAuthorizations(EnumSet.of(RoleAuthorization.SERVICE_ADD_DELETE_SERVICES));
      setRequiredUpdateAuthorizations(RoleAuthorization.AUTHORIZATIONS_UPDATE_SERVICE);
@@@ -251,19 -207,6 +252,15 @@@
        setResourceProperty(resource, SERVICE_CREDENTIAL_STORE_ENABLED_PROPERTY_ID,
            String.valueOf(response.isCredentialStoreEnabled()), requestedIds);
  
-       RepositoryVersionEntity repoVersion = repositoryVersionDAO.findByPK(response.getDesiredRepositoryVersionId());
++      setResourceProperty(resource, SERVICE_DESIRED_STACK_PROPERTY_ID,
++          response.getDesiredStackId(), requestedIds);
 +
-       // !!! TODO is the UI using this?
-       if (null != repoVersion) {
-         setResourceProperty(resource, SERVICE_DESIRED_STACK_PROPERTY_ID, repoVersion.getStackId(), requestedIds);
-       }
- 
-       setResourceProperty(resource, SERVICE_DESIRED_REPO_VERSION_ID_PROPERTY_ID,
-           response.getDesiredRepositoryVersionId(), requestedIds);
++      setResourceProperty(resource, SERVICE_DESIRED_REPO_VERSION_PROPERTY_ID,
++          response.getDesiredRepositoryVersion(), requestedIds);
 +
 +      setResourceProperty(resource, SERVICE_REPOSITORY_STATE,
 +          response.getRepositoryVersionState(), requestedIds);
 +
        Map<String, Object> serviceSpecificProperties = getServiceSpecificProperties(
            response.getClusterName(), response.getServiceName(), requestedIds);
  
@@@ -385,13 -328,9 +382,13 @@@
     * @return the service request object
     */
    private ServiceRequest getRequest(Map<String, Object> properties) {
- 
-     String desiredRepoId = (String) properties.get(SERVICE_DESIRED_REPO_VERSION_ID_PROPERTY_ID);
++    String desiredStack = (String)properties.get(SERVICE_DESIRED_STACK_PROPERTY_ID);
++    String desiredRepositoryVersion = (String)properties.get(SERVICE_DESIRED_REPO_VERSION_PROPERTY_ID);
 +
      ServiceRequest svcRequest = new ServiceRequest(
          (String) properties.get(SERVICE_CLUSTER_NAME_PROPERTY_ID),
          (String) properties.get(SERVICE_SERVICE_NAME_PROPERTY_ID),
-         null == desiredRepoId ? null : Long.valueOf(desiredRepoId),
++        desiredStack, desiredRepositoryVersion,
          (String) properties.get(SERVICE_SERVICE_STATE_PROPERTY_ID),
          (String) properties.get(SERVICE_CREDENTIAL_STORE_ENABLED_PROPERTY_ID));
  
@@@ -423,15 -362,10 +420,19 @@@
      for (ServiceRequest request : requests) {
        Cluster cluster = clusters.getCluster(request.getClusterName());
  
 -      // Already checked that service does not exist
 -      Service s = cluster.addService(request.getServiceName());
++      String desiredStack = request.getDesiredStack();
++
 +      RepositoryVersionEntity repositoryVersion = request.getResolvedRepository();
 +
 +      if (null == repositoryVersion) {
-         throw new AmbariException("Could not find any repository on the request.");
++        throw new AmbariException(String.format("Could not find any repositories defined by %s", desiredStack));
++      } else {
++        desiredStack = repositoryVersion.getStackId().toString();
 +      }
  
 -      /**
 +      Service s = cluster.addService(request.getServiceName(), repositoryVersion);
 +
 +      /*
         * Get the credential_store_supported field only from the stack definition.
         * Not possible to update the value through a request.
         */
@@@ -1065,21 -1023,7 +1066,36 @@@
          // Expected
        }
  
-       Long desiredRepositoryVersion = request.getDesiredRepositoryVersionId();
-       if (null == desiredRepositoryVersion) {
-         throw new IllegalArgumentException(String.format("%s is required when adding a service.", SERVICE_DESIRED_REPO_VERSION_ID_PROPERTY_ID));
-       }
 -      StackId stackId = cluster.getDesiredStackVersion();
++      String desiredStack = request.getDesiredStack();
++      StackId stackId = new StackId(desiredStack);
 +
-       RepositoryVersionEntity repositoryVersion = repositoryVersionDAO.findByPK(desiredRepositoryVersion);
++      String desiredRepositoryVersion = request.getDesiredRepositoryVersion();
++      RepositoryVersionEntity repositoryVersion = null;
++      if (StringUtils.isNotBlank(desiredRepositoryVersion)){
++        repositoryVersion = repositoryVersionDAO.findByVersion(desiredRepositoryVersion);
++      }
 +
 +      if (null == repositoryVersion) {
-         throw new IllegalArgumentException(String.format("Could not find any repositories defined by %d", desiredRepositoryVersion));
++        // !!! FIXME hack until the UI always sends the repository
++        if (null == desiredStack) {
++          desiredStack = cluster.getDesiredStackVersion().toString();
++        }
++
++        List<RepositoryVersionEntity> allVersions = repositoryVersionDAO.findByStack(new StackId(desiredStack));
++
++        if (CollectionUtils.isNotEmpty(allVersions)) {
++          repositoryVersion = allVersions.get(0);
++        }
 +      }
 +
-       StackId stackId = repositoryVersion.getStackId();
++      if (null == repositoryVersion) {
++        throw new AmbariException(String.format("Could not find any repositories defined by %s", desiredStack));
++      } else {
++        stackId = repositoryVersion.getStackId();
++      }
 +
 +      request.setResolvedRepository(repositoryVersion);
 +
        if (!ambariMetaInfo.isValidService(stackId.getStackName(),
                stackId.getStackVersion(), request.getServiceName())) {
          throw new IllegalArgumentException("Unsupported or invalid service in stack, clusterName=" + clusterName
@@@ -1114,4 -1058,4 +1130,4 @@@
      }
  
    }
--}
++}
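
The repository resolution introduced in this file reduces to three ordered steps: exact match on the requested version, fall back to the first repository registered for the (possibly defaulted) stack, otherwise fail. A condensed, standalone sketch of that ordering; the method shape is illustrative, while the DAO calls mirror the hunks above:

    private RepositoryVersionEntity resolveRepository(String desiredVersion, String desiredStack,
        Cluster cluster, RepositoryVersionDAO repositoryVersionDAO) throws AmbariException {

      RepositoryVersionEntity repo = null;

      // 1. Exact match on an explicitly requested version.
      if (StringUtils.isNotBlank(desiredVersion)) {
        repo = repositoryVersionDAO.findByVersion(desiredVersion);
      }

      // 2. Fall back to the first repository registered for the stack; use the
      //    cluster's desired stack when the request did not name one.
      if (null == repo) {
        String stack = (null != desiredStack)
            ? desiredStack : cluster.getDesiredStackVersion().toString();
        List<RepositoryVersionEntity> allVersions =
            repositoryVersionDAO.findByStack(new StackId(stack));
        if (CollectionUtils.isNotEmpty(allVersions)) {
          repo = allVersions.get(0);
        }
      }

      // 3. Nothing resolvable: reject the request.
      if (null == repo) {
        throw new AmbariException(
            String.format("Could not find any repositories defined by %s", desiredStack));
      }

      return repo;
    }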

http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/PropertyHelper.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/PropertyHelper.java
index 455d569,4ded10e..79aef9a
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/PropertyHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/PropertyHelper.java
@@@ -115,6 -118,6 +115,10 @@@ public class PropertyHelper 
      return propertyIds == null ? Collections.<String>emptySet() : propertyIds;
    }
  
++  public static void setPropertyIds(Resource.Type resourceType, Set<String> propertyIds) {
++    PROPERTY_IDS.put(resourceType.getInternalType(), propertyIds);
++  }
++
    /**
     * Extract the set of property ids from a component PropertyInfo map.
     *
@@@ -146,6 -149,6 +150,10 @@@
    public static Map<Resource.Type, String> getKeyPropertyIds(Resource.Type resourceType) {
      return KEY_PROPERTY_IDS.get(resourceType.getInternalType());
    }
++  
++  public static void setKeyPropertyIds(Resource.Type resourceType, Map<Resource.Type, String> keyPropertyKeys) {
++    KEY_PROPERTY_IDS.put(resourceType.getInternalType(), keyPropertyKeys);
++  }
  
    /**
     * Helper to get a property name from a string.

http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
index d2c0ea2,93e6393..0ab2263
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
@@@ -854,350 -560,4 +854,349 @@@ public class UpgradeContext 
      parameters.put(KeyNames.REFRESH_CONFIG_TAGS_BEFORE_EXECUTION, "true");
      return parameters;
    }
 -}
 +
 +  /**
 +   * {@inheritDoc}
 +   */
 +  @Override
 +  public String toString() {
 +    return Objects.toStringHelper(this)
 +        .add("direction", m_direction)
 +        .add("type", m_type)
 +        .add("target", m_repositoryVersion).toString();
 +  }
 +
 +  /**
 +   * Gets whether a downgrade is allowed for this upgrade. If the direction is
 +   * {@link Direction#DOWNGRADE}, then this method always returns false.
 +   * Otherwise it will consult {@link UpgradePack#isDowngradeAllowed()}.
 +   *
 +   * @return {@code true} of a downgrade is allowed for this upgrade,
 +   *         {@code false} otherwise.
 +   */
 +  public boolean isDowngradeAllowed() {
 +    if (m_direction == Direction.DOWNGRADE) {
 +      return false;
 +    }
 +
 +    return m_upgradePack.isDowngradeAllowed();
 +  }
 +
 +  /**
 +   * Gets whether this upgrade is the revert of a previously applied patch.
 +   *
 +   * @return {@code true} if this upgrade reverts a patch, {@code false}
 +   *         otherwise.
 +   */
 +  public boolean isPatchRevert() {
 +    return m_isRevert;
 +  }
 +
 +  /**
 +   * Builds a chain of {@link UpgradeRequestValidator}s to ensure that the
 +   * incoming request to create a new upgrade is valid.
 +   *
 +   * @param upgradeType
 +   *          the type of upgrade to build the validator for.
 +   * @return the validator which can check to ensure that the properties are
 +   *         valid.
 +   */
 +  private UpgradeRequestValidator buildValidator(UpgradeType upgradeType){
 +    UpgradeRequestValidator validator = new BasicUpgradePropertiesValidator();
 +    UpgradeRequestValidator preReqValidator = new PreReqCheckValidator();
 +    validator.setNextValidator(preReqValidator);
 +
 +    final UpgradeRequestValidator upgradeTypeValidator;
 +    switch (upgradeType) {
 +      case HOST_ORDERED:
 +        upgradeTypeValidator = new HostOrderedUpgradeValidator();
 +        break;
 +      case NON_ROLLING:
 +      case ROLLING:
 +      default:
 +        upgradeTypeValidator = null;
 +        break;
 +    }
 +
 +    preReqValidator.setNextValidator(upgradeTypeValidator);
 +    return validator;
 +  }
 +
 +  /**
 +   * The {@link UpgradeRequestValidator} contains the logic to check for correct
 +   * upgrade request properties and then pass the responsibility onto the next
 +   * validator in the chain.
 +   */
 +  private abstract class UpgradeRequestValidator {
 +    /**
 +     * The next validator.
 +     */
 +    UpgradeRequestValidator m_nextValidator;
 +
 +    /**
 +     * Sets the next validator in the chain.
 +     *
 +     * @param nextValidator
 +     *          the next validator to run, or {@code null} for none.
 +     */
 +    void setNextValidator(UpgradeRequestValidator nextValidator) {
 +      m_nextValidator = nextValidator;
 +    }
 +
 +    /**
 +     * Validates the upgrade request from this point in the chain.
 +     *
 +     * @param cluster
 +     * @param direction
 +     * @param type
 +     * @param upgradePack
 +     * @param requestMap
 +     * @throws AmbariException
 +     */
 +    final void validate(Cluster cluster, Direction direction, UpgradeType type,
 +        UpgradePack upgradePack, Map<String, Object> requestMap) throws AmbariException {
 +
 +      // run this instance's check
 +      check(cluster, direction, type, upgradePack, requestMap);
 +
 +      // pass along to the next
 +      if (null != m_nextValidator) {
 +        m_nextValidator.validate(cluster, direction, type, upgradePack, requestMap);
 +      }
 +    }
 +
 +    /**
 +     * Checks to ensure that upgrade request is valid given the specific
 +     * arguments.
 +     *
 +     * @param cluster
 +     * @param direction
 +     * @param type
 +     * @param upgradePack
 +     * @param requestMap
 +     * @throws AmbariException
 +     */
 +    abstract void check(Cluster cluster, Direction direction, UpgradeType type,
 +        UpgradePack upgradePack, Map<String, Object> requestMap) throws AmbariException;
 +  }
 +
 +  /**
 +   * The {@link BasicUpgradePropertiesValidator} ensures that the basic required
 +   * properties are present on the upgrade request.
 +   */
 +  private final class BasicUpgradePropertiesValidator extends UpgradeRequestValidator {
 +
 +    /**
 +     * {@inheritDoc}
 +     */
 +    @Override
 +    public void check(Cluster cluster, Direction direction, UpgradeType type,
 +        UpgradePack upgradePack, Map<String, Object> requestMap) throws AmbariException {
 +
 +      if (direction == Direction.UPGRADE) {
 +        String repositoryVersionId = (String) requestMap.get(UPGRADE_REPO_VERSION_ID);
 +        if (StringUtils.isBlank(repositoryVersionId)) {
 +          throw new AmbariException(
 +              String.format("%s is required for upgrades", UPGRADE_REPO_VERSION_ID));
 +        }
 +      }
 +    }
 +  }
 +
 +  /**
 +   * The {@link PreReqCheckValidator} ensures that the upgrade pre-requisite
 +   * checks have passed.
 +   */
 +  private final class PreReqCheckValidator extends UpgradeRequestValidator {
 +    /**
 +     * {@inheritDoc}
 +     */
 +    @Override
 +    void check(Cluster cluster, Direction direction, UpgradeType type, UpgradePack upgradePack,
 +        Map<String, Object> requestMap) throws AmbariException {
 +
 +      String repositoryVersionId = (String) requestMap.get(UPGRADE_REPO_VERSION_ID);
 +      boolean skipPrereqChecks = Boolean.parseBoolean((String) requestMap.get(UPGRADE_SKIP_PREREQUISITE_CHECKS));
 +      boolean failOnCheckWarnings = Boolean.parseBoolean((String) requestMap.get(UPGRADE_FAIL_ON_CHECK_WARNINGS));
 +      String preferredUpgradePack = requestMap.containsKey(UPGRADE_PACK) ? (String) requestMap.get(UPGRADE_PACK) : null;
 +
 +      // verify that there is not an upgrade or downgrade that is in progress or suspended
 +      UpgradeEntity existingUpgrade = cluster.getUpgradeInProgress();
 +      if (null != existingUpgrade) {
 +        throw new AmbariException(
 +            String.format("Unable to perform %s as another %s (request ID %s) is in progress.",
 +                direction.getText(false), existingUpgrade.getDirection().getText(false),
 +                existingUpgrade.getRequestId()));
 +      }
 +
 +      // skip this check if it's a downgrade or we are instructed to skip it
 +      if (direction.isDowngrade() || skipPrereqChecks) {
 +        return;
 +      }
 +
 +      RepositoryVersionEntity repositoryVersion = m_repoVersionDAO.findByPK(
 +          Long.valueOf(repositoryVersionId));
 +
 +      // Validate pre-req checks pass
 +      PreUpgradeCheckResourceProvider provider = (PreUpgradeCheckResourceProvider) AbstractControllerResourceProvider.getResourceProvider(
 +          Resource.Type.PreUpgradeCheck);
 +
 +      Predicate preUpgradeCheckPredicate = new PredicateBuilder().property(
 +          PreUpgradeCheckResourceProvider.UPGRADE_CHECK_CLUSTER_NAME_PROPERTY_ID).equals(cluster.getClusterName()).and().property(
 +          PreUpgradeCheckResourceProvider.UPGRADE_CHECK_REPOSITORY_VERSION_PROPERTY_ID).equals(repositoryVersion.getVersion()).and().property(
 +          PreUpgradeCheckResourceProvider.UPGRADE_CHECK_FOR_REVERT_PROPERTY_ID).equals(m_isRevert).and().property(
 +          PreUpgradeCheckResourceProvider.UPGRADE_CHECK_UPGRADE_TYPE_PROPERTY_ID).equals(type).and().property(
 +          PreUpgradeCheckResourceProvider.UPGRADE_CHECK_UPGRADE_PACK_PROPERTY_ID).equals(preferredUpgradePack).toPredicate();
 +
 +      Request preUpgradeCheckRequest = PropertyHelper.getReadRequest();
 +
 +      Set<Resource> preUpgradeCheckResources;
 +      try {
 +        preUpgradeCheckResources = provider.getResources(
 +            preUpgradeCheckRequest, preUpgradeCheckPredicate);
 +      } catch (NoSuchResourceException|SystemException|UnsupportedPropertyException|NoSuchParentResourceException e) {
 +        throw new AmbariException(
 +            String.format("Unable to perform %s. Prerequisite checks could not be run",
 +                direction.getText(false)), e);
 +      }
 +
 +      List<Resource> failedResources = new LinkedList<>();
 +      if (preUpgradeCheckResources != null) {
 +        for (Resource res : preUpgradeCheckResources) {
 +          PrereqCheckStatus prereqCheckStatus = (PrereqCheckStatus) res.getPropertyValue(
 +              PreUpgradeCheckResourceProvider.UPGRADE_CHECK_STATUS_PROPERTY_ID);
 +
 +          if (prereqCheckStatus == PrereqCheckStatus.FAIL
 +              || (failOnCheckWarnings && prereqCheckStatus == PrereqCheckStatus.WARNING)) {
 +            failedResources.add(res);
 +          }
 +        }
 +      }
 +
 +      if (!failedResources.isEmpty()) {
 +        throw new AmbariException(
 +            String.format("Unable to perform %s. Prerequisite checks failed %s",
 +                direction.getText(false), m_gson.toJson(failedResources)));
 +      }
 +    }
 +  }
 +
 +  /**
 +   * Ensures that for {@link UpgradeType#HOST_ORDERED}, the properties supplied
 +   * are valid.
 +   */
 +  @SuppressWarnings("unchecked")
 +  private final class HostOrderedUpgradeValidator extends UpgradeRequestValidator {
 +
 +    /**
 +     * {@inheritDoc}
 +     */
 +    @Override
 +    void check(Cluster cluster, Direction direction, UpgradeType type, UpgradePack upgradePack,
 +        Map<String, Object> requestMap) throws AmbariException {
 +
 +      String skipFailuresRequestProperty = (String) requestMap.get(UPGRADE_SKIP_FAILURES);
 +      if (Boolean.parseBoolean(skipFailuresRequestProperty)) {
 +        throw new AmbariException(
 +            String.format("The %s property is not valid when creating a %s upgrade.",
 +                UPGRADE_SKIP_FAILURES, UpgradeType.HOST_ORDERED));
 +      }
 +
 +      String skipManualVerification = (String) requestMap.get(UPGRADE_SKIP_MANUAL_VERIFICATION);
 +      if (Boolean.parseBoolean(skipManualVerification)) {
 +        throw new AmbariException(
 +            String.format("The %s property is not valid when creating a %s upgrade.",
 +                UPGRADE_SKIP_MANUAL_VERIFICATION, UpgradeType.HOST_ORDERED));
 +      }
 +
 +      if (!requestMap.containsKey(UPGRADE_HOST_ORDERED_HOSTS)) {
 +        throw new AmbariException(
 +            String.format("The %s property is required when creating a %s upgrade.",
 +                UPGRADE_HOST_ORDERED_HOSTS, UpgradeType.HOST_ORDERED));
 +      }
 +
 +      List<HostOrderItem> hostOrderItems = extractHostOrderItemsFromRequest(requestMap);
 +      List<String> hostsFromRequest = new ArrayList<>(hostOrderItems.size());
 +      for (HostOrderItem hostOrderItem : hostOrderItems) {
 +        if (hostOrderItem.getType() == HostOrderActionType.HOST_UPGRADE) {
 +          hostsFromRequest.addAll(hostOrderItem.getActionItems());
 +        }
 +      }
 +
 +      // ensure that all hosts for this cluster are accounted for
 +      Collection<Host> hosts = cluster.getHosts();
 +      Set<String> clusterHostNames = new HashSet<>(hosts.size());
 +      for (Host host : hosts) {
 +        clusterHostNames.add(host.getHostName());
 +      }
 +
 +      Collection<String> disjunction = CollectionUtils.disjunction(hostsFromRequest,
 +          clusterHostNames);
 +
 +      if (CollectionUtils.isNotEmpty(disjunction)) {
 +        throw new AmbariException(String.format(
 +            "The supplied list of hosts must match the cluster hosts in an upgrade of type %s. The following hosts are either missing or invalid: %s",
 +            UpgradeType.HOST_ORDERED, StringUtils.join(disjunction, ", ")));
 +      }
 +
 +      // verify that the upgradepack has the required grouping and set the
 +      // action items on it
 +      HostOrderGrouping hostOrderGrouping = null;
 +      List<Grouping> groupings = upgradePack.getGroups(direction);
 +      for (Grouping grouping : groupings) {
 +        if (grouping instanceof HostOrderGrouping) {
 +          hostOrderGrouping = (HostOrderGrouping) grouping;
 +          hostOrderGrouping.setHostOrderItems(hostOrderItems);
 +        }
 +      }
 +    }
 +
 +    /**
 +     * Builds the list of {@link HostOrderItem}s from the upgrade request. If
 +     * the upgrade request does not contain the hosts, an exception is thrown.
 +     *
 +     * @param requestMap
 +     *          the map of properties from the request (not {@code null}).
 +     * @return the ordered list of actions to orchestrate for the
 +     *         {@link UpgradeType#HOST_ORDERED} upgrade.
 +     * @throws AmbariException
 +     *           if the request properties are not valid.
 +     */
 +    private List<HostOrderItem> extractHostOrderItemsFromRequest(Map<String, Object> requestMap)
 +        throws AmbariException {
 +      // ewwww
 +      Set<Map<String, List<String>>> hostsOrder = (Set<Map<String, List<String>>>) requestMap.get(
 +          UPGRADE_HOST_ORDERED_HOSTS);
 +
 +      if (CollectionUtils.isEmpty(hostsOrder)) {
 +        throw new AmbariException(
 +            String.format("The %s property must be specified when using a %s upgrade type.",
 +                UPGRADE_HOST_ORDERED_HOSTS, UpgradeType.HOST_ORDERED));
 +      }
 +
 +      List<HostOrderItem> hostOrderItems = new ArrayList<>();
 +
 +      // extract all of the hosts so that we can ensure they are all accounted
 +      // for
 +      Iterator<Map<String, List<String>>> iterator = hostsOrder.iterator();
 +      while (iterator.hasNext()) {
 +        Map<String, List<String>> grouping = iterator.next();
 +        List<String> hosts = grouping.get("hosts");
 +        List<String> serviceChecks = grouping.get("service_checks");
 +
 +        if (CollectionUtils.isEmpty(hosts) && CollectionUtils.isEmpty(serviceChecks)) {
 +          throw new AmbariException(String.format(
 +              "The %s property must contain at least one object with either a %s or %s key",
 +              UPGRADE_HOST_ORDERED_HOSTS, "hosts", "service_checks"));
 +        }
 +
 +        if (CollectionUtils.isNotEmpty(hosts)) {
 +          hostOrderItems.add(new HostOrderItem(HostOrderActionType.HOST_UPGRADE, hosts));
 +        }
 +
 +        if (CollectionUtils.isNotEmpty(serviceChecks)) {
 +          hostOrderItems.add(new HostOrderItem(HostOrderActionType.SERVICE_CHECK, serviceChecks));
 +        }
 +      }
 +
 +      return hostOrderItems;
 +    }
 +  }
- 
- }
++}
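
The validators added above form a small chain of responsibility: buildValidator() wires the basic-property check, then the prerequisite check, then an optional type-specific link (only HOST_ORDERED supplies one), and validate() walks the chain until a null tail ends it. Stripped of the Ambari-specific types, the pattern is simply:

    abstract class Validator {
      private Validator next;

      // A null next validator terminates the chain.
      void setNext(Validator next) {
        this.next = next;
      }

      // Run this link's own check, then delegate to the rest of the chain.
      final void validate(Map<String, Object> request) throws Exception {
        check(request);
        if (next != null) {
          next.validate(request);
        }
      }

      abstract void check(Map<String, Object> request) throws Exception;
    }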

http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
index ebd3468,ebd3468..345f598
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
@@@ -74,6 -74,6 +74,7 @@@ import org.apache.ambari.server.state.C
  import org.apache.ambari.server.state.DesiredConfig;
  import org.apache.ambari.server.state.Host;
  import org.apache.ambari.server.state.SecurityType;
++import org.apache.ambari.server.state.StackId;
  import org.apache.ambari.server.state.configgroup.ConfigGroup;
  import org.apache.ambari.server.utils.RetryHelper;
  import org.slf4j.Logger;
@@@ -187,9 -187,9 +188,10 @@@ public class AmbariContext 
  
    public void createAmbariResources(ClusterTopology topology, String clusterName, SecurityType securityType, String repoVersion) {
      Stack stack = topology.getBlueprint().getStack();
++    StackId stackId = new StackId(stack.getName(), stack.getVersion());
  
      createAmbariClusterResource(clusterName, stack.getName(), stack.getVersion(), securityType, repoVersion);
--    createAmbariServiceAndComponentResources(topology, clusterName);
++    createAmbariServiceAndComponentResources(topology, clusterName, stackId, repoVersion);
    }
  
    public void createAmbariClusterResource(String clusterName, String stackName, String stackVersion, SecurityType securityType, String repoVersion) {
@@@ -216,7 -216,7 +218,8 @@@
      }
    }
  
--  public void createAmbariServiceAndComponentResources(ClusterTopology topology, String clusterName) {
++  public void createAmbariServiceAndComponentResources(ClusterTopology topology, String clusterName,
++      StackId stackId, String repositoryVersion) {
      Collection<String> services = topology.getBlueprint().getServices();
  
      try {
@@@ -229,7 -229,7 +232,9 @@@
      Set<ServiceComponentRequest> componentRequests = new HashSet<>();
      for (String service : services) {
        String credentialStoreEnabled = topology.getBlueprint().getCredentialStoreEnabled(service);
--      serviceRequests.add(new ServiceRequest(clusterName, service, null, credentialStoreEnabled));
++      serviceRequests.add(new ServiceRequest(clusterName, service, stackId.getStackId(),
++          repositoryVersion, null, credentialStoreEnabled));
++
        for (String component : topology.getBlueprint().getComponents(service)) {
          String recoveryEnabled = topology.getBlueprint().getRecoveryEnabled(service, component);
          componentRequests.add(new ServiceComponentRequest(clusterName, service, component, null, recoveryEnabled));
@@@ -250,7 -250,7 +255,7 @@@
      startProps.put(ServiceResourceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID, "STARTED");
      startProps.put(ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
      Predicate predicate = new EqualsPredicate<>(
--        ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
++      ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
      try {
        getServiceResourceProvider().updateResources(
            new RequestImpl(null, Collections.singleton(installProps), null, null), predicate);
@@@ -284,7 -284,7 +289,7 @@@
  
      Map<String, Object> properties = new HashMap<>();
      properties.put(HostResourceProvider.HOST_CLUSTER_NAME_PROPERTY_ID, clusterName);
--    properties.put(HostResourceProvider.HOST_NAME_PROPERTY_ID, hostName);
++    properties.put(HostResourceProvider.HOST_HOST_NAME_PROPERTY_ID, hostName);
      properties.put(HostResourceProvider.HOST_RACK_INFO_PROPERTY_ID, host.getRackInfo());
  
      try {
@@@ -740,4 -740,4 +745,4 @@@
      }
      return componentResourceProvider;
    }
--}
++}

http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
index 6d2ab84,ea1b034..0fcf779
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
@@@ -24,12 -28,15 +28,17 @@@ import java.util.Set
  
  import org.apache.ambari.server.AmbariException;
  import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
+ import org.apache.ambari.server.orm.dao.ClusterDAO;
 -import org.apache.ambari.server.orm.entities.ClusterConfigMappingEntity;
++import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
+ import org.apache.ambari.server.orm.entities.ClusterEntity;
  import org.apache.ambari.server.state.Cluster;
  import org.apache.ambari.server.state.Clusters;
  import org.apache.ambari.server.state.Config;
  import org.apache.ambari.server.state.ConfigHelper;
  import org.apache.ambari.server.state.PropertyInfo;
  import org.apache.commons.lang.StringUtils;
++import org.slf4j.Logger;
++import org.slf4j.LoggerFactory;
  
  import com.google.common.collect.Sets;
  import com.google.inject.Inject;
@@@ -54,6 -63,13 +63,13 @@@ public class UpgradeCatalog252 extends 
  
    private static final String CLUSTER_ENV = "cluster-env";
  
+   private static final List<String> configTypesToEnsureSelected = Arrays.asList("spark2-javaopts-properties");
 -  
++
+   /**
+    * Logger.
+    */
+   private static final Logger LOG = LoggerFactory.getLogger(UpgradeCatalog252.class);
+ 
    /**
     * Constructor.
     *
@@@ -196,4 -214,86 +214,86 @@@
        updateConfigurationPropertiesForCluster(cluster, CLUSTER_ENV, newStackProperties, true, false);
      }
    }
+ 
+   /**
+    * When doing a cross-stack upgrade, we found that one config type (spark2-javaopts-properties)
+    * did not have any mappings that were selected, so it caused Ambari Server start to fail on the DB Consistency Checker.
+    * To fix this, iterate over all config types and ensure that at least one is selected.
+    * If none are selected, then pick the one with the greatest time stamp; this should be safe since we are only adding
+    * more data to use as opposed to removing.
+    */
+   private void ensureConfigTypesHaveAtLeastOneVersionSelected() {
+     ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
+     List<ClusterEntity> clusters = clusterDAO.findAll();
+ 
+     if (null == clusters) {
+       return;
+     }
+ 
+     for (ClusterEntity clusterEntity : clusters) {
+       LOG.info("Ensuring all config types have at least one selected config for cluster {}", clusterEntity.getClusterName());
+ 
+       boolean atLeastOneChanged = false;
 -      Collection<ClusterConfigMappingEntity> configMappingEntities = clusterEntity.getConfigMappingEntities();
++      Collection<ClusterConfigEntity> configEntities = clusterEntity.getClusterConfigEntities();
+ 
 -      if (configMappingEntities != null) {
++      if (configEntities != null) {
+         Set<String> configTypesNotSelected = new HashSet<>();
+         Set<String> configTypesWithAtLeastOneSelected = new HashSet<>();
+ 
 -        for (ClusterConfigMappingEntity clusterConfigMappingEntity : configMappingEntities) {
 -          String typeName = clusterConfigMappingEntity.getType();
++        for (ClusterConfigEntity clusterConfigEntity : configEntities) {
++          String typeName = clusterConfigEntity.getType();
+ 
 -          if (clusterConfigMappingEntity.isSelected() == 1) {
++          if (clusterConfigEntity.isSelected()) {
+             configTypesWithAtLeastOneSelected.add(typeName);
+           } else {
+             configTypesNotSelected.add(typeName);
+           }
+         }
+ 
+         // Due to the ordering, eliminate any configs with at least one selected.
+         configTypesNotSelected.removeAll(configTypesWithAtLeastOneSelected);
+         if (!configTypesNotSelected.isEmpty()) {
 -          LOG.info("The following config types have config mappings which don't have at least one as selected. {}", StringUtils.join(configTypesNotSelected, ", "));
++          LOG.info("The following config types have entries which are not enabled: {}", StringUtils.join(configTypesNotSelected, ", "));
+ 
+           LOG.info("Filtering to only these config types: {}", StringUtils.join(configTypesToEnsureSelected, ", "));
+           // Get the intersection with a subset of configs that are allowed to be selected during the migration.
+           configTypesNotSelected.retainAll(configTypesToEnsureSelected);
+         }
+ 
+         if (!configTypesNotSelected.isEmpty()) {
 -          LOG.info("The following config types have config mappings which don't have at least one as selected. {}", StringUtils.join(configTypesNotSelected, ", "));
++          LOG.info("The following config types have entries which don't have at least one as selected. {}", StringUtils.join(configTypesNotSelected, ", "));
+ 
+           for (String typeName : configTypesNotSelected) {
 -            ClusterConfigMappingEntity clusterConfigMappingWithGreatestTimeStamp = null;
++            ClusterConfigEntity clusterConfigMappingWithGreatestTimeStamp = null;
+ 
 -            for (ClusterConfigMappingEntity clusterConfigMappingEntity : configMappingEntities) {
 -              if (typeName.equals(clusterConfigMappingEntity.getType())) {
++            for (ClusterConfigEntity clusterConfigEntity : configEntities) {
++              if (typeName.equals(clusterConfigEntity.getType())) {
+ 
+                 if (null == clusterConfigMappingWithGreatestTimeStamp) {
 -                  clusterConfigMappingWithGreatestTimeStamp = clusterConfigMappingEntity;
++                  clusterConfigMappingWithGreatestTimeStamp = clusterConfigEntity;
+                 } else {
 -                  if (clusterConfigMappingEntity.getCreateTimestamp() >= clusterConfigMappingWithGreatestTimeStamp.getCreateTimestamp()) {
 -                    clusterConfigMappingWithGreatestTimeStamp = clusterConfigMappingEntity;
++                  if (clusterConfigEntity.getTimestamp() >= clusterConfigMappingWithGreatestTimeStamp.getTimestamp()) {
++                    clusterConfigMappingWithGreatestTimeStamp = clusterConfigEntity;
+                   }
+                 }
+               }
+             }
+ 
+             if (null != clusterConfigMappingWithGreatestTimeStamp) {
+               LOG.info("Saving. Config type {} has a mapping with tag {} and greatest timestamp {} that is not selected, so will mark it selected.",
 -                  typeName, clusterConfigMappingWithGreatestTimeStamp.getTag(), clusterConfigMappingWithGreatestTimeStamp.getCreateTimestamp());
++                  typeName, clusterConfigMappingWithGreatestTimeStamp.getTag(), clusterConfigMappingWithGreatestTimeStamp.getTimestamp());
+               atLeastOneChanged = true;
 -              clusterConfigMappingWithGreatestTimeStamp.setSelected(1);
++              clusterConfigMappingWithGreatestTimeStamp.setSelected(true);
+             }
+           }
+         } else {
+           LOG.info("All config types have at least one mapping that is selected. Nothing to do.");
+         }
+       }
+ 
+       if (atLeastOneChanged) {
 -        clusterDAO.mergeConfigMappings(configMappingEntities);
++        clusterDAO.merge(clusterEntity);
+       }
+     }
+   }
  }
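
The inner loop of ensureConfigTypesHaveAtLeastOneVersionSelected() is a max-by-timestamp scan over the entities of a single config type. Assuming a Java 8 toolchain and the accessors used in the patch, the same selection can be written as a short stream pipeline:

    // Pick the newest config entity for this type and mark it selected.
    configEntities.stream()
        .filter(entity -> typeName.equals(entity.getType()))
        .max(Comparator.comparingLong(ClusterConfigEntity::getTimestamp))
        .ifPresent(entity -> entity.setSelected(true));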

http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ClusterVersionDAOTest.java
----------------------------------------------------------------------
diff --cc ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ClusterVersionDAOTest.java
index 81fa8e1,81fa8e1..0000000
deleted file mode 100644,100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ClusterVersionDAOTest.java
+++ /dev/null
@@@ -1,264 -1,264 +1,0 @@@
--/**
-- * Licensed to the Apache Software Foundation (ASF) under one
-- * or more contributor license agreements.  See the NOTICE file
-- * distributed with this work for additional information
-- * regarding copyright ownership.  The ASF licenses this file
-- * to you under the Apache License, Version 2.0 (the
-- * "License"); you may not use this file except in compliance
-- * with the License.  You may obtain a copy of the License at
-- *
-- *     http://www.apache.org/licenses/LICENSE-2.0
-- *
-- * Unless required by applicable law or agreed to in writing, software
-- * distributed under the License is distributed on an "AS IS" BASIS,
-- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- * See the License for the specific language governing permissions and
-- * limitations under the License.
-- */
--
--package org.apache.ambari.server.orm.dao;
--
--import java.sql.SQLException;
--
--import org.apache.ambari.server.AmbariException;
--import org.apache.ambari.server.H2DatabaseCleaner;
--import org.apache.ambari.server.orm.GuiceJpaInitializer;
--import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
--import org.apache.ambari.server.orm.OrmTestHelper;
--import org.apache.ambari.server.orm.entities.ClusterEntity;
--import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
--import org.apache.ambari.server.state.RepositoryVersionState;
--import org.apache.ambari.server.state.StackId;
--import org.junit.After;
--import org.junit.Assert;
--import org.junit.Before;
--import org.junit.Test;
--
--import com.google.inject.Guice;
--import com.google.inject.Injector;
--
--/**
-- * ClusterVersionDAO unit tests.
-- */
--public class ClusterVersionDAOTest {
--
--  private static Injector injector;
--  private ClusterVersionDAO clusterVersionDAO;
--  private ClusterDAO clusterDAO;
--  private OrmTestHelper helper;
--
--  private long clusterId;
--  ClusterEntity cluster;
--  private int lastStep = -1;
--
--  ClusterVersionEntity cvA;
--  long cvAId = 0L;
--
--  ClusterVersionEntity cvB;
--  long cvBId = 0L;
--
--  ClusterVersionEntity cvC;
--  long cvCId = 0L;
--
--  private final static StackId HDP_22_STACK = new StackId("HDP", "2.2.0");
--  private final static StackId BAD_STACK = new StackId("BADSTACK", "1.0");
--
--  @Before
--  public void before() {
--    injector = Guice.createInjector(new InMemoryDefaultTestModule());
--    injector.getInstance(GuiceJpaInitializer.class);
--
--    clusterVersionDAO = injector.getInstance(ClusterVersionDAO.class);
--    clusterDAO = injector.getInstance(ClusterDAO.class);
--    helper = injector.getInstance(OrmTestHelper.class);
--  }
--
--  /**
--   * Helper function to transition the cluster through several cluster versions.
--   * @param currStep Step to go to is a value from 1 - 7, inclusive.
--   */
--  private void createRecordsUntilStep(int currStep) throws Exception {
--    // Fresh install on A
--    if (currStep >= 1 && lastStep <= 0) {
--      clusterId = helper.createCluster();
--      cluster = clusterDAO.findById(clusterId);
--
--      cvA = new ClusterVersionEntity(cluster, helper.getOrCreateRepositoryVersion(HDP_22_STACK, "2.2.0.0-995"), RepositoryVersionState.CURRENT, System.currentTimeMillis(), System.currentTimeMillis(), "admin");
--      clusterVersionDAO.create(cvA);
--      cvAId = cvA.getId();
--    } else {
--      cluster = clusterDAO.findById(clusterId);
--      cvA = clusterVersionDAO.findByPK(cvAId);
--    }
--
--    // Install B
--    if (currStep >= 2) {
--      if (lastStep <= 1) {
--        cvB = new ClusterVersionEntity(cluster, helper.getOrCreateRepositoryVersion(HDP_22_STACK, "2.2.0.1-998"), RepositoryVersionState.INSTALLED, System.currentTimeMillis(), System.currentTimeMillis(), "admin");
--        clusterVersionDAO.create(cvB);
--        cvBId = cvB.getId();
--      } else {
--        cvB = clusterVersionDAO.findByPK(cvBId);
--      }
--    }
--
--    // Switch from A to B
--    if (currStep >= 3 && lastStep <= 2) {
--      cvA.setState(RepositoryVersionState.INSTALLED);
--      cvB.setState(RepositoryVersionState.CURRENT);
--      clusterVersionDAO.merge(cvA);
--      clusterVersionDAO.merge(cvB);
--    }
--
--    // Start upgrading C
--    if (currStep >= 4) {
--      if (lastStep <= 3) {
--        cvC = new ClusterVersionEntity(cluster, helper.getOrCreateRepositoryVersion(HDP_22_STACK, "2.2.0.0-100"), RepositoryVersionState.INSTALLING, System.currentTimeMillis(), "admin");
--        clusterVersionDAO.create(cvC);
--        cvCId = cvC.getId();
--      } else {
--        cvC = clusterVersionDAO.findByPK(cvCId);
--      }
--    }
--
--    // Fail upgrade for C
--    if (currStep >= 5 && lastStep <= 4) {
--        cvC.setState(RepositoryVersionState.INSTALL_FAILED);
--        cvC.setEndTime(System.currentTimeMillis());
--        clusterVersionDAO.merge(cvC);
--    }
--
--    // Retry upgrade on C
--    if (currStep >= 6 && lastStep <= 5) {
--        cvC.setState(RepositoryVersionState.INSTALLING);
--        cvC.setEndTime(0L);
--        clusterVersionDAO.merge(cvC);
--    }
--
--    // Finalize upgrade on C to make it the current cluster version
--    if (currStep >= 7 && lastStep <= 6) {
--        cvC.setState(RepositoryVersionState.CURRENT);
--        cvC.setEndTime(System.currentTimeMillis());
--        clusterVersionDAO.merge(cvC);
--
--        cvA.setState(RepositoryVersionState.INSTALLED);
--        cvB.setState(RepositoryVersionState.INSTALLED);
--        clusterVersionDAO.merge(cvA);
--        clusterVersionDAO.merge(cvB);
--    }
--
--    lastStep = currStep;
--  }
--
--  @Test
--  public void testFindByStackAndVersion() throws Exception {
--    createRecordsUntilStep(1);
--    Assert.assertEquals(
--        0,
--        clusterVersionDAO.findByStackAndVersion("non existing", "non existing",
--            "non existing").size());
--
--    Assert.assertEquals(
--        1,
--        clusterVersionDAO.findByStackAndVersion(HDP_22_STACK.getStackName(),
--            HDP_22_STACK.getStackVersion(), "2.2.0.0-995").size());
--  }
--
--  @Test
--  public void testFindByCluster() throws Exception {
--    createRecordsUntilStep(1);
--    Assert.assertEquals(0, clusterVersionDAO.findByCluster("non existing").size());
--    Assert.assertEquals(1, clusterVersionDAO.findByCluster(cluster.getClusterName()).size());
--  }
--
--  @Test
--  public void testFindByClusterAndStackAndVersion() throws Exception {
--    createRecordsUntilStep(1);
--    Assert.assertNull(clusterVersionDAO.findByClusterAndStackAndVersion(
--        cluster.getClusterName(), BAD_STACK, "non existing"));
--
--    Assert.assertNotNull(clusterVersionDAO.findByClusterAndStackAndVersion(
--        cluster.getClusterName(), HDP_22_STACK, "2.2.0.0-995"));
--  }
--
--  /**
--   * At all times the cluster should have a cluster version whose state is {@link org.apache.ambari.server.state.RepositoryVersionState#CURRENT}
--   */
--  @Test
--  public void testFindByClusterAndStateCurrent() throws Exception {
--    createRecordsUntilStep(1);
--    Assert.assertNotNull(clusterVersionDAO.findByClusterAndStateCurrent(cluster.getClusterName()));
--
--    createRecordsUntilStep(2);
--    Assert.assertNotNull(clusterVersionDAO.findByClusterAndStateCurrent(cluster.getClusterName()));
--
--    createRecordsUntilStep(3);
--    Assert.assertNotNull(clusterVersionDAO.findByClusterAndStateCurrent(cluster.getClusterName()));
--
--    createRecordsUntilStep(4);
--    Assert.assertNotNull(clusterVersionDAO.findByClusterAndStateCurrent(cluster.getClusterName()));
--
--    createRecordsUntilStep(5);
--    Assert.assertNotNull(clusterVersionDAO.findByClusterAndStateCurrent(cluster.getClusterName()));
--
--    createRecordsUntilStep(6);
--    Assert.assertNotNull(clusterVersionDAO.findByClusterAndStateCurrent(cluster.getClusterName()));
--
--    createRecordsUntilStep(7);
--    Assert.assertNotNull(clusterVersionDAO.findByClusterAndStateCurrent(cluster.getClusterName()));
--  }
--
--  /**
--   * Test the state of certain cluster versions.
--   */
--  @Test
--  public void testFindByClusterAndState() throws Exception {
--    createRecordsUntilStep(1);
--    Assert.assertEquals(1, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.CURRENT).size());
--    Assert.assertEquals(0, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALLED).size());
--    Assert.assertEquals(0, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALLING).size());
--    Assert.assertEquals(0, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALL_FAILED).size());
--
--    createRecordsUntilStep(2);
--    Assert.assertEquals(1, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.CURRENT).size());
--    Assert.assertEquals(1, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALLED).size());
--    Assert.assertEquals(0, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALLING).size());
--    Assert.assertEquals(0, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALL_FAILED).size());
--
--    createRecordsUntilStep(3);
--    Assert.assertEquals(1, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.CURRENT).size());
--    Assert.assertEquals(1, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALLED).size());
--    Assert.assertEquals(0, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALLING).size());
--    Assert.assertEquals(0, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALL_FAILED).size());
--
--    createRecordsUntilStep(4);
--    Assert.assertEquals(1, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.CURRENT).size());
--    Assert.assertEquals(1, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALLED).size());
--    Assert.assertEquals(1, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALLING).size());
--    Assert.assertEquals(0, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALL_FAILED).size());
--
--    createRecordsUntilStep(5);
--    Assert.assertEquals(1, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.CURRENT).size());
--    Assert.assertEquals(1, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALLED).size());
--    Assert.assertEquals(0, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALLING).size());
--    Assert.assertEquals(1, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALL_FAILED).size());
--
--    createRecordsUntilStep(6);
--    Assert.assertEquals(1, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.CURRENT).size());
--    Assert.assertEquals(1, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALLED).size());
--    Assert.assertEquals(1, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALLING).size());
--    Assert.assertEquals(0, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALL_FAILED).size());
--
--    createRecordsUntilStep(7);
--    Assert.assertEquals(1, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.CURRENT).size());
--    Assert.assertEquals(2, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALLED).size());
--    Assert.assertEquals(0, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALLING).size());
--    Assert.assertEquals(0, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALL_FAILED).size());
--  }
--
--  @After
--  public void after() throws AmbariException, SQLException {
--    H2DatabaseCleaner.clearDatabaseAndStopPersistenceService(injector);
--    injector = null;
--  }
--}


[30/50] [abbrv] ambari git commit: AMBARI-21480. NPE during "Update Kerberos Descriptor" (rlevas)

Posted by jo...@apache.org.
AMBARI-21480. NPE during "Update Kerberos Descriptor" (rlevas)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/48bc7635
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/48bc7635
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/48bc7635

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 48bc7635e5be563210a69d7342d3d9b957075864
Parents: ec3cf22
Author: Robert Levas <rl...@hortonworks.com>
Authored: Fri Jul 14 16:43:25 2017 -0400
Committer: Robert Levas <rl...@hortonworks.com>
Committed: Fri Jul 14 16:43:25 2017 -0400

----------------------------------------------------------------------
 .../KerberosDescriptorUpdateHelper.java         |  9 ++-
 .../KerberosDescriptorUpdateHelperTest.java     | 70 ++++++++++++++++++++
 2 files changed, 77 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/48bc7635/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelper.java
index 2eef4b9..8da32a5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelper.java
@@ -340,6 +340,11 @@ public class KerberosDescriptorUpdateHelper {
   /**
    * Processes a {@link KerberosIdentityDescriptor} to change the user-supplied data based on the changes
    * observed between the previous stack version's data and the new stack version's data.
+   * <p>
+   * It is expected that <code>newStackIdentity</code> and <code>userIdentity</code> are not null.
+   * However, <code>previousStackIdentity</code> may be null in the event the user added a Kerberos
+   * identity that was later also added in the new Kerberos descriptor.  In this case, the user's values
+   * for the principal name and keytab file are kept while adding any other changes from the new stack.
    *
    * @param previousStackIdentity a {@link KerberosIdentityDescriptor} from the previous stack version's Kerberos descriptor
    * @param newStackIdentity      a {@link KerberosIdentityDescriptor} from the new stack version's Kerberos descriptor
@@ -357,7 +362,7 @@ public class KerberosDescriptorUpdateHelper {
     // If the new identity definition is a reference and no longer has a principal definition,
     // Ignore any user changes to the old principal definition.
     if (updatedValuePrincipal != null) {
-      KerberosPrincipalDescriptor oldValuePrincipal = previousStackIdentity.getPrincipalDescriptor();
+      KerberosPrincipalDescriptor oldValuePrincipal = (previousStackIdentity == null) ? null : previousStackIdentity.getPrincipalDescriptor();
       String previousValuePrincipalValue = null;
       KerberosPrincipalDescriptor userValuePrincipal = userIdentity.getPrincipalDescriptor();
       String userValuePrincipalValue = null;
@@ -380,7 +385,7 @@ public class KerberosDescriptorUpdateHelper {
     // If the new identity definition is a reference and no longer has a keytab definition,
     // Ignore any user changes to the old keytab definition.
     if (updatedValueKeytab != null) {
-      KerberosKeytabDescriptor oldValueKeytab = previousStackIdentity.getKeytabDescriptor();
+      KerberosKeytabDescriptor oldValueKeytab = (previousStackIdentity == null) ? null : previousStackIdentity.getKeytabDescriptor();
       String previousValueKeytabFile = null;
       KerberosKeytabDescriptor userValueKeytab = userIdentity.getKeytabDescriptor();
       String userValueKeytabFile = null;
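
The null guards above let the merge fall back to the user's values when the
previous stack had no matching identity. A minimal sketch of that rule in
Python, with illustrative names rather than Ambari's API:

    def merge_value(previous_stack_value, new_stack_value, user_value):
        # Identity is new to the stack: keep the user's customization if present.
        if previous_stack_value is None:
            return user_value if user_value is not None else new_stack_value
        # User changed the old stack default: keep the user's change.
        if user_value is not None and user_value != previous_stack_value:
            return user_value
        # Otherwise adopt the new stack's value.
        return new_stack_value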

http://git-wip-us.apache.org/repos/asf/ambari/blob/48bc7635/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelperTest.java
index 1a78ea8..8055c35 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelperTest.java
@@ -269,6 +269,16 @@ public class KerberosDescriptorUpdateHelperTest extends EasyMockSupport {
             "      \"keytab\": {" +
             "        \"file\": \"${keytab_dir}/ambari.server.keytab\"" +
             "      }" +
+            "    }," +
+            "    {" +
+            "      \"name\": \"future_identity\"," +
+            "      \"principal\": {" +
+            "        \"value\": \"CHANGED_future${principal_suffix}@${realm}\"," +
+            "        \"type\": \"user\"" +
+            "      }," +
+            "      \"keytab\": {" +
+            "        \"file\": \"${keytab_dir}/future.user.keytab\"" +
+            "      }" +
             "    }" +
             "  ]" +
             "}");
@@ -325,6 +335,26 @@ public class KerberosDescriptorUpdateHelperTest extends EasyMockSupport {
             "      \"keytab\": {" +
             "        \"file\": \"${keytab_dir}/ambari.server.keytab\"" +
             "      }" +
+            "    }," +
+            "    {" +
+            "      \"name\": \"custom_identity\"," +
+            "      \"principal\": {" +
+            "        \"value\": \"custom${principal_suffix}@${realm}\"," +
+            "        \"type\": \"user\"" +
+            "      }," +
+            "      \"keytab\": {" +
+            "        \"file\": \"${keytab_dir}/custom.user.keytab\"" +
+            "      }" +
+            "    }," +
+            "    {" +
+            "      \"name\": \"future_identity\"," +
+            "      \"principal\": {" +
+            "        \"value\": \"future${principal_suffix}@${realm}\"," +
+            "        \"type\": \"user\"" +
+            "      }," +
+            "      \"keytab\": {" +
+            "        \"file\": \"${keytab_dir}/future.user.keytab\"" +
+            "      }" +
             "    }" +
             "  ]" +
             "}");
@@ -340,6 +370,26 @@ public class KerberosDescriptorUpdateHelperTest extends EasyMockSupport {
             "{\n" +
                 "  \"identities\": [\n" +
                 "    {\n" +
+                "      \"name\": \"future_identity\",\n" +
+                "      \"principal\": {\n" +
+                "        \"value\": \"future${principal_suffix}@${realm}\",\n" +
+                "        \"type\": \"user\"\n" +
+                "      },\n" +
+                "      \"keytab\": {\n" +
+                "        \"file\": \"${keytab_dir}/future.user.keytab\"\n" +
+                "      }\n" +
+                "    },\n" +
+                "    {\n" +
+                "      \"name\": \"custom_identity\",\n" +
+                "      \"principal\": {\n" +
+                "        \"value\": \"custom${principal_suffix}@${realm}\",\n" +
+                "        \"type\": \"user\"\n" +
+                "      },\n" +
+                "      \"keytab\": {\n" +
+                "        \"file\": \"${keytab_dir}/custom.user.keytab\"\n" +
+                "      }\n" +
+                "    },\n" +
+                "    {\n" +
                 "      \"name\": \"spnego\",\n" +
                 "      \"principal\": {\n" +
                 "        \"value\": \"CHANGED_HTTP/_HOST@${realm}\",\n" +
@@ -402,6 +452,26 @@ public class KerberosDescriptorUpdateHelperTest extends EasyMockSupport {
                 "      }\n" +
                 "    },\n" +
                 "    {\n" +
+                "      \"name\": \"custom_identity\",\n" +
+                "      \"principal\": {\n" +
+                "        \"value\": \"custom${principal_suffix}@${realm}\",\n" +
+                "        \"type\": \"user\"\n" +
+                "      },\n" +
+                "      \"keytab\": {\n" +
+                "        \"file\": \"${keytab_dir}/custom.user.keytab\"\n" +
+                "      }\n" +
+                "    },\n" +
+                "    {\n" +
+                "      \"name\": \"future_identity\",\n" +
+                "      \"principal\": {\n" +
+                "        \"value\": \"future${principal_suffix}@${realm}\",\n" +
+                "        \"type\": \"user\"\n" +
+                "      },\n" +
+                "      \"keytab\": {\n" +
+                "        \"file\": \"${keytab_dir}/future.user.keytab\"\n" +
+                "      }\n" +
+                "    },\n" +
+                "    {\n" +
                 "      \"name\": \"spnego\",\n" +
                 "      \"principal\": {\n" +
                 "        \"value\": \"CHANGED_HTTP/_HOST@${realm}\",\n" +


[10/50] [abbrv] ambari git commit: AMBARI-21462. Readd TITAN, R4ML, SYSTEMML, JNBG to BigInsights and fix HBase backup during EU and imports (alejandro)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/params.py
new file mode 100755
index 0000000..3cb7aef
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/params.py
@@ -0,0 +1,128 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+
+# server configurations
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+stack_name = default("/hostLevelParams/stack_name", None)
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+iop_stack_version = format_stack_version(stack_version_unformatted)
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+version = default("/commandParams/version", None)
+
+titan_user = config['configurations']['titan-env']['titan_user']
+user_group = config['configurations']['cluster-env']['user_group']
+titan_bin_dir = '/usr/iop/current/titan-client/bin'
+
+
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+
+# titan configurations
+titan_conf_dir = "/usr/iop/current/titan-client/conf"
+titan_hbase_solr_props = config['configurations']['titan-hbase-solr']['content']
+titan_env_props = config['configurations']['titan-env']['content']
+log4j_console_props = config['configurations']['titan-log4j']['content']
+
+# not supporting 32 bit jdk.
+java64_home = config['hostLevelParams']['java_home']
+hadoop_config_dir = '/etc/hadoop/conf'
+hbase_config_dir = '/etc/hbase/conf'
+
+# Titan requires 'storage.hostname', which is the HBase cluster in IOP 4.2.
+# The host name should be the ZooKeeper quorum.
+storage_hosts = config['clusterHostInfo']['zookeeper_hosts']
+storage_host_list = []
+for hostname in storage_hosts:
+  storage_host_list.append(hostname)
+storage_host = ",".join(storage_host_list)
+hbase_zookeeper_parent = config['configurations']['hbase-site']['zookeeper.znode.parent']
+
+# Solr cloud host
+solr_hosts = config['clusterHostInfo']['solr_hosts']
+solr_host_list = []
+for hostname in solr_hosts:
+  solr_host_list.append(hostname)
+solr_host = ",".join(solr_host_list)
+solr_server_host = solr_hosts[0]
+
+# The Titan client does not work right now because there is no 'titan_host' in 'clusterHostInfo';
+# it would return "Configuration parameter 'titan_host' was not found in configurations dictionary!"
+# This is a known issue tracked as task 118900; for now, Titan and Solr are installed on the same node.
+# titan_host = config['clusterHostInfo']['titan_host']
+titan_host = solr_server_host
+
+# The conf directory and jar should be copied to the Solr site
+titan_dir = format('/usr/iop/current/titan-client')
+titan_ext_dir = format('/usr/iop/current/titan-client/ext')
+titan_solr_conf_dir = format('/usr/iop/current/titan-client/conf/solr')
+titan_solr_jar_file = format('/usr/iop/current/titan-client/lib/jts-1.13.jar')
+
+titan_solr_hdfs_dir = "/apps/titan"
+titan_solr_hdfs_conf_dir = "/apps/titan/conf"
+titan_solr_hdfs_jar = "/apps/titan/jts-1.13.jar"
+titan_tmp_dir = format('{tmp_dir}/titan')
+titan_solr_dir = format('{titan_tmp_dir}/solr_installed')
+configuration_tags = config['configurationTags']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+titan_hdfs_mode = 0775
+
+#for create_hdfs_directory
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+kinit_path_local = get_kinit_path()
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+hdfs_site = config['configurations']['hdfs-site']
+hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', 'missing_principal').replace("_HOST", hostname)
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+import functools
+# to create an HDFS directory we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user = hdfs_user,
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
+)
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/service_check.py
new file mode 100755
index 0000000..3c011a1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/service_check.py
@@ -0,0 +1,64 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import os
+from resource_management import *
+from resource_management.core.resources.system import Execute, File
+from resource_management.core.source import InlineTemplate, StaticFile
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.version import compare_versions
+from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.validate import call_and_match_output
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+class TitanServiceCheck(Script):
+    pass
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class TitanServiceCheckLinux(TitanServiceCheck):
+    def service_check(self, env):
+        import params
+        env.set_params(params)
+
+        File( format("{tmp_dir}/titanSmoke.groovy"),
+              content = StaticFile("titanSmoke.groovy"),
+              mode = 0755
+              )
+
+        if params.iop_stack_version != "" and compare_versions(params.iop_stack_version, '4.2') >= 0:
+            if params.security_enabled:
+                kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
+                Execute(kinit_cmd,
+                        user=params.smokeuser
+                        )
+
+            Execute(format("gremlin {tmp_dir}/titanSmoke.groovy"),
+                    tries     = 3,
+                    try_sleep = 5,
+                    path      = format('{titan_bin_dir}:/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'),
+                    user      = params.smokeuser,
+                    logoutput = True
+                    )
+
+if __name__ == "__main__":
+    # print "Track service check status"
+    TitanServiceCheckLinux().execute()
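
The Execute call above retries the smoke test (tries=3, try_sleep=5). For
readers without an Ambari agent at hand, a plain-Python approximation of that
retry loop, assuming a zero exit code means success:

    import subprocess
    import time

    def run_with_retries(cmd, tries=3, try_sleep=5):
        for attempt in range(1, tries + 1):
            if subprocess.call(cmd, shell=True) == 0:
                return
            if attempt < tries:
                time.sleep(try_sleep)
        raise RuntimeError("command failed after %d tries: %s" % (tries, cmd))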

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/titan.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/titan.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/titan.py
new file mode 100755
index 0000000..fd94c82
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/titan.py
@@ -0,0 +1,70 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import os
+from resource_management import *
+from resource_management.core.source import InlineTemplate
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def titan():
+    import params
+
+    Directory(params.titan_conf_dir,
+               create_parents = True,
+               owner=params.titan_user,
+               group=params.user_group
+               )
+
+    File(format("{params.titan_conf_dir}/titan-env.sh"),
+             mode=0644,
+             group=params.user_group,
+             owner=params.titan_user,
+             content=InlineTemplate(params.titan_env_props)
+             )
+
+    # titan-hbase-solr_properties is always set to a default even if it's not in the payload
+    File(format("{params.titan_conf_dir}/titan-hbase-solr.properties"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.titan_user,
+         content=InlineTemplate(params.titan_hbase_solr_props)
+         )
+
+    if (params.log4j_console_props != None):
+        File(format("{params.titan_conf_dir}/log4j-console.properties"),
+             mode=0644,
+             group=params.user_group,
+             owner=params.titan_user,
+             content=InlineTemplate(params.log4j_console_props)
+             )
+    elif (os.path.exists(format("{params.titan_conf_dir}/log4j-console.properties"))):
+        File(format("{params.titan_conf_dir}/log4j-console.properties"),
+             mode=0644,
+             group=params.user_group,
+             owner=params.titan_user
+             )
+    # Change titan ext directory for multiple user access
+    Directory(params.titan_ext_dir,
+               create_parents = True,
+               owner=params.titan_user,
+               group=params.user_group,
+               mode=0775
+               )

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/titan_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/titan_client.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/titan_client.py
new file mode 100755
index 0000000..d54ccee
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/titan_client.py
@@ -0,0 +1,58 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+import os
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from titan import titan
+
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+class TitanClient(Script):
+    def configure(self, env):
+        import params
+        env.set_params(params)
+        titan()
+
+    def status(self, env):
+        raise ClientComponentHasNoStatus()
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class TitanClientLinux(TitanClient):
+    def get_component_name(self):
+        return "titan-client"
+
+    def pre_rolling_restart(self, env):
+        import params
+        env.set_params(params)
+
+        if params.version and compare_versions(format_stack_version(params.version), '4.2.0.0') >= 0:
+            conf_select.select(params.stack_name, "titan", params.version)
+            stack_select.select("titan-client", params.version)
+
+    def install(self, env):
+        self.install_packages(env)
+        self.configure(env)
+
+if __name__ == "__main__":
+    TitanClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
index 4867626..b66c234 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
@@ -107,7 +107,7 @@
       <execute-stage service="HBASE" component="HBASE_MASTER" title="Snapshot HBASE">
         <task xsi:type="execute" hosts="master">
           <script>scripts/hbase_upgrade.py</script>
-          <function>snapshot</function>
+          <function>take_snapshot</function>
         </task>
       </execute-stage>
 


[45/50] [abbrv] ambari git commit: AMBARI-21499 Move Stack-Specific Custom Actions Under the Stack Dropdown. (atkach)

Posted by jo...@apache.org.
AMBARI-21499 Move Stack-Specific Custom Actions Under the Stack Dropdown. (atkach)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/15349983
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/15349983
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/15349983

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 15349983ed14804b83f99281b344a2339dcbf8c8
Parents: 1e09ad0
Author: Andrii Tkach <at...@apache.org>
Authored: Mon Jul 17 19:38:04 2017 +0300
Committer: Andrii Tkach <at...@apache.org>
Committed: Mon Jul 17 19:38:04 2017 +0300

----------------------------------------------------------------------
 .../main/admin/stack_and_upgrade_controller.js  | 22 ++++++++++
 .../main/admin/stack_upgrade/versions.hbs       |  9 +---
 .../stack_upgrade/upgrade_version_box_view.js   | 11 ++++-
 .../main/admin/stack_upgrade/versions_view.js   | 26 -----------
 .../admin/stack_and_upgrade_controller_test.js  | 19 ++++++++
 .../admin/stack_upgrade/version_view_test.js    | 46 --------------------
 6 files changed, 52 insertions(+), 81 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/15349983/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js b/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
index db4df7f..8527e29 100644
--- a/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
+++ b/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
@@ -1431,6 +1431,28 @@ App.MainAdminStackAndUpgradeController = Em.Controller.extend(App.LocalStorage,
     });
   },
 
+  removeIopSelect: function() {
+    return App.showConfirmationPopup(function () {
+      App.ajax.send({
+        name: 'admin.stack_versions.removeIopSelect',
+        sender: this,
+        data: {
+          hosts: App.get('allHostNames').join(',')
+        }
+      }).fail(function(xhr) {
+        App.ModalPopup.show({
+          header: Em.I18n.t('admin.stackVersions.removeIopSelect.fail'),
+          secondary: false,
+          bodyClass: App.AjaxDefaultErrorPopupBodyView.extend({
+            type: 'POST',
+            status: xhr.status,
+            message: xhr.responseText
+          })
+        });
+      });
+    });
+  },
+
   /**
    * sends request to install repoVersion to the cluster
   * and create clusterStackVersion resource

http://git-wip-us.apache.org/repos/asf/ambari/blob/15349983/ambari-web/app/templates/main/admin/stack_upgrade/versions.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/admin/stack_upgrade/versions.hbs b/ambari-web/app/templates/main/admin/stack_upgrade/versions.hbs
index b7fac72..c185f10 100644
--- a/ambari-web/app/templates/main/admin/stack_upgrade/versions.hbs
+++ b/ambari-web/app/templates/main/admin/stack_upgrade/versions.hbs
@@ -40,13 +40,6 @@
       {{/each}}
     </ul>
   </div>
-  {{#isAuthorized "AMBARI.MANAGE_STACK_VERSIONS"}}
-    {{#if view.showRemoveIopSelect}}
-      <button class="btn btn-danger pull-right" {{action removeIopSelect target="view"}} id="remove-iop-select">
-        {{t admin.stackVersions.removeIopSelect}}
-      </button>
-    {{/if}}
-  {{/isAuthorized}}
 </div>
 <div id="versions-section" class="row-fluid">
   <div class="span2 left-menu-table">
@@ -75,4 +68,4 @@
       {{view App.SpinnerView}}
     {{/if}}
   </div>
-</div>
\ No newline at end of file
+</div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/15349983/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js b/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js
index 46ef020..956339f 100644
--- a/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js
+++ b/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js
@@ -195,6 +195,13 @@ App.UpgradeVersionBoxView = Em.View.extend({
             action: 'installRepoVersionConfirmation',
             isDisabled: isDisabled
           });
+          if (App.get('currentStackName') === 'BigInsights' && !App.get('upgradeIsRunning')) {
+            element.get('buttons').pushObject({
+              text: Em.I18n.t('admin.stackVersions.removeIopSelect'),
+              action: 'removeIopSelect',
+              isDisabled: isDisabled
+            });
+          }
         }
         element.set('isDisabled', isDisabled);
       }
@@ -241,7 +248,9 @@ App.UpgradeVersionBoxView = Em.View.extend({
     'isUpgrading',
     'controller.requestInProgress',
     'controller.requestInProgressRepoId',
-    'parentView.repoVersions.@each.status'
+    'parentView.repoVersions.@each.status',
+    'App.currentStackName',
+    'App.upgradeIsRunning'
   ),
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/15349983/ambari-web/app/views/main/admin/stack_upgrade/versions_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/admin/stack_upgrade/versions_view.js b/ambari-web/app/views/main/admin/stack_upgrade/versions_view.js
index 699ee5f..c87cb81 100644
--- a/ambari-web/app/views/main/admin/stack_upgrade/versions_view.js
+++ b/ambari-web/app/views/main/admin/stack_upgrade/versions_view.js
@@ -243,32 +243,6 @@ App.MainAdminStackVersionsView = Em.View.extend({
         self.doPolling();
       });
     }
-  },
-
-  showRemoveIopSelect: function() {
-    return App.get('currentStackName') === 'BigInsights' && !App.get('upgradeIsRunning');
-  }.property('App.currentStackName', 'App.upgradeIsRunning'),
-
-  removeIopSelect: function() {
-    return App.showConfirmationPopup(function () {
-      App.ajax.send({
-        name: 'admin.stack_versions.removeIopSelect',
-        sender: this,
-        data: {
-          hosts: App.get('allHostNames').join(',')
-        }
-      }).fail(function(xhr) {
-        App.ModalPopup.show({
-          header: Em.I18n.t('admin.stackVersions.removeIopSelect.fail'),
-          secondary: false,
-          bodyClass: App.AjaxDefaultErrorPopupBodyView.extend({
-            type: 'POST',
-            status: xhr.status,
-            message: xhr.responseText
-          })
-        });
-      });
-    });
   }
 
 });

http://git-wip-us.apache.org/repos/asf/ambari/blob/15349983/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js b/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js
index 2098fd6..c346c6f 100644
--- a/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js
+++ b/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js
@@ -3400,4 +3400,23 @@ describe('App.MainAdminStackAndUpgradeController', function() {
     });
   });
 
+  describe('#removeIopSelect', function() {
+    beforeEach(function() {
+      sinon.stub(App, 'showConfirmationPopup', Em.clb);
+      sinon.stub(App, 'showAlertPopup');
+      sinon.stub(App.ModalPopup, 'show');
+    });
+    afterEach(function() {
+      App.showConfirmationPopup.restore();
+      App.showAlertPopup.restore();
+      App.ModalPopup.show.restore();
+    });
+
+    it('App.ajax.send should be called', function() {
+      controller.removeIopSelect();
+      var args = testHelpers.findAjaxRequest('name', 'admin.stack_versions.removeIopSelect');
+      expect(args[0]).exists;
+    });
+  });
+
 });

http://git-wip-us.apache.org/repos/asf/ambari/blob/15349983/ambari-web/test/views/main/admin/stack_upgrade/version_view_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/admin/stack_upgrade/version_view_test.js b/ambari-web/test/views/main/admin/stack_upgrade/version_view_test.js
index 6da0a19..cf9906f 100644
--- a/ambari-web/test/views/main/admin/stack_upgrade/version_view_test.js
+++ b/ambari-web/test/views/main/admin/stack_upgrade/version_view_test.js
@@ -485,50 +485,4 @@ describe('App.mainAdminStackVersionsView', function () {
       expect(view.get('controller').load.called).to.be.false;
     });
   });
-
-  describe('#showRemoveIopSelect', function() {
-    beforeEach(function() {
-      this.mockApp = sinon.stub(App, 'get');
-    });
-    afterEach(function() {
-      this.mockApp.restore();
-    });
-
-    it('should be true when BigInsights stack and upgrade not running', function() {
-      this.mockApp.withArgs('currentStackName').returns('BigInsights');
-      this.mockApp.withArgs('upgradeIsRunning').returns(false);
-      expect(view.get('showRemoveIopSelect')).to.be.true;
-    });
-
-    it('should be false when BigInsights stack and upgrade running', function() {
-      this.mockApp.withArgs('currentStackName').returns('BigInsights');
-      this.mockApp.withArgs('upgradeIsRunning').returns(true);
-      expect(view.get('showRemoveIopSelect')).to.be.false;
-    });
-
-    it('should be false when HDP stack and upgrade not running', function() {
-      this.mockApp.withArgs('currentStackName').returns('HDP');
-      this.mockApp.withArgs('upgradeIsRunning').returns(false);
-      expect(view.get('showRemoveIopSelect')).to.be.false;
-    });
-  });
-
-  describe('#removeIopSelect', function() {
-    beforeEach(function() {
-      sinon.stub(App, 'showConfirmationPopup', Em.clb);
-      sinon.stub(App, 'showAlertPopup');
-      sinon.stub(App.ModalPopup, 'show');
-    });
-    afterEach(function() {
-      App.showConfirmationPopup.restore();
-      App.showAlertPopup.restore();
-      App.ModalPopup.show.restore();
-    });
-
-    it('App.ajax.send should be called', function() {
-      view.removeIopSelect();
-      var args = testHelpers.findAjaxRequest('name', 'admin.stack_versions.removeIopSelect');
-      expect(args[0]).exists;
-    });
-  });
 });


[18/50] [abbrv] ambari git commit: Merge branch 'branch-2.5' into branch-feature-AMBARI-21348

Posted by jo...@apache.org.
Merge branch 'branch-2.5' into branch-feature-AMBARI-21348


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/133baa53
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/133baa53
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/133baa53

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 133baa53fac0e6a6bb372e4183172236920fd54c
Parents: 94eb0dd a389f85
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Thu Jul 13 12:38:57 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Thu Jul 13 12:38:57 2017 -0400

----------------------------------------------------------------------
 .../libraries/functions/stack_tools.py          |  13 ++
 .../upgrades/ChangeStackReferencesAction.java   |   4 +-
 .../0.96.0.2.0/package/scripts/hbase_master.py  |  10 +-
 .../0.96.0.2.0/package/scripts/hbase_service.py |  41 ++--
 .../0.12.0.2.0/package/scripts/params_linux.py  |   4 +
 .../0.12.0.2.0/package/scripts/service_check.py |   3 +-
 .../common-services/JNBG/0.2.0/alerts.json      |  32 +++
 .../JNBG/0.2.0/configuration/jnbg-env.xml       | 209 +++++++++++++++++++
 .../common-services/JNBG/0.2.0/kerberos.json    |  59 ++++++
 .../common-services/JNBG/0.2.0/metainfo.xml     | 108 ++++++++++
 .../JNBG/0.2.0/package/files/jkg_install.sh     | 169 +++++++++++++++
 .../JNBG/0.2.0/package/files/jkg_start.sh       |  84 ++++++++
 .../JNBG/0.2.0/package/files/log4j_setup.sh     |  79 +++++++
 .../0.2.0/package/files/pyspark_configure.sh    | 104 +++++++++
 .../JNBG/0.2.0/package/files/pythonenv_setup.sh | 138 ++++++++++++
 .../JNBG/0.2.0/package/files/toree_configure.sh | 151 ++++++++++++++
 .../JNBG/0.2.0/package/files/toree_install.sh   | 176 ++++++++++++++++
 .../JNBG/0.2.0/package/scripts/jkg_toree.py     | 134 ++++++++++++
 .../0.2.0/package/scripts/jkg_toree_params.py   | 177 ++++++++++++++++
 .../JNBG/0.2.0/package/scripts/jnbg_helpers.py  |  81 +++++++
 .../JNBG/0.2.0/package/scripts/jnbg_params.py   |  66 ++++++
 .../JNBG/0.2.0/package/scripts/py_client.py     |  63 ++++++
 .../0.2.0/package/scripts/py_client_params.py   |  39 ++++
 .../JNBG/0.2.0/package/scripts/service_check.py |  44 ++++
 .../JNBG/0.2.0/package/scripts/status_params.py |  26 +++
 .../0.5.0.2.2/package/scripts/params_linux.py   |   8 +
 .../KNOX/0.5.0.2.2/package/scripts/upgrade.py   |   2 +-
 .../R4ML/0.8.0/configuration/r4ml-env.xml       |  48 +++++
 .../common-services/R4ML/0.8.0/metainfo.xml     |  92 ++++++++
 .../R4ML/0.8.0/package/files/Install.R          |  25 +++
 .../R4ML/0.8.0/package/files/ServiceCheck.R     |  28 +++
 .../R4ML/0.8.0/package/files/localr.repo        |  22 ++
 .../R4ML/0.8.0/package/scripts/__init__.py      |  19 ++
 .../R4ML/0.8.0/package/scripts/params.py        |  80 +++++++
 .../R4ML/0.8.0/package/scripts/r4ml_client.py   | 201 ++++++++++++++++++
 .../R4ML/0.8.0/package/scripts/service_check.py |  45 ++++
 .../SYSTEMML/0.10.0/metainfo.xml                |  77 +++++++
 .../SYSTEMML/0.10.0/package/scripts/__init__.py |  19 ++
 .../SYSTEMML/0.10.0/package/scripts/params.py   |  40 ++++
 .../0.10.0/package/scripts/service_check.py     |  43 ++++
 .../0.10.0/package/scripts/systemml_client.py   |  49 +++++
 .../common-services/TITAN/1.0.0/alerts.json     |  33 +++
 .../1.0.0/configuration/gremlin-server.xml      |  85 ++++++++
 .../TITAN/1.0.0/configuration/hadoop-gryo.xml   |  94 +++++++++
 .../1.0.0/configuration/hadoop-hbase-read.xml   | 102 +++++++++
 .../TITAN/1.0.0/configuration/titan-env.xml     | 157 ++++++++++++++
 .../1.0.0/configuration/titan-hbase-solr.xml    |  69 ++++++
 .../TITAN/1.0.0/configuration/titan-log4j.xml   |  65 ++++++
 .../common-services/TITAN/1.0.0/kerberos.json   |  52 +++++
 .../common-services/TITAN/1.0.0/metainfo.xml    | 124 +++++++++++
 .../package/alerts/alert_check_titan_server.py  |  65 ++++++
 .../package/files/gremlin-server-script.sh      |  86 ++++++++
 .../package/files/tinkergraph-empty.properties  |  18 ++
 .../TITAN/1.0.0/package/files/titanSmoke.groovy |  20 ++
 .../TITAN/1.0.0/package/scripts/params.py       | 202 ++++++++++++++++++
 .../1.0.0/package/scripts/params_server.py      |  37 ++++
 .../1.0.0/package/scripts/service_check.py      |  88 ++++++++
 .../TITAN/1.0.0/package/scripts/titan.py        | 143 +++++++++++++
 .../TITAN/1.0.0/package/scripts/titan_client.py |  61 ++++++
 .../TITAN/1.0.0/package/scripts/titan_server.py |  67 ++++++
 .../1.0.0/package/scripts/titan_service.py      | 150 +++++++++++++
 .../templates/titan_solr_client_jaas.conf.j2    |  23 ++
 .../package/templates/titan_solr_jaas.conf.j2   |  26 +++
 .../BigInsights/4.2.5/role_command_order.json   |  12 +-
 .../4.2.5/services/JNBG/metainfo.xml            |  26 +++
 .../4.2.5/services/R4ML/metainfo.xml            |  37 ++++
 .../4.2.5/services/SYSTEMML/metainfo.xml        |  37 ++++
 .../4.2.5/services/TITAN/metainfo.xml           |  40 ++++
 .../BigInsights/4.2.5/services/stack_advisor.py |  53 +++++
 .../4.2.5/upgrades/config-upgrade.xml           |  68 ++++++
 .../upgrades/nonrolling-upgrade-to-hdp-2.6.xml  | 166 ++++++++++++++-
 .../BigInsights/4.2/role_command_order.json     |   3 +-
 .../4.2/services/SYSTEMML/metainfo.xml          |  77 +++++++
 .../SYSTEMML/package/scripts/__init__.py        |  19 ++
 .../services/SYSTEMML/package/scripts/params.py |  40 ++++
 .../SYSTEMML/package/scripts/service_check.py   |  43 ++++
 .../SYSTEMML/package/scripts/systemml_client.py |  49 +++++
 .../services/TITAN/configuration/titan-env.xml  |  48 +++++
 .../TITAN/configuration/titan-hbase-solr.xml    |  67 ++++++
 .../TITAN/configuration/titan-log4j.xml         |  66 ++++++
 .../4.2/services/TITAN/kerberos.json            |  17 ++
 .../BigInsights/4.2/services/TITAN/metainfo.xml |  88 ++++++++
 .../TITAN/package/files/titanSmoke.groovy       |  20 ++
 .../services/TITAN/package/scripts/params.py    | 128 ++++++++++++
 .../TITAN/package/scripts/service_check.py      |  64 ++++++
 .../4.2/services/TITAN/package/scripts/titan.py |  70 +++++++
 .../TITAN/package/scripts/titan_client.py       |  58 +++++
 .../BigInsights/4.2/upgrades/config-upgrade.xml |  94 +++++++++
 .../upgrades/nonrolling-upgrade-to-hdp-2.6.xml  | 192 ++++++++++++++++-
 .../services/HIVE/configuration/hive-env.xml    |  78 +++----
 .../HIVE/configuration/hive-interactive-env.xml |  62 +++---
 .../stacks/HDP/2.6/services/HIVE/kerberos.json  | 151 ++++++++++++++
 .../stacks/HDP/2.6/services/YARN/kerberos.json  |   2 +-
 .../ChangeStackReferencesActionTest.java        |   1 +
 94 files changed, 6360 insertions(+), 95 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/133baa53/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/133baa53/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
----------------------------------------------------------------------


[36/50] [abbrv] ambari git commit: AMBARI-21345 Add host doesn't fully add a node when include/exclude files are used (dsen)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_windows.py b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_windows.py
index 0f8ce73..680ee47 100755
--- a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_windows.py
+++ b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_windows.py
@@ -56,4 +56,12 @@ hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
 
 exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
 exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
-update_exclude_file_only = config['commandParams']['update_exclude_file_only']
+update_files_only = default("/commandParams/update_files_only",False)
+
+nm_hosts = default("/clusterHostInfo/nm_hosts", [])
+# include file
+include_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.include-path", None)
+include_hosts = None
+manage_include_files = default("/configurations/yarn-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+  include_hosts = list(set(nm_hosts) - set(exclude_hosts))
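
The include list above is simply the NodeManager hosts minus the
decommissioned ones. For example (hypothetical host names):

    nm_hosts = ["nm1", "nm2", "nm3"]
    exclude_hosts = ["nm2"]
    include_hosts = list(set(nm_hosts) - set(exclude_hosts))
    # -> ["nm1", "nm3"] (set difference; ordering is not guaranteed)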

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/resourcemanager.py b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/resourcemanager.py
index 6a7eea7..71c7bc1 100755
--- a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/resourcemanager.py
+++ b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/resourcemanager.py
@@ -91,7 +91,14 @@ class ResourcemanagerWindows(Resourcemanager):
          mode="f"
     )
 
-    if params.update_exclude_file_only == False:
+    if params.include_hosts:
+      File(params.include_file_path,
+           content=Template("include_hosts_list.j2"),
+           owner=yarn_user,
+           mode="f"
+           )
+
+    if params.update_files_only == False:
       Execute(yarn_refresh_cmd, user=yarn_user)
 
 
@@ -219,7 +226,14 @@ class ResourcemanagerDefault(Resourcemanager):
          group=user_group
     )
 
-    if params.update_exclude_file_only == False:
+    if params.include_hosts:
+      File(params.include_file_path,
+           content=Template("include_hosts_list.j2"),
+           owner=yarn_user,
+           group=user_group
+           )
+
+    if params.update_files_only == False:
       Execute(yarn_refresh_cmd,
             environment= {'PATH' : params.execute_path },
             user=yarn_user)

http://git-wip-us.apache.org/repos/asf/ambari/blob/7764e387/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/templates/include_hosts_list.j2 b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/templates/include_hosts_list.j2
new file mode 100644
index 0000000..42e33c0
--- /dev/null
+++ b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in include_hosts %}
+{{host}}
+{% endfor %}
\ No newline at end of file
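
To see what the template above renders, a quick standalone check with plain
Jinja2 (Ambari renders it through its own Template wrapper, and the real file
also carries an Apache license header, omitted in this sketch):

    from jinja2 import Template

    TEMPLATE = "{% for host in include_hosts %}{{host}}\n{% endfor %}"
    print(Template(TEMPLATE).render(
        include_hosts=["nm1.example.com", "nm2.example.com"]))
    # nm1.example.com
    # nm2.example.com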


[46/50] [abbrv] ambari git commit: AMBARI-21500 - Support SPARK2 upgrade for BI 4.2.5 to HDP 2.6.2 (jonathanhurley)

Posted by jo...@apache.org.
AMBARI-21500 - Support SPARK2 upgrade for BI 4.2.5 to HDP 2.6.2 (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/13bcea0b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/13bcea0b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/13bcea0b

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 13bcea0b0148d4296f7354c4ca5ba49f719caec6
Parents: 1534998
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Mon Jul 17 13:35:09 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Mon Jul 17 13:35:09 2017 -0400

----------------------------------------------------------------------
 .../4.2.5/upgrades/config-upgrade.xml           | 11 +++++
 .../upgrades/nonrolling-upgrade-to-hdp-2.6.xml  | 51 ++++++++++++++++++++
 2 files changed, 62 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/13bcea0b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
index f1a954e..8c009a7 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
@@ -210,5 +210,16 @@
         </changes>
       </component>
     </service>
+    
+    <service name="SPARK2">
+      <component name="SPARK2_CLIENT">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_6_0_0_spark2_yarn_queue">
+            <type>spark2-defaults</type>
+            <set key="spark.yarn.queue" value="default" if-type="spark-defaults" if-key="spark.yarn.queue" if-key-state="absent"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
   </services>
 </upgrade-config-changes>
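
The new definition sets spark.yarn.queue on spark2-defaults only when
spark-defaults does not already carry the key. In dictionary terms the
condition reads roughly as follows (illustrative sketch, not Ambari's
upgrade engine):

    spark_defaults = {}    # existing spark-defaults config (if-type)
    spark2_defaults = {}   # target spark2-defaults config (<type>)
    if "spark.yarn.queue" not in spark_defaults:   # if-key-state="absent"
        spark2_defaults["spark.yarn.queue"] = "default"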

http://git-wip-us.apache.org/repos/asf/ambari/blob/13bcea0b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
index 5f1e06c..7c1a9ce 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
@@ -64,6 +64,11 @@
         <component>SPARK_JOBHISTORYSERVER</component>
         <component>SPARK_THRIFTSERVER</component>
       </service>
+      
+      <service name="SPARK2">
+        <component>SPARK2_JOBHISTORYSERVER</component>
+        <component>SPARK2_THRIFTSERVER</component>
+      </service>
 
       <service name="HIVE">
         <component>WEBHCAT_SERVER</component>
@@ -272,6 +277,13 @@
       <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Apply config changes for Oozie server">
         <task xsi:type="configure" id="biginsights_4_2_oozie_server_update_environment_tomcat" />
       </execute-stage>
+      
+      <!-- SPARK2 -->
+      <execute-stage service="SPARK2" component="SPARK2_CLIENT" title="Apply config changes for Spark">
+        <task xsi:type="configure" id="hdp_2_6_0_0_spark2_yarn_queue">
+          <summary>Add queue customization property</summary>
+        </task>
+      </execute-stage>      
     </group>
 
 
@@ -513,6 +525,17 @@
         <component>SPARK_THRIFTSERVER</component>
       </service>
     </group>
+    
+    <group xsi:type="restart" name="SPARK2" title="Spark2">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <supports-auto-skip-failure>false</supports-auto-skip-failure>
+      <parallel-scheduler/>
+      <service name="SPARK2">
+        <component>SPARK2_JOBHISTORYSERVER</component>
+        <component>SPARK2_THRIFTSERVER</component>
+      </service>
+    </group>
 
     <group xsi:type="restart" name="SPARK_CLIENTS" title="Spark Clients">
       <service-check>false</service-check>
@@ -522,6 +545,15 @@
         <component>SPARK_CLIENT</component>
       </service>
     </group>
+    
+    <group xsi:type="restart" name="SPARK2_CLIENTS" title="Spark2 Clients">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <parallel-scheduler/>
+      <service name="SPARK2">
+        <component>SPARK2_CLIENT</component>
+      </service>
+    </group>
 
     <!-- Upgrade Oozie DB only on Upgrade direction, and always create a new ShareLib. -->
     <group name="UPGRADE_OOZIE" title="Upgrade Oozie Database">
@@ -563,6 +595,7 @@
         <service>SLIDER</service>
         <service>HIVE</service>
         <service>SPARK</service>
+        <service>SPARK2</service>
         <service>OOZIE</service>
       </priority>
     </group>
@@ -908,6 +941,24 @@
         </upgrade>
       </component>
     </service>
+    
+    <service name="SPARK2">
+      <component name="SPARK2_JOBHISTORYSERVER">
+        <upgrade>
+          <task xsi:type="restart-task"/>
+        </upgrade>
+      </component>
+      <component name="SPARK2_THRIFTSERVER">
+        <upgrade>
+          <task xsi:type="restart-task"/>
+        </upgrade>
+      </component>
+      <component name="SPARK2_CLIENT">
+        <upgrade>
+          <task xsi:type="restart-task"/>
+        </upgrade>
+      </component>
+    </service>
 
     <service name="OOZIE">
       <component name="OOZIE_SERVER">


[03/50] [abbrv] ambari git commit: AMBARI-21451 - Expected Values Like original_stack Are Missing On Downgrades (jonathanhurley)

Posted by jo...@apache.org.
AMBARI-21451 - Expected Values Like original_stack Are Missing On Downgrades (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a6ac40bc
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a6ac40bc
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a6ac40bc

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: a6ac40bc07e4c0bec207d0786e4e870a6fe194f2
Parents: 08f48c1
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Wed Jul 12 13:30:16 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Wed Jul 12 14:43:15 2017 -0400

----------------------------------------------------------------------
 .../libraries/functions/stack_features.py       |  28 +++--
 .../actionmanager/ExecutionCommandWrapper.java  |   3 +-
 .../ambari/server/agent/ExecutionCommand.java   |  14 +--
 .../controller/ActionExecutionContext.java      |  30 +++---
 .../controller/AmbariActionExecutionHelper.java |   8 +-
 .../ClusterStackVersionResourceProvider.java    |   2 +-
 .../internal/UpgradeResourceProvider.java       | 107 +++++++++++--------
 .../upgrades/FinalizeUpgradeAction.java         |  18 +---
 .../upgrades/UpgradeUserKerberosDescriptor.java |  41 +++----
 .../ambari/server/state/UpgradeContext.java     |  31 +++---
 .../SPARK/1.2.1/package/scripts/params.py       |  11 +-
 .../SPARK/1.2.1/package/scripts/setup_spark.py  |   6 +-
 .../1.2.1/package/scripts/spark_service.py      |   6 +-
 .../ComponentVersionCheckActionTest.java        |   1 -
 .../upgrades/UpgradeActionTest.java             |   2 -
 .../UpgradeUserKerberosDescriptorTest.java      |  19 ++--
 .../src/test/python/TestStackFeature.py         |  44 ++++++--
 .../python/custom_actions/test_ru_set_all.py    |   6 +-
 .../test/python/stacks/2.0.6/configs/nn_eu.json |   2 +-
 .../stacks/2.0.6/configs/nn_eu_standby.json     |   2 +-
 .../2.1/configs/hive-metastore-upgrade.json     |   2 +-
 .../python/stacks/2.2/configs/knox_upgrade.json |   2 +-
 22 files changed, 203 insertions(+), 182 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
index 7811e26..2c66728 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
@@ -104,7 +104,10 @@ def get_stack_feature_version(config):
 
   # something like 2.4.0.0-1234; represents the version for the command
   # (or None if this is a cluster install and it hasn't been calculated yet)
-  version = default("/commandParams/version", None)
+  # this is always guaranteed to be the correct version for the command, even in
+  # upgrade and downgrade scenarios
+  command_version = default("/commandParams/version", None)
+  command_stack = default("/commandParams/target_stack", None)
 
   # something like 2.4.0.0-1234
   # (or None if this is a cluster install and it hasn't been calculated yet)
@@ -114,13 +117,13 @@ def get_stack_feature_version(config):
   upgrade_direction = default("/commandParams/upgrade_direction", None)
 
   # start out with the value that's right 99% of the time
-  version_for_stack_feature_checks = version if version is not None else stack_version
+  version_for_stack_feature_checks = command_version if command_version is not None else stack_version
 
   # if this is not an upgrade, then we take the simple path
   if upgrade_direction is None:
     Logger.info(
-      "Stack Feature Version Info: stack_version={0}, version={1}, current_cluster_version={2} -> {3}".format(
-        stack_version, version, current_cluster_version, version_for_stack_feature_checks))
+      "Stack Feature Version Info: Cluster Stack={0}, Cluster Current Version={1}, Command Stack={2}, Command Version={3}-> {4}".format(
+        stack_version, current_cluster_version, command_stack, command_version, version_for_stack_feature_checks))
 
     return version_for_stack_feature_checks
 
@@ -129,15 +132,12 @@ def get_stack_feature_version(config):
   is_stop_command = _is_stop_command(config)
   if not is_stop_command:
     Logger.info(
-      "Stack Feature Version Info: stack_version={0}, version={1}, current_cluster_version={2}, upgrade_direction={3} -> {4}".format(
-        stack_version, version, current_cluster_version, upgrade_direction,
+      "Stack Feature Version Info: Cluster Stack={0}, Cluster Current Version={1}, Command Stack={2}, Command Version={3}, Upgrade Direction={4} -> {5}".format(
+        stack_version, current_cluster_version, command_stack, command_version, upgrade_direction,
         version_for_stack_feature_checks))
 
     return version_for_stack_feature_checks
 
-  original_stack = default("/commandParams/original_stack", None)
-  target_stack = default("/commandParams/target_stack", None)
-
   # something like 2.5.0.0-5678 (or None)
   downgrade_from_version = default("/commandParams/downgrade_from_version", None)
 
@@ -153,15 +153,13 @@ def get_stack_feature_version(config):
     # UPGRADE
     if current_cluster_version is not None:
       version_for_stack_feature_checks = current_cluster_version
-    elif original_stack is not None:
-      version_for_stack_feature_checks = original_stack
     else:
-      version_for_stack_feature_checks = version if version is not None else stack_version
+      version_for_stack_feature_checks = command_version if command_version is not None else stack_version
 
   Logger.info(
-    "Stack Feature Version Info: stack_version={0}, version={1}, current_cluster_version={2}, upgrade_direction={3}, original_stack={4}, target_stack={5}, downgrade_from_version={6}, stop_command={7} -> {8}".format(
-      stack_version, version, current_cluster_version, upgrade_direction, original_stack,
-      target_stack, downgrade_from_version, is_stop_command, version_for_stack_feature_checks))
+    "Stack Feature Version Info: Cluster Stack={0}, Cluster Current Version={1}, Command Stack={2}, Command Version={3}, Upgrade Direction={4}, stop_command={5} -> {6}".format(
+      stack_version, current_cluster_version, command_stack, command_version, upgrade_direction,
+      is_stop_command, version_for_stack_feature_checks))
 
   return version_for_stack_feature_checks
 

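Net effect of the stack_features.py change: get_stack_feature_version() now
trusts /commandParams/version whenever it is present, falls back to the
cluster's stack version otherwise, and only needs special handling for stop
commands during an upgrade or downgrade. A minimal, self-contained sketch of
the common path (the default() helper below is a stand-in for the
resource_management one, and the fixture values come from TestStackFeature.py
further down):

    # Sketch only: default() stands in for resource_management's helper.
    def default(path, config, fallback=None):
        node = config
        for key in path.strip("/").split("/"):
            if not isinstance(node, dict) or key not in node:
                return fallback
            node = node[key]
        return node

    def pick_feature_version(config):
        command_version = default("/commandParams/version", config)
        stack_version = default("/hostLevelParams/stack_version", config)
        # right 99% of the time: the version stamped on the command
        return command_version if command_version is not None else stack_version

    config = {
        "hostLevelParams": {"stack_version": "2.4"},
        "commandParams": {"version": "2.5.9.9-9999",
                          "upgrade_direction": "downgrade"},
    }
    assert pick_feature_version(config) == "2.5.9.9-9999"
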
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
index 28946e7..8875314 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
@@ -198,8 +198,9 @@ public class ExecutionCommandWrapper {
 
       Map<String,String> commandParams = executionCommand.getCommandParams();
 
+      // set the version for the command if it's not already set
       ClusterVersionEntity effectiveClusterVersion = cluster.getEffectiveClusterVersion();
-      if (null != effectiveClusterVersion) {
+      if (null != effectiveClusterVersion && !commandParams.containsKey(KeyNames.VERSION)) {
         commandParams.put(KeyNames.VERSION,
             effectiveClusterVersion.getRepositoryVersion().getVersion());
       }

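The new containsKey() guard gives commandParams put-if-absent semantics for
the version key: a version already stamped on the command by the upgrade
orchestration (see the UpgradeResourceProvider changes below) is no longer
overwritten by the cluster's effective version. The same behavior, shown in a
few lines of Python for illustration:

    # Only fall back to the cluster's effective version when the
    # command was not already stamped with one.
    command_params = {"version": "2.5.9.9-9999"}  # set by the upgrade
    effective_cluster_version = "2.4.0.0-1234"

    command_params.setdefault("version", effective_cluster_version)
    assert command_params["version"] == "2.5.9.9-9999"
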
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
index 32fb37b..7948d30 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
@@ -67,7 +67,7 @@ public class ExecutionCommand extends AgentCommand {
   private String role;
 
   @SerializedName("hostLevelParams")
-  private Map<String, String> hostLevelParams = new HashMap<String, String>();
+  private Map<String, String> hostLevelParams = new HashMap<>();
 
   @SerializedName("roleParams")
   private Map<String, String> roleParams = null;
@@ -77,7 +77,7 @@ public class ExecutionCommand extends AgentCommand {
 
   @SerializedName("clusterHostInfo")
   private Map<String, Set<String>> clusterHostInfo =
-      new HashMap<String, Set<String>>();
+      new HashMap<>();
 
   @SerializedName("configurations")
   private Map<String, Map<String, String>> configurations;
@@ -92,7 +92,7 @@ public class ExecutionCommand extends AgentCommand {
   private boolean forceRefreshConfigTagsBeforeExecution = false;
 
   @SerializedName("commandParams")
-  private Map<String, String> commandParams = new HashMap<String, String>();
+  private Map<String, String> commandParams = new HashMap<>();
 
   @SerializedName("serviceName")
   private String serviceName;
@@ -104,10 +104,10 @@ public class ExecutionCommand extends AgentCommand {
   private String componentName;
 
   @SerializedName("kerberosCommandParams")
-  private List<Map<String, String>> kerberosCommandParams = new ArrayList<Map<String, String>>();
+  private List<Map<String, String>> kerberosCommandParams = new ArrayList<>();
 
   @SerializedName("localComponents")
-  private Set<String> localComponents = new HashSet<String>();
+  private Set<String> localComponents = new HashSet<>();
 
   @SerializedName("availableServices")
   private Map<String, String> availableServices = new HashMap<>();
@@ -149,7 +149,7 @@ public class ExecutionCommand extends AgentCommand {
   }
 
   public Map<String, Map<String, String>> getConfigurationCredentials() {
-    return this.configurationCredentials;
+    return configurationCredentials;
   }
 
   public String getCommandId() {
@@ -434,6 +434,8 @@ public class ExecutionCommand extends AgentCommand {
     String USER_GROUPS = "user_groups";
     String NOT_MANAGED_HDFS_PATH_LIST = "not_managed_hdfs_path_list";
     String VERSION = "version";
+    String SOURCE_STACK = "source_stack";
+    String TARGET_STACK = "target_stack";
     String REFRESH_TOPOLOGY = "refresh_topology";
     String HOST_SYS_PREPPED = "host_sys_prepped";
     String MAX_DURATION_OF_RETRIES = "max_duration_for_retries";

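SOURCE_STACK and TARGET_STACK join VERSION as well-known command parameter
names. On the wire they appear in the command JSON as in the nn_eu.json
fixture below, roughly:

    command_params = {
        "version": "2.3.2.0-2844",
        "source_stack": "HDP-2.2",  # stack the command is coming from
        "target_stack": "HDP-2.3",  # stack the command is moving to
        "upgrade_type": "nonrolling_upgrade",
    }
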
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
index c361094..af506f2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
@@ -25,7 +25,7 @@ import java.util.Map;
 import org.apache.ambari.server.actionmanager.TargetHostType;
 import org.apache.ambari.server.controller.internal.RequestOperationLevel;
 import org.apache.ambari.server.controller.internal.RequestResourceFilter;
-import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 
 /**
  * The context required to create tasks and stages for a custom action
@@ -42,7 +42,7 @@ public class ActionExecutionContext {
   private String expectedComponentName;
   private boolean hostsInMaintenanceModeExcluded = true;
   private boolean allowRetry = false;
-  private StackId stackId;
+  private RepositoryVersionEntity repositoryVersion;
 
   /**
    * {@code true} if slave/client component failures should be automatically
@@ -171,27 +171,27 @@
   }
 
   /**
-   * Gets the stack to use for generating stack-associated values for a command.
-   * In some cases the cluster's stack is not the correct one to use, such as
-   * when distributing a repository.
+   * Gets the stack/version to use for generating stack-associated values for a
+   * command. In some cases the cluster's stack is not the correct one to use,
+   * such as when distributing a repository.
    *
-   * @return the stackId the stack to use when generating stack-specific content
-   *         for the command.
+   * @return the repository version to use when generating stack-specific
+   *         content for the command.
    */
-  public StackId getStackId() {
-    return stackId;
+  public RepositoryVersionEntity getRepositoryVersion() {
+    return repositoryVersion;
   }
 
   /**
-   * Sets the stack to use for generating stack-associated values for a command.
-   * In some cases the cluster's stack is not the correct one to use, such as
-   * when distributing a repository.
+   * Sets the stack/version to use for generating stack-associated values for a
+   * command. In some cases the cluster's stack is not the correct one to use,
+   * such as when distributing a repository.
    *
    * @param repositoryVersion
    *          the repository version to use for stack-based properties on the command.
    */
-  public void setStackId(StackId stackId) {
-    this.stackId = stackId;
+  public void setRepositoryVersion(RepositoryVersionEntity repositoryVersion) {
+    this.repositoryVersion = repositoryVersion;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
index f75fb41..0638910 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
@@ -553,10 +553,12 @@ public class AmbariActionExecutionHelper {
 
     // set the host level params if not already set by whoever is creating this command
     if (!hostLevelParams.containsKey(STACK_NAME) || !hostLevelParams.containsKey(STACK_VERSION)) {
-      // see if the action context has a stack ID set to use, otherwise use the
+      // see if the action context has a repository set to use for the command, otherwise use the
       // cluster's current stack ID
-      StackId stackId = actionContext.getStackId() != null ? actionContext.getStackId()
-          : cluster.getCurrentStackVersion();
+      StackId stackId = cluster.getCurrentStackVersion();
+      if (null != actionContext.getRepositoryVersion()) {
+        stackId = actionContext.getRepositoryVersion().getStackId();
+      }
 
       hostLevelParams.put(STACK_NAME, stackId.getStackName());
       hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());

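Resolution order for the STACK_NAME/STACK_VERSION host-level params is now:
the repository version attached to the action context if one was set (for
example when distributing a new repository), otherwise the cluster's current
stack. Mirrored in Python for brevity, with illustrative names:

    def resolve_stack_id(context_repository, cluster_current_stack):
        # Prefer the repository pinned to the action context; fall back
        # to the cluster's current stack.
        if context_repository is not None:
            return context_repository["stack_id"]
        return cluster_current_stack

    assert resolve_stack_id(None, "HDP-2.5") == "HDP-2.5"
    assert resolve_stack_id({"stack_id": "HDP-2.6"}, "HDP-2.5") == "HDP-2.6"
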
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
index 9ea6083..633fe8c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
@@ -713,7 +713,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
     ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
         INSTALL_PACKAGES_ACTION, Collections.singletonList(filter), params);
 
-    actionContext.setStackId(stackId);
+    actionContext.setRepositoryVersion(repoVersion);
     actionContext.setTimeout(Short.valueOf(configuration.getDefaultAgentTaskTimeout(true)));
 
     return actionContext;

http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
index 1130026..858b7cf 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
@@ -883,9 +883,9 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     // keep track of which stack to use when building commands - an express
     // upgrade switches the stack half way through while other types move it in
     // the beginning
-    StackId effectiveStackId = upgradeContext.getTargetStackId();
+    RepositoryVersionEntity effectiveRepositoryVersion = upgradeContext.getTargetRepositoryVersion();
     if(upgradeContext.getType() == UpgradeType.NON_ROLLING ) {
-      effectiveStackId = upgradeContext.getSourceStackId();
+      effectiveRepositoryVersion = upgradeContext.getSourceRepositoryVersion();
     }
 
     for (UpgradeGroupHolder group : groups) {
@@ -895,7 +895,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
 
       if (upgradeContext.getType() == UpgradeType.NON_ROLLING
           && UpdateStackGrouping.class.equals(group.groupClass)) {
-        effectiveStackId = upgradeContext.getTargetStackId();
+        effectiveRepositoryVersion = upgradeContext.getTargetRepositoryVersion();
       }
 
       List<UpgradeItemEntity> itemEntities = new ArrayList<>();
@@ -919,7 +919,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
               itemEntities.add(itemEntity);
 
               injectVariables(configHelper, cluster, itemEntity);
-              makeServerSideStage(upgradeContext, req, effectiveStackId, itemEntity,
+              makeServerSideStage(upgradeContext, req, effectiveRepositoryVersion, itemEntity,
                   (ServerSideActionTask) task, skippable, supportsAutoSkipOnFailure, allowRetry,
                   pack, configUpgradePack);
             }
@@ -934,7 +934,8 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
           injectVariables(configHelper, cluster, itemEntity);
 
           // upgrade items match a stage
-          createStage(upgradeContext, req, effectiveStackId, itemEntity, wrapper, skippable,
+          createStage(upgradeContext, req, effectiveRepositoryVersion, itemEntity, wrapper,
+              skippable,
               supportsAutoSkipOnFailure, allowRetry);
         }
       }
@@ -1275,10 +1276,10 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
    *          the upgrade context (not {@code null}).
    * @param request
    *          the request to add the new stage to (not {@code null}).
-   * @param effectiveStackId
-   *          the stack ID to use when building the command. This will determine
-   *          things like stack tools and version information added to the
-   *          command (not {@code null}).
+   * @param effectiveRepositoryVersion
+   *          the stack/version to use when building the command. This will
+   *          determine things like stack tools and version information added to
+   *          the command (not {@code null}).
    * @param entity
    *          the upgrade entity to add the new items to (not {@code null}).
    * @param wrapper
@@ -1292,25 +1293,27 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
    * @throws AmbariException
    */
   private void createStage(UpgradeContext context, RequestStageContainer request,
-      StackId effectiveStackId, UpgradeItemEntity entity, StageWrapper wrapper, boolean skippable,
-      boolean supportsAutoSkipOnFailure, boolean allowRetry) throws AmbariException {
+      RepositoryVersionEntity effectiveRepositoryVersion, UpgradeItemEntity entity,
+      StageWrapper wrapper, boolean skippable, boolean supportsAutoSkipOnFailure,
+      boolean allowRetry) throws AmbariException {
 
     switch (wrapper.getType()) {
       case CONFIGURE:
       case START:
       case STOP:
       case RESTART:
-        makeCommandStage(context, request, effectiveStackId, entity, wrapper, skippable,
+        makeCommandStage(context, request, effectiveRepositoryVersion, entity, wrapper, skippable,
             supportsAutoSkipOnFailure,
             allowRetry);
         break;
       case RU_TASKS:
-        makeActionStage(context, request, effectiveStackId, entity, wrapper, skippable,
+        makeActionStage(context, request, effectiveRepositoryVersion, entity, wrapper, skippable,
             supportsAutoSkipOnFailure,
             allowRetry);
         break;
       case SERVICE_CHECK:
-        makeServiceCheckStage(context, request, effectiveStackId, entity, wrapper, skippable,
+        makeServiceCheckStage(context, request, effectiveRepositoryVersion, entity, wrapper,
+            skippable,
             supportsAutoSkipOnFailure, allowRetry);
         break;
       default:
@@ -1341,9 +1344,9 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
    *          the upgrade context.
    * @param request
    *          the request object to add the stage to.
-   * @param effectiveStackId
-   *          the stack ID to use when generating content for the command. On
-   *          some upgrade types, this may change during the course of the
+   * @param effectiveRepositoryVersion
+   *          the stack/version to use when generating content for the command.
+   *          On some upgrade types, this may change during the course of the
    *          upgrade orchestration. An express upgrade changes this after
    *          stopping all services.
    * @param entity
@@ -1360,8 +1363,9 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
    * @throws AmbariException
    */
   private void makeActionStage(UpgradeContext context, RequestStageContainer request,
-      StackId effectiveStackId, UpgradeItemEntity entity, StageWrapper wrapper, boolean skippable,
-      boolean supportsAutoSkipOnFailure, boolean allowRetry) throws AmbariException {
+      RepositoryVersionEntity effectiveRepositoryVersion, UpgradeItemEntity entity,
+      StageWrapper wrapper, boolean skippable, boolean supportsAutoSkipOnFailure,
+      boolean allowRetry) throws AmbariException {
 
     if (0 == wrapper.getHosts().size()) {
       throw new AmbariException(
@@ -1369,13 +1373,14 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     }
 
     Cluster cluster = context.getCluster();
+    StackId effectiveStackId = effectiveRepositoryVersion.getStackId();
 
     // add each host to this stage
     RequestResourceFilter filter = new RequestResourceFilter("", "",
         new ArrayList<>(wrapper.getHosts()));
 
     LOG.debug("Analyzing upgrade item {} with tasks: {}.", entity.getText(), entity.getTasks());
-    Map<String, String> params = getNewParameterMap(request, context);
+    Map<String, String> params = getNewParameterMap(request, context, effectiveRepositoryVersion);
     params.put(UpgradeContext.COMMAND_PARAM_TASKS, entity.getTasks());
 
     // Apply additional parameters to the command that come from the stage.
@@ -1403,7 +1408,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
         EXECUTE_TASK_ROLE, Collections.singletonList(filter), params);
 
-    actionContext.setStackId(effectiveStackId);
+    actionContext.setRepositoryVersion(effectiveRepositoryVersion);
 
     // hosts in maintenance mode are excluded from the upgrade
     actionContext.setMaintenanceModeHostExcluded(true);
@@ -1449,10 +1454,10 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
    *          Upgrade Context
    * @param request
    *          Container for stage
-   * @param effectiveStackId
-   *          the stack ID to use when building the command. This will determine
-   *          things like stack tools and version information added to the
-   *          command (not {@code null}).
+   * @param effectiveRepositoryVersion
+   *          the stack/version to use when building the command. This will
+   *          determine things like stack tools and version information added to
+   *          the command (not {@code null}).
    * @param entity
    *          Upgrade Item
    * @param wrapper
@@ -1464,10 +1469,12 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
    * @throws AmbariException
    */
   private void makeCommandStage(UpgradeContext context, RequestStageContainer request,
-      StackId effectiveStackId, UpgradeItemEntity entity, StageWrapper wrapper, boolean skippable,
-      boolean supportsAutoSkipOnFailure, boolean allowRetry) throws AmbariException {
+      RepositoryVersionEntity effectiveRepositoryVersion, UpgradeItemEntity entity,
+      StageWrapper wrapper, boolean skippable, boolean supportsAutoSkipOnFailure,
+      boolean allowRetry) throws AmbariException {
 
     Cluster cluster = context.getCluster();
+    StackId effectiveStackId = effectiveRepositoryVersion.getStackId();
 
     List<RequestResourceFilter> filters = new ArrayList<>();
 
@@ -1490,7 +1497,8 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
         break;
     }
 
-    Map<String, String> commandParams = getNewParameterMap(request, context);
+    Map<String, String> commandParams = getNewParameterMap(request, context,
+        effectiveRepositoryVersion);
 
     // Apply additional parameters to the command that come from the stage.
     applyAdditionalParameters(wrapper, commandParams);
@@ -1498,7 +1506,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
         function, filters, commandParams);
 
-    actionContext.setStackId(effectiveStackId);
+    actionContext.setRepositoryVersion(effectiveRepositoryVersion);
 
     actionContext.setTimeout(wrapper.getMaxTimeout(s_configuration));
     actionContext.setRetryAllowed(allowRetry);
@@ -1540,7 +1548,8 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
   }
 
   private void makeServiceCheckStage(UpgradeContext context, RequestStageContainer request,
-      StackId effectiveStackId, UpgradeItemEntity entity, StageWrapper wrapper, boolean skippable,
+      RepositoryVersionEntity effectiveRepositoryVersion, UpgradeItemEntity entity,
+      StageWrapper wrapper, boolean skippable,
       boolean supportsAutoSkipOnFailure, boolean allowRetry) throws AmbariException {
 
     List<RequestResourceFilter> filters = new ArrayList<>();
@@ -1550,8 +1559,10 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     }
 
     Cluster cluster = context.getCluster();
+    StackId effectiveStackId = effectiveRepositoryVersion.getStackId();
 
-    Map<String, String> commandParams = getNewParameterMap(request, context);
+    Map<String, String> commandParams = getNewParameterMap(request, context,
+        effectiveRepositoryVersion);
 
     // Apply additional parameters to the command that come from the stage.
     applyAdditionalParameters(wrapper, commandParams);
@@ -1559,7 +1570,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
         "SERVICE_CHECK", filters, commandParams);
 
-    actionContext.setStackId(effectiveStackId);
+    actionContext.setRepositoryVersion(effectiveRepositoryVersion);
     actionContext.setTimeout(wrapper.getMaxTimeout(s_configuration));
     actionContext.setRetryAllowed(allowRetry);
     actionContext.setAutoSkipFailures(context.isServiceCheckFailureAutoSkipped());
@@ -1586,7 +1597,9 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     stage.setStageId(stageId);
     entity.setStageId(Long.valueOf(stageId));
 
-    Map<String, String> requestParams = getNewParameterMap(request, context);
+    Map<String, String> requestParams = getNewParameterMap(request, context,
+        effectiveRepositoryVersion);
+
     s_commandExecutionHelper.get().addExecutionCommandsToStage(actionContext, stage, requestParams, jsons);
 
     request.addStages(Collections.singletonList(stage));
@@ -1599,10 +1612,10 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
    *          upgrade context
    * @param request
    *          upgrade request
-   * @param effectiveStackId
-   *          the stack ID to use when building the command. This will determine
-   *          things like stack tools and version information added to the
-   *          command (not {@code null}).
+   * @param effectiveRepositoryVersion
+   *          the stack/version to use when building the command. This will
+   *          determine things like stack tools and version information added to
+   *          the command (not {@code null}).
    * @param entity
    *          a single upgrade item
    * @param task
@@ -1617,14 +1630,18 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
    * @throws AmbariException
    */
   private void makeServerSideStage(UpgradeContext context, RequestStageContainer request,
-      StackId effectiveStackId, UpgradeItemEntity entity, ServerSideActionTask task,
+      RepositoryVersionEntity effectiveRepositoryVersion, UpgradeItemEntity entity,
+      ServerSideActionTask task,
       boolean skippable, boolean supportsAutoSkipOnFailure, boolean allowRetry,
       UpgradePack upgradePack, ConfigUpgradePack configUpgradePack)
           throws AmbariException {
 
     Cluster cluster = context.getCluster();
+    StackId effectiveStackId = effectiveRepositoryVersion.getStackId();
+
+    Map<String, String> commandParams = getNewParameterMap(request, context,
+        effectiveRepositoryVersion);
 
-    Map<String, String> commandParams = getNewParameterMap(request, context);
     commandParams.put(UpgradeContext.COMMAND_PARAM_UPGRADE_PACK, upgradePack.getName());
 
     // Notice that this does not apply any params because the input does not specify a stage.
@@ -1702,7 +1719,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
         Role.AMBARI_SERVER_ACTION.toString(), Collections.<RequestResourceFilter> emptyList(),
         commandParams);
 
-    actionContext.setStackId(effectiveStackId);
+    actionContext.setRepositoryVersion(effectiveRepositoryVersion);
     actionContext.setTimeout(Short.valueOf((short) -1));
     actionContext.setRetryAllowed(allowRetry);
     actionContext.setAutoSkipFailures(context.isComponentFailureAutoSkipped());
@@ -1743,11 +1760,10 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
    * following properties are already set:
    * <ul>
    * <li>{@link UpgradeContext#COMMAND_PARAM_CLUSTER_NAME}
+   * <li>{@link UpgradeContext#COMMAND_PARAM_SOURCE_STACK}
+   * <li>{@link UpgradeContext#COMMAND_PARAM_TARGET_STACK}
    * <li>{@link UpgradeContext#COMMAND_PARAM_VERSION}
    * <li>{@link UpgradeContext#COMMAND_PARAM_DIRECTION}
-   * <li>{@link UpgradeContext#COMMAND_PARAM_ORIGINAL_STACK}
-   * <li>{@link UpgradeContext#COMMAND_PARAM_TARGET_STACK}
-   * <li>{@link UpgradeContext#COMMAND_DOWNGRADE_FROM_VERSION}
    * <li>{@link UpgradeContext#COMMAND_PARAM_UPGRADE_TYPE}
    * <li>{@link KeyNames#REFRESH_CONFIG_TAGS_BEFORE_EXECUTION} - necessary in
    * order to have the commands contain the correct configurations. Otherwise,
@@ -1761,10 +1777,13 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
    * @return the initialized parameter map.
    */
   private Map<String, String> getNewParameterMap(RequestStageContainer requestStageContainer,
-      UpgradeContext context) {
+      UpgradeContext context, RepositoryVersionEntity effectiveRepositoryVersion) {
     Map<String, String> parameters = context.getInitializedCommandParameters();
+
     parameters.put(UpgradeContext.COMMAND_PARAM_REQUEST_ID,
         String.valueOf(requestStageContainer.getId()));
+
+    parameters.put(UpgradeContext.COMMAND_PARAM_VERSION, effectiveRepositoryVersion.getVersion());
     return parameters;
   }
 

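Two behaviors are worth calling out in this file: an express (NON_ROLLING)
upgrade builds stages against the source repository until the
UpdateStackGrouping group is reached and stays on the target repository from
then on, while getNewParameterMap() now stamps every stage's parameters with
the effective repository's version. A compact sketch of the group-to-repository
selection, with illustrative group names:

    def stage_repos(groups, upgrade_type, source_repo, target_repo):
        # Express upgrades switch stacks half way through; other types
        # move to the target from the beginning.
        effective = source_repo if upgrade_type == "NON_ROLLING" else target_repo
        for group in groups:
            if upgrade_type == "NON_ROLLING" and group == "UpdateStackGrouping":
                effective = target_repo
            yield group, effective

    assert list(stage_repos(
        ["StopGrouping", "UpdateStackGrouping", "RestartGrouping"],
        "NON_ROLLING", "HDP-2.5", "HDP-2.6")) == [
            ("StopGrouping", "HDP-2.5"),
            ("UpdateStackGrouping", "HDP-2.6"),
            ("RestartGrouping", "HDP-2.6")]
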
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java
index 0e6f0c4..d531460 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java
@@ -30,6 +30,7 @@ import java.util.concurrent.ConcurrentMap;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
+import org.apache.ambari.server.agent.ExecutionCommand.KeyNames;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.events.StackUpgradeFinishEvent;
 import org.apache.ambari.server.events.publishers.VersionEventPublisher;
@@ -73,26 +74,13 @@ public class FinalizeUpgradeAction extends AbstractServerAction {
 
   public static final String CLUSTER_NAME_KEY = "cluster_name";
   public static final String UPGRADE_DIRECTION_KEY = "upgrade_direction";
-  public static final String VERSION_KEY = "version";
+  public static final String VERSION_KEY = KeyNames.VERSION;
+  public static final String TARGET_STACK_KEY = KeyNames.TARGET_STACK;
   public static final String REQUEST_ID = "request_id";
   public static final String PREVIOUS_UPGRADE_NOT_COMPLETED_MSG = "It is possible that a previous upgrade was not finalized. " +
       "For this reason, Ambari will not remove any configs. Please ensure that all database records are correct.";
 
   /**
-   * The original "current" stack of the cluster before the upgrade started.
-   * This is the same regardless of whether the current direction is
-   * {@link Direction#UPGRADE} or {@link Direction#DOWNGRADE}.
-   */
-  public static final String ORIGINAL_STACK_KEY = "original_stack";
-
-  /**
-   * The target upgrade stack before the upgrade started. This is the same
-   * regardless of whether the current direction is {@link Direction#UPGRADE} or
-   * {@link Direction#DOWNGRADE}.
-   */
-  public static final String TARGET_STACK_KEY = "target_stack";
-
-  /**
    * The Cluster that this ServerAction implementation is executing on
    */
   @Inject

http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptor.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptor.java
index 60d02a3..842da95 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptor.java
@@ -17,11 +17,16 @@
  */
 package org.apache.ambari.server.serveraction.upgrades;
 
-import com.google.inject.Inject;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.TreeMap;
+import java.util.concurrent.ConcurrentMap;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleCommand;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
+import org.apache.ambari.server.agent.ExecutionCommand.KeyNames;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.orm.dao.ArtifactDAO;
 import org.apache.ambari.server.orm.entities.ArtifactEntity;
@@ -37,10 +42,7 @@ import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.ArrayList;
-import java.util.List;
-import java.util.TreeMap;
-import java.util.concurrent.ConcurrentMap;
+import com.google.inject.Inject;
 
 /**
  * Update the user-defined Kerberos Descriptor to work with the current stack.
@@ -56,25 +58,6 @@ public class UpgradeUserKerberosDescriptor extends AbstractServerAction {
    * @see Direction
    */
   private static final String UPGRADE_DIRECTION_KEY = "upgrade_direction";
-
-  /**
-   * The original "current" stack of the cluster before the upgrade started.
-   * This is the same regardless of whether the current direction is
-   * {@link Direction#UPGRADE} or {@link Direction#DOWNGRADE}.
-   *
-   * @see Direction
-   */
-  private static final String ORIGINAL_STACK_KEY = "original_stack";
-
-  /**
-   * The target upgrade stack before the upgrade started. This is the same
-   * regardless of whether the current direction is {@link Direction#UPGRADE} or
-   * {@link Direction#DOWNGRADE}.
-   *
-   * @see Direction
-   */
-  private static final String TARGET_STACK_KEY = "target_stack";
-
   private final static String KERBEROS_DESCRIPTOR_NAME = "kerberos_descriptor";
   private final static String KERBEROS_DESCRIPTOR_BACKUP_NAME = "kerberos_descriptor_backup";
 
@@ -104,21 +87,21 @@ public class UpgradeUserKerberosDescriptor extends AbstractServerAction {
     HostRoleCommand hostRoleCommand = getHostRoleCommand();
     String clusterName = hostRoleCommand.getExecutionCommandWrapper().getExecutionCommand().getClusterName();
     Cluster cluster = clusters.getCluster(clusterName);
-    List<String> messages = new ArrayList<String>();
-    List<String> errorMessages = new ArrayList<String>();
+    List<String> messages = new ArrayList<>();
+    List<String> errorMessages = new ArrayList<>();
 
     if (cluster != null) {
       logMessage(messages, "Obtaining the user-defined Kerberos descriptor");
 
-      TreeMap<String, String> foreignKeys = new TreeMap<String, String>();
+      TreeMap<String, String> foreignKeys = new TreeMap<>();
       foreignKeys.put("cluster", String.valueOf(cluster.getClusterId()));
 
       ArtifactEntity entity = artifactDAO.findByNameAndForeignKeys("kerberos_descriptor", foreignKeys);
       KerberosDescriptor userDescriptor = (entity == null) ? null : kerberosDescriptorFactory.createInstance(entity.getArtifactData());
 
       if (userDescriptor != null) {
-        StackId originalStackId = getStackIdFromCommandParams(ORIGINAL_STACK_KEY);
-        StackId targetStackId = getStackIdFromCommandParams(TARGET_STACK_KEY);
+        StackId originalStackId = cluster.getCurrentStackVersion();
+        StackId targetStackId = getStackIdFromCommandParams(KeyNames.TARGET_STACK);
 
         if (isDowngrade()) {
           restoreDescriptor(foreignKeys, messages, errorMessages);

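With ORIGINAL_STACK_KEY gone, the action derives the "from" stack from the
cluster itself (cluster.getCurrentStackVersion()) and only reads target_stack
off the command. The command parameters it now expects are exactly what the
updated tests below construct:

    command_params = {
        "clusterName": "c1",
        "upgrade_direction": "UPGRADE",
        "target_stack": "HDP-2.5",
        # the original stack now comes from the cluster, not the command
    }
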
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
index b97dc80..93e6393 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
@@ -62,21 +62,15 @@ public class UpgradeContext {
   public static final String COMMAND_PARAM_UPGRADE_TYPE = "upgrade_type";
   public static final String COMMAND_PARAM_TASKS = "tasks";
   public static final String COMMAND_PARAM_STRUCT_OUT = "structured_out";
-  public static final String COMMAND_DOWNGRADE_FROM_VERSION = "downgrade_from_version";
 
-  /**
-   * The original "current" stack of the cluster before the upgrade started.
-   * This is the same regardless of whether the current direction is
-   * {@link Direction#UPGRADE} or {@link Direction#DOWNGRADE}.
-   */
-  public static final String COMMAND_PARAM_ORIGINAL_STACK = "original_stack";
+  public static final String COMMAND_PARAM_SOURCE_STACK = KeyNames.SOURCE_STACK;
+  public static final String COMMAND_PARAM_TARGET_STACK = KeyNames.TARGET_STACK;
 
-  /**
-   * The target upgrade stack before the upgrade started. This is the same
-   * regardless of whether the current direction is {@link Direction#UPGRADE} or
-   * {@link Direction#DOWNGRADE}.
-   */
-  public static final String COMMAND_PARAM_TARGET_STACK = "target_stack";
+  @Deprecated
+  @Experimental(
+      feature = ExperimentalFeature.STACK_UPGRADES_BETWEEN_VENDORS,
+      comment = "This isn't needed anymore, but many python classes still use it")
+  public static final String COMMAND_PARAM_DOWNGRADE_FROM_VERSION = "downgrade_from_version";
 
   /**
    * The cluster that the upgrade is for.
@@ -528,6 +522,7 @@ public class UpgradeContext {
    * <ul>
    * <li>{@link #COMMAND_PARAM_CLUSTER_NAME}
    * <li>{@link #COMMAND_PARAM_DIRECTION}
+   * <li>{@link #COMMAND_PARAM_DOWNGRADE_FROM_VERSION}
    * <li>{@link #COMMAND_PARAM_UPGRADE_TYPE}
    * <li>{@link KeyNames#REFRESH_CONFIG_TAGS_BEFORE_EXECUTION} - necessary in
    * order to have the commands contain the correct configurations. Otherwise,
@@ -542,8 +537,16 @@ public class UpgradeContext {
   public Map<String, String> getInitializedCommandParameters() {
     Map<String, String> parameters = new HashMap<>();
 
+    Direction direction = getDirection();
     parameters.put(COMMAND_PARAM_CLUSTER_NAME, m_cluster.getClusterName());
-    parameters.put(COMMAND_PARAM_DIRECTION, getDirection().name().toLowerCase());
+    parameters.put(COMMAND_PARAM_DIRECTION, direction.name().toLowerCase());
+
+    parameters.put(COMMAND_PARAM_SOURCE_STACK, m_fromRepositoryVersion.getStackId().getStackId());
+    parameters.put(COMMAND_PARAM_TARGET_STACK, m_toRepositoryVersion.getStackId().getStackId());
+
+    if (direction == Direction.DOWNGRADE) {
+      parameters.put(COMMAND_PARAM_DOWNGRADE_FROM_VERSION, m_fromRepositoryVersion.getVersion());
+    }
 
     if (null != getType()) {
       // use the serialized attributes of the enum to convert it to a string,

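getInitializedCommandParameters() now seeds every upgrade command with the
source and target stack IDs, and sets the deprecated downgrade_from_version
(still read by many Python scripts) only when actually downgrading. For a
downgrade, the seeded map looks roughly like this, using values from the
TestStackFeature fixtures:

    parameters = {
        "clusterName": "c1",
        "upgrade_direction": "downgrade",
        "source_stack": "HDP-2.5",   # stack the cluster is on now
        "target_stack": "HDP-2.4",   # stack being downgraded to
        "downgrade_from_version": "2.5.9.9-9999",  # downgrade only
    }
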
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py
index b54b565..5dbbaed 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py
@@ -21,6 +21,7 @@ limitations under the License.
 import socket
 import status_params
 from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.stack_features import get_stack_feature_version
 from resource_management.libraries.functions import StackFeature
 from resource_management.libraries.functions import Direction
 from setup_spark import *
@@ -57,10 +58,8 @@ upgrade_direction = default("/commandParams/upgrade_direction", None)
 java_home = config['hostLevelParams']['java_home']
 stack_name = status_params.stack_name
 stack_root = Script.get_stack_root()
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-if upgrade_direction == Direction.DOWNGRADE:
-  stack_version_unformatted = config['commandParams']['original_stack'].split("-")[1]
-stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+version_for_stack_feature_checks = get_stack_feature_version(config)
 
 sysprep_skip_copy_tarballs_hdfs = get_sysprep_skip_copy_tarballs_hdfs()
 
@@ -71,7 +70,7 @@ spark_conf = '/etc/spark/conf'
 hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
 hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
 
-if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
+if check_stack_feature(StackFeature.ROLLING_UPGRADE, version_for_stack_feature_checks):
   hadoop_home = stack_select.get_hadoop_dir("home")
   spark_conf = format("{stack_root}/current/{component_directory}/conf")
   spark_log_dir = config['configurations']['spark-env']['spark_log_dir']
@@ -212,7 +211,7 @@ dfs_type = default("/commandParams/dfs_type", "")
 # livy is only supported from HDP 2.5
 has_livyserver = False
 
-if stack_version_formatted and check_stack_feature(StackFeature.SPARK_LIVY, stack_version_formatted) and "livy-env" in config['configurations']:
+if check_stack_feature(StackFeature.SPARK_LIVY, version_for_stack_feature_checks) and "livy-env" in config['configurations']:
   livy_component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "LIVY_SERVER")
   livy_conf = format("{stack_root}/current/{livy_component_directory}/conf")
   livy_log_dir = config['configurations']['livy-env']['livy_log_dir']

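With get_stack_feature_version() in the picture, params.py no longer parses
original_stack to special-case downgrades; one computed version feeds every
feature check. Reduced to its essentials (schematic, assuming the usual Script
import in an agent-side script):

    from resource_management.libraries.script.script import Script
    from resource_management.libraries.functions import StackFeature
    from resource_management.libraries.functions.stack_features import \
        check_stack_feature, get_stack_feature_version

    config = Script.get_config()
    version_for_stack_feature_checks = get_stack_feature_version(config)

    if check_stack_feature(StackFeature.ROLLING_UPGRADE,
                           version_for_stack_feature_checks):
        pass  # stack-rooted directory layout applies
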
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/setup_spark.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/setup_spark.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/setup_spark.py
index 4034532..bc86a67 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/setup_spark.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/setup_spark.py
@@ -114,11 +114,11 @@ def setup_spark(env, type, upgrade_type=None, action=None, config_dir=None):
       mode=0644
     )
 
-  effective_version = params.version if upgrade_type is not None else params.stack_version_formatted
+  effective_version = params.version if upgrade_type is not None else params.version_for_stack_feature_checks
   if effective_version:
     effective_version = format_stack_version(effective_version)
 
-  if effective_version and check_stack_feature(StackFeature.SPARK_JAVA_OPTS_SUPPORT, effective_version):
+  if check_stack_feature(StackFeature.SPARK_JAVA_OPTS_SUPPORT, effective_version):
     File(os.path.join(params.spark_conf, 'java-opts'),
       owner=params.spark_user,
       group=params.spark_group,
@@ -130,7 +130,7 @@ def setup_spark(env, type, upgrade_type=None, action=None, config_dir=None):
       action="delete"
     )
 
-  if params.spark_thrift_fairscheduler_content and effective_version and check_stack_feature(StackFeature.SPARK_16PLUS, effective_version):
+  if params.spark_thrift_fairscheduler_content and check_stack_feature(StackFeature.SPARK_16PLUS, effective_version):
     # create spark-thrift-fairscheduler.xml
     File(os.path.join(config_dir,"spark-thrift-fairscheduler.xml"),
       owner=params.spark_user,

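setup_spark.py and spark_service.py (next hunk) share one idiom after this
change: during an upgrade the command's explicit version wins, otherwise the
computed feature-check version is used. Schematically:

    # Schematic excerpt: params.version is only set on upgrade commands.
    effective_version = (params.version if upgrade_type is not None
                         else params.version_for_stack_feature_checks)
    if effective_version:
        effective_version = format_stack_version(effective_version)
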
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/spark_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/spark_service.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/spark_service.py
index 31a296a..2838186 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/spark_service.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/spark_service.py
@@ -34,11 +34,11 @@ def spark_service(name, upgrade_type=None, action=None):
 
   if action == 'start':
 
-    effective_version = params.version if upgrade_type is not None else params.stack_version_formatted
+    effective_version = params.version if upgrade_type is not None else params.version_for_stack_feature_checks
     if effective_version:
       effective_version = format_stack_version(effective_version)
 
-    if name == 'jobhistoryserver' and effective_version and check_stack_feature(StackFeature.SPARK_16PLUS, effective_version):
+    if name == 'jobhistoryserver' and check_stack_feature(StackFeature.SPARK_16PLUS, effective_version):
       # copy spark-hdp-assembly.jar to hdfs
       copy_to_hdfs("spark", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
       # create spark history directory
@@ -58,7 +58,7 @@ def spark_service(name, upgrade_type=None, action=None):
 
     # Spark 1.3.1.2.3, and higher, which was included in HDP 2.3, does not have a dependency on Tez, so it does not
     # need to copy the tarball, otherwise, copy it.
-    if params.stack_version_formatted and check_stack_feature(StackFeature.TEZ_FOR_SPARK, params.stack_version_formatted):
+    if check_stack_feature(StackFeature.TEZ_FOR_SPARK, params.version_for_stack_feature_checks):
       resource_created = copy_to_hdfs("tez", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
       if resource_created:
         params.HdfsResource(None, action="execute")

http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
index 47d2a81..6675c58 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
@@ -409,7 +409,6 @@ public class ComponentVersionCheckActionTest {
     Map<String, String> commandParams = new HashMap<>();
     commandParams.put(FinalizeUpgradeAction.UPGRADE_DIRECTION_KEY, "upgrade");
     commandParams.put(FinalizeUpgradeAction.VERSION_KEY, targetRepo);
-    commandParams.put(FinalizeUpgradeAction.ORIGINAL_STACK_KEY, sourceStack.getStackId());
     commandParams.put(FinalizeUpgradeAction.TARGET_STACK_KEY, targetStack.getStackId());
 
     ExecutionCommand executionCommand = new ExecutionCommand();

http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
index c9c0dd0..037e47b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
@@ -677,7 +677,6 @@ public class UpgradeActionTest {
     Map<String, String> commandParams = new HashMap<>();
     commandParams.put(FinalizeUpgradeAction.UPGRADE_DIRECTION_KEY, "upgrade");
     commandParams.put(FinalizeUpgradeAction.VERSION_KEY, targetRepo);
-    commandParams.put(FinalizeUpgradeAction.ORIGINAL_STACK_KEY, sourceStack.getStackId());
     commandParams.put(FinalizeUpgradeAction.TARGET_STACK_KEY, targetStack.getStackId());
 
     ExecutionCommand executionCommand = new ExecutionCommand();
@@ -860,7 +859,6 @@ public class UpgradeActionTest {
     Map<String, String> commandParams = new HashMap<>();
     commandParams.put(FinalizeUpgradeAction.UPGRADE_DIRECTION_KEY, "upgrade");
     commandParams.put(FinalizeUpgradeAction.VERSION_KEY, targetRepo);
-    commandParams.put(FinalizeUpgradeAction.ORIGINAL_STACK_KEY, sourceStack.getStackId());
     commandParams.put(FinalizeUpgradeAction.TARGET_STACK_KEY, targetStack.getStackId());
 
     ExecutionCommand executionCommand = new ExecutionCommand();

http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptorTest.java
index 6b80623..0da9088 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptorTest.java
@@ -25,6 +25,11 @@ import static org.easymock.EasyMock.replay;
 import static org.easymock.EasyMock.verify;
 import static org.junit.Assert.assertEquals;
 
+import java.lang.reflect.Field;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.TreeMap;
+
 import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper;
 import org.apache.ambari.server.actionmanager.HostRoleCommand;
 import org.apache.ambari.server.agent.ExecutionCommand;
@@ -33,6 +38,7 @@ import org.apache.ambari.server.orm.dao.ArtifactDAO;
 import org.apache.ambari.server.orm.entities.ArtifactEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptorUpdateHelper;
@@ -45,11 +51,6 @@ import org.powermock.api.mockito.PowerMockito;
 import org.powermock.core.classloader.annotations.PrepareForTest;
 import org.powermock.modules.junit4.PowerMockRunner;
 
-import java.lang.reflect.Field;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.TreeMap;
-
 /**
  * Tests UpgradeUserKerberosDescriptor logic
  */
@@ -63,6 +64,7 @@ public class UpgradeUserKerberosDescriptorTest {
   private ArtifactDAO artifactDAO;
 
   private TreeMap<String, Field> fields = new TreeMap<>();
+  private StackId HDP_24 = new StackId("HDP", "2.4");
 
   @Before
   public void setup() throws Exception {
@@ -74,6 +76,7 @@ public class UpgradeUserKerberosDescriptorTest {
 
     expect(clusters.getCluster((String) anyObject())).andReturn(cluster).anyTimes();
     expect(cluster.getClusterId()).andReturn(1l).atLeastOnce();
+    expect(cluster.getCurrentStackVersion()).andReturn(HDP_24).atLeastOnce();
     replay(clusters, cluster);
 
     prepareFields();
@@ -83,10 +86,9 @@ public class UpgradeUserKerberosDescriptorTest {
   @Test
   public void testUpgrade() throws Exception {
 
-    Map<String, String> commandParams = new HashMap<String, String>();
+    Map<String, String> commandParams = new HashMap<>();
     commandParams.put("clusterName", "c1");
     commandParams.put("upgrade_direction", "UPGRADE");
-    commandParams.put("original_stack", "HDP-2.4");
     commandParams.put("target_stack", "HDP-2.5");
 
     ExecutionCommand executionCommand = new ExecutionCommand();
@@ -141,10 +143,9 @@ public class UpgradeUserKerberosDescriptorTest {
   @Test
   public void testDowngrade() throws Exception {
 
-    Map<String, String> commandParams = new HashMap<String, String>();
+    Map<String, String> commandParams = new HashMap<>();
     commandParams.put("clusterName", "c1");
     commandParams.put("upgrade_direction", "DOWNGRADE");
-    commandParams.put("original_stack", "HDP-2.4");
     commandParams.put("target_stack", "HDP-2.5");
 
     ExecutionCommand executionCommand = new ExecutionCommand();

http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/test/python/TestStackFeature.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/TestStackFeature.py b/ambari-server/src/test/python/TestStackFeature.py
index 0116a7a..230734c 100644
--- a/ambari-server/src/test/python/TestStackFeature.py
+++ b/ambari-server/src/test/python/TestStackFeature.py
@@ -28,6 +28,32 @@ from unittest import TestCase
 Logger.initialize_logger()
 
 class TestStackFeature(TestCase):
+  """
+  EU Upgrade (HDP 2.5 to HDP 2.6)
+    - STOP
+      hostLevelParams/stack_name = HDP
+      hostLevelParams/stack_version = 2.5
+      hostLevelParams/current_version = 2.5.0.0-1237
+      commandParams/version = 2.5.0.0-1237
+    - START
+      hostLevelParams/stack_name = HDP
+      hostLevelParams/stack_version = 2.6
+      hostLevelParams/current_version = 2.5.0.0-1237
+      commandParams/version = 2.6.0.0-334
+
+  EU Downgrade (HDP 2.6 to HDP 2.5)
+    - STOP
+      hostLevelParams/stack_name = HDP
+      hostLevelParams/stack_version = 2.6
+      hostLevelParams/current_version = 2.5.0.0-1237
+      commandParams/version = 2.6.0.0-334
+    - START
+      hostLevelParams/stack_name = HDP
+      hostLevelParams/stack_version = 2.5
+      hostLevelParams/current_version = 2.5.0.0-1237
+      commandParams/version = 2.5.0.0-1237
+  """
+
   def test_get_stack_feature_version_missing_params(self):
     try:
       stack_feature_version = get_stack_feature_version({})
@@ -122,7 +148,7 @@ class TestStackFeature(TestCase):
         "current_version":  "2.4.0.0-1234"
       },
       "commandParams": {
-        "original_stack": "2.4",
+        "source_stack": "2.4",
         "target_stack": "2.5",
         "upgrade_direction": "upgrade",
         "version": "2.5.9.9-9999"
@@ -143,8 +169,8 @@ class TestStackFeature(TestCase):
         "current_version":"2.4.0.0-1234"
       },
       "commandParams":{
-        "original_stack":"2.4",
-        "target_stack":"2.5",
+        "source_stack":"2.5",
+        "target_stack":"2.4",
         "upgrade_direction":"downgrade",
         "version":"2.4.0.0-1234",
         "downgrade_from_version": "2.5.9.9-9999"
@@ -166,10 +192,10 @@ class TestStackFeature(TestCase):
         "current_version":"2.4.0.0-1234"
       },
       "commandParams":{
-        "original_stack":"2.4",
-        "target_stack":"2.5",
+        "source_stack":"2.5",
+        "target_stack":"2.4",
         "upgrade_direction":"downgrade",
-        "version":"2.4.0.0-1234",
+        "version":"2.5.9.9-9999",
         "downgrade_from_version":"2.5.9.9-9999"
       }
     }
@@ -189,10 +215,10 @@ class TestStackFeature(TestCase):
         "custom_command":"STOP"
       },
       "commandParams":{
-        "original_stack":"2.4",
-        "target_stack":"2.5",
+        "source_stack":"2.5",
+        "target_stack":"2.4",
         "upgrade_direction":"downgrade",
-        "version":"2.4.0.0-1234",
+        "version":"2.5.9.9-9999",
         "downgrade_from_version":"2.5.9.9-9999"
       }
     }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/test/python/custom_actions/test_ru_set_all.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/custom_actions/test_ru_set_all.py b/ambari-server/src/test/python/custom_actions/test_ru_set_all.py
index e1a89a8..8e03b7f 100644
--- a/ambari-server/src/test/python/custom_actions/test_ru_set_all.py
+++ b/ambari-server/src/test/python/custom_actions/test_ru_set_all.py
@@ -225,7 +225,7 @@ class TestRUSetAll(RMFTestCase):
       # alter JSON for a downgrade from 2.3 to 2.2
       json_payload['commandParams']['version'] = "2.2.0.0-1234"
       json_payload['commandParams']['downgrade_from_version'] = "2.3.0.0-1234"
-      json_payload['commandParams']['original_stack'] = "HDP-2.2"
+      json_payload['commandParams']['source_stack'] = "HDP-2.2"
       json_payload['commandParams']['target_stack'] = "HDP-2.3"
       json_payload['commandParams']['upgrade_direction'] = "downgrade"
       json_payload['hostLevelParams']['stack_version'] = "2.2"
@@ -263,7 +263,7 @@ class TestRUSetAll(RMFTestCase):
 
       json_payload['commandParams']['version'] = "2.3.0.0-1234"
       json_payload['commandParams']['downgrade_from_version'] = "2.3.0.0-5678"
-      json_payload['commandParams']['original_stack'] = "HDP-2.3"
+      json_payload['commandParams']['source_stack'] = "HDP-2.3"
       json_payload['commandParams']['target_stack'] = "HDP-2.3"
       json_payload['commandParams']['upgrade_direction'] = "downgrade"
       json_payload['hostLevelParams']['stack_version'] = "2.3"
@@ -291,7 +291,7 @@ class TestRUSetAll(RMFTestCase):
       # alter JSON for a downgrade from 2.2 to 2.2
       json_payload['commandParams']['version'] = "2.2.0.0-1234"
       json_payload['commandParams']['downgrade_from_version'] = "2.2.0.0-5678"
-      json_payload['commandParams']['original_stack'] = "HDP-2.2"
+      json_payload['commandParams']['source_stack'] = "HDP-2.2"
       json_payload['commandParams']['target_stack'] = "HDP-2.2"
       json_payload['commandParams']['upgrade_direction'] = "downgrade"
       json_payload['hostLevelParams']['stack_version'] = "2.2"

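[Editor's note] The original_stack -> source_stack rename above is mechanical, but the semantics are worth pinning down: source_stack is the stack the cluster came from, target_stack the stack it upgraded to, so a downgrade moves from target back to source. A hedged sketch with hypothetical helper names (not the actual custom_actions/scripts/ru_set_all.py code), using the first payload from this diff:

# Sketch only -- stacks_for_downgrade is a hypothetical helper.
json_payload = {
  "commandParams": {
    "version": "2.2.0.0-1234",
    "downgrade_from_version": "2.3.0.0-1234",
    "source_stack": "HDP-2.2",   # formerly "original_stack"
    "target_stack": "HDP-2.3",
    "upgrade_direction": "downgrade",
  }
}

def stacks_for_downgrade(command_params):
  """Return (from_stack, to_stack) for a downgrade command."""
  # A downgrade runs from the upgraded-to stack back to the source stack.
  return command_params["target_stack"], command_params["source_stack"]

assert stacks_for_downgrade(json_payload["commandParams"]) == ("HDP-2.3", "HDP-2.2")
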
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json
index 7f77d83..3aadf2c 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json
@@ -25,7 +25,7 @@
         "upgrade_type": "nonrolling_upgrade",
         "version": "2.3.2.0-2844", 
         "forceRefreshConfigTagsBeforeExecution": "*", 
-        "original_stack": "HDP-2.2", 
+        "source_stack": "HDP-2.2",
         "command_timeout": "1200", 
         "target_stack": "HDP-2.3", 
         "desired_namenode_role": "standby", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json
index 87b18af..2d48ff6 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json
@@ -25,7 +25,7 @@
         "upgrade_type": "nonrolling_upgrade",
         "version": "2.3.2.0-2844", 
         "forceRefreshConfigTagsBeforeExecution": "*", 
-        "original_stack": "HDP-2.2", 
+        "source_stack": "HDP-2.2",
         "command_timeout": "1200", 
         "target_stack": "HDP-2.3", 
         "desired_namenode_role": "standby", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json b/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json
index 99fcba0..021695b 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json
@@ -13,7 +13,7 @@
         "upgrade_type": "nonrolling_upgrade",
         "version": "2.3.2.0-2950", 
         "forceRefreshConfigTagsBeforeExecution": "*", 
-        "original_stack": "HDP-2.3", 
+        "source_stack": "HDP-2.3",
         "command_timeout": "1200", 
         "target_stack": "HDP-2.3", 
         "script_type": "PYTHON"

http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json b/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json
index a9db11c..1805c3b 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json
@@ -22,7 +22,7 @@
         "upgrade_type": "rolling_upgrade",
         "command_retry_max_attempt_count": "3", 
         "version": "2.3.0.0-2096", 
-        "original_stack": "HDP-2.3", 
+        "source_stack": "HDP-2.3",
         "command_retry_enabled": "false", 
         "command_timeout": "1200", 
         "target_stack": "HDP-2.3", 


[41/50] [abbrv] ambari git commit: AMBARI-21488 Default Base URL should be there for OS_TYPE=redhat-ppc6 in IBM Power and change the OS_TYPE to redhat7-ppc64. (atkach)

Posted by jo...@apache.org.
AMBARI-21488 Default Base URL should be there for OS_TYPE=redhat-ppc6 in IBM Power and change the OS_TYPE to redhat7-ppc64. (atkach)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/55da4262
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/55da4262
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/55da4262

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 55da4262661dc1b3d123a9d3230a220e6d2bd9bf
Parents: b1c3784
Author: Andrii Tkach <at...@apache.org>
Authored: Mon Jul 17 15:20:36 2017 +0300
Committer: Andrii Tkach <at...@apache.org>
Committed: Mon Jul 17 15:20:36 2017 +0300

----------------------------------------------------------------------
 .../scripts/controllers/stackVersions/StackVersionsCreateCtrl.js  | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/55da4262/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
index b3c27dc..eed5372 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
@@ -195,8 +195,7 @@ angular.module('ambariAdminConsole')
           if (!existingOSHash[stackOs.OperatingSystems.os_type]) {
             stackOs.selected = false;
             stackOs.repositories.forEach(function(repo) {
-              repo.Repositories.base_url = '';
-              repo.Repositories.initial_base_url = '';
+              repo.Repositories.initial_base_url = repo.Repositories.default_base_url;
             });
             $scope.osList.push(stackOs);
           }


[33/50] [abbrv] ambari git commit: Merge remote-tracking branch 'origin/branch-2.5' into branch-2.5

Posted by jo...@apache.org.
Merge remote-tracking branch 'origin/branch-2.5' into branch-2.5


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d5392fd0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d5392fd0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d5392fd0

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: d5392fd053e87f0e2a30934f1fdae375e5097da0
Parents: bc57667 2076801
Author: Eugene Chekanskiy <ec...@hortonworks.com>
Authored: Sun Jul 16 19:45:57 2017 +0300
Committer: Eugene Chekanskiy <ec...@hortonworks.com>
Committed: Sun Jul 16 19:45:57 2017 +0300

----------------------------------------------------------------------
 .../controllers/groups/GroupsEditCtrl.js        |    3 +
 ambari-agent/conf/unix/install-helper.sh        |    8 +
 ambari-agent/etc/init.d/ambari-agent            |   22 +-
 ambari-agent/pom.xml                            |   13 +-
 .../src/main/package/rpm/posttrans_agent.sh     |    7 +
 .../ambari_agent/AlertSchedulerHandler.py       |   10 +-
 .../python/ambari_agent/alerts/base_alert.py    |    8 +-
 .../python/ambari_agent/alerts/port_alert.py    |  107 +-
 ambari-agent/src/packages/tarball/all.xml       |    2 +-
 .../ambari_agent/TestAlertSchedulerHandler.py   |   17 +-
 .../test/python/ambari_agent/TestHeartbeat.py   |   40 +
 .../resource_management/TestPackageResource.py  |   41 +
 .../ambari_commons/resources/os_family.json     |    3 +-
 .../core/providers/package/__init__.py          |    2 +-
 .../core/providers/package/apt.py               |   10 +-
 .../core/providers/package/choco.py             |    4 +-
 .../core/providers/package/yumrpm.py            |    9 +-
 .../core/providers/package/zypper.py            |    9 +-
 .../core/resources/packaging.py                 |    6 +
 .../libraries/functions/conf_select.py          |   57 +-
 .../libraries/functions/mounted_dirs_helper.py  |    1 +
 .../libraries/functions/stack_features.py       |   41 +-
 .../libraries/functions/stack_tools.py          |   54 +-
 .../libraries/providers/hdfs_resource.py        |    9 +-
 .../libraries/script/script.py                  |   19 +-
 ambari-metrics/ambari-metrics-common/pom.xml    |    4 +
 .../timeline/AbstractTimelineMetricsSink.java   |   76 +-
 .../metrics2/sink/timeline/Precision.java       |    2 +-
 .../cache/HandleConnectExceptionTest.java       |   76 +-
 .../ambari-metrics/datasource.js                |    5 +-
 .../kafka/KafkaTimelineMetricsReporter.java     |    6 +-
 .../timeline/HBaseTimelineMetricStore.java      |   15 +-
 .../metrics/timeline/PhoenixHBaseAccessor.java  |   69 +-
 .../timeline/TimelineMetricConfiguration.java   |   24 +
 .../timeline/TimelineMetricsAggregatorSink.java |   60 +
 .../timeline/PhoenixHBaseAccessorTest.java      |   73 +
 .../timeline/TestTimelineMetricStore.java       |    1 +
 .../TimelineMetricsAggregatorMemorySink.java    |  141 +
 ambari-server/pom.xml                           |   10 +-
 ambari-server/sbin/ambari-server                |    6 +-
 ambari-server/src/main/assemblies/server.xml    |   10 +
 .../apache/ambari/annotations/Experimental.java |    6 +
 .../ambari/annotations/ExperimentalFeature.java |    7 +-
 .../actionmanager/ExecutionCommandWrapper.java  |   46 +
 .../server/agent/AlertDefinitionCommand.java    |    7 +-
 .../ambari/server/agent/ExecutionCommand.java   |   15 +-
 .../ambari/server/agent/HeartBeatHandler.java   |    4 +-
 .../alerts/ComponentVersionAlertRunnable.java   |    4 +-
 .../eventcreator/UpgradeEventCreator.java       |    2 +-
 .../ambari/server/checks/CheckDescription.java  |   42 +-
 .../checks/ComponentsExistInRepoCheck.java      |  142 +
 .../checks/DatabaseConsistencyCheckHelper.java  |   14 +
 .../ambari/server/checks/JavaVersionCheck.java  |  102 +
 .../server/checks/PreviousUpgradeCompleted.java |   11 +-
 .../controller/ActionExecutionContext.java      |   28 +
 .../controller/AmbariActionExecutionHelper.java |   23 +-
 .../AmbariCustomCommandExecutionHelper.java     |   29 +-
 .../AmbariManagementControllerImpl.java         |   14 +-
 .../server/controller/ExecuteCommandJson.java   |    4 +
 .../server/controller/KerberosHelperImpl.java   |    8 +-
 .../internal/AbstractProviderModule.java        |   49 +-
 .../BlueprintConfigurationProcessor.java        |  232 +-
 .../internal/ClientConfigResourceProvider.java  |    2 +
 .../ClusterStackVersionResourceProvider.java    |  187 +-
 .../PreUpgradeCheckResourceProvider.java        |    9 +-
 .../internal/RequestResourceProvider.java       |   12 +-
 .../internal/UpgradeResourceProvider.java       |  416 +-
 .../server/controller/jmx/JMXHostProvider.java  |   15 +
 .../controller/jmx/JMXPropertyProvider.java     |   25 +
 .../listeners/upgrade/StackVersionListener.java |  225 +-
 .../system/impl/AmbariMetricSinkImpl.java       |    3 +
 .../system/impl/DatabaseMetricsSource.java      |    4 +-
 .../metrics/system/impl/JvmMetricsSource.java   |   12 +-
 .../metrics/system/impl/MetricsServiceImpl.java |    5 +-
 .../dispatchers/AlertScriptDispatcher.java      |    7 +-
 .../apache/ambari/server/orm/DBAccessor.java    |   41 +-
 .../ambari/server/orm/DBAccessorImpl.java       |  123 +-
 .../server/orm/dao/RepositoryVersionDAO.java    |   19 +
 .../orm/entities/RepositoryVersionEntity.java   |   26 +-
 .../server/orm/entities/UpgradeEntity.java      |   46 +-
 .../server/orm/helpers/dbms/DbmsHelper.java     |   10 +
 .../orm/helpers/dbms/GenericDbmsHelper.java     |   12 +
 .../server/orm/helpers/dbms/H2Helper.java       |   10 +
 .../LdapToPamMigrationHelper.java               |   73 +
 .../server/security/authorization/Users.java    |    4 +
 .../upgrades/ChangeStackReferencesAction.java   |  110 +
 .../upgrades/ComponentVersionCheckAction.java   |   20 +-
 .../upgrades/FinalizeUpgradeAction.java         |  178 +-
 .../upgrades/UpdateDesiredStackAction.java      |  111 +-
 .../upgrades/UpgradeUserKerberosDescriptor.java |   41 +-
 .../ambari/server/stack/StackManager.java       |   14 +-
 .../state/ClientConfigFileDefinition.java       |   15 +-
 .../org/apache/ambari/server/state/Cluster.java |   12 +
 .../ambari/server/state/ConfigHelper.java       |   47 +-
 .../org/apache/ambari/server/state/Host.java    |    4 +-
 .../ambari/server/state/UpgradeContext.java     |  240 +-
 .../server/state/UpgradeContextFactory.java     |   14 +-
 .../ambari/server/state/UpgradeHelper.java      |   56 +-
 .../server/state/alert/AlertDefinitionHash.java |   14 +-
 .../server/state/cluster/ClusterImpl.java       |  119 +-
 .../server/state/cluster/ClustersImpl.java      |   25 +-
 .../ambari/server/state/host/HostImpl.java      |   29 +-
 .../KerberosDescriptorUpdateHelper.java         |    9 +-
 .../services/AlertNoticeDispatchService.java    |   95 +-
 .../services/RetryUpgradeActionService.java     |    4 +-
 .../state/stack/upgrade/HostOrderGrouping.java  |    5 +-
 .../ambari/server/topology/AmbariContext.java   |   66 +-
 .../server/topology/BlueprintValidatorImpl.java |   88 +-
 .../ambari/server/topology/HostRequest.java     |   12 +-
 .../ambari/server/topology/LogicalRequest.java  |   35 +-
 .../ambari/server/topology/PersistedState.java  |    8 +-
 .../server/topology/PersistedStateImpl.java     |   18 +-
 .../ambari/server/topology/TopologyManager.java |   30 +-
 .../server/topology/TopologyValidator.java      |    2 +-
 .../RequiredConfigPropertiesValidator.java      |  142 +
 .../validators/TopologyValidatorFactory.java    |    3 +-
 .../server/upgrade/UpgradeCatalog220.java       |   10 +-
 .../server/upgrade/UpgradeCatalog251.java       |    3 +-
 .../server/upgrade/UpgradeCatalog252.java       |  105 +
 ambari-server/src/main/python/ambari-server.py  |   10 +-
 .../ambari_server/dbConfiguration_linux.py      |   34 +-
 .../main/python/ambari_server/serverUpgrade.py  |   22 +-
 .../main/python/ambari_server/setupActions.py   |    1 +
 .../main/python/ambari_server/setupSecurity.py  |  119 +-
 .../main/resources/Ambari-DDL-Derby-CREATE.sql  |    8 +-
 .../main/resources/Ambari-DDL-MySQL-CREATE.sql  |    8 +-
 .../main/resources/Ambari-DDL-Oracle-CREATE.sql |    8 +-
 .../resources/Ambari-DDL-Postgres-CREATE.sql    |    8 +-
 .../resources/Ambari-DDL-SQLAnywhere-CREATE.sql |    8 +-
 .../resources/Ambari-DDL-SQLServer-CREATE.sql   |    8 +-
 .../1.6.1.2.2.0/configuration/accumulo-env.xml  |    5 +
 .../package/scripts/accumulo_configuration.py   |    3 +
 .../1.6.1.2.2.0/package/scripts/params.py       |    5 +-
 .../package/templates/accumulo_jaas.conf.j2     |   29 +
 .../ATLAS/0.1.0.2.3/package/scripts/params.py   |    4 +-
 .../HBASE/0.96.0.2.0/package/scripts/hbase.py   |   12 +-
 .../0.96.0.2.0/package/scripts/hbase_master.py  |   10 +-
 .../0.96.0.2.0/package/scripts/hbase_service.py |   40 +-
 .../0.96.0.2.0/package/scripts/params_linux.py  |   12 +
 .../HBASE/0.96.0.2.0/role_command_order.json    |    9 +
 .../2.1.0.2.0/package/scripts/hdfs_namenode.py  |    4 +-
 .../package/scripts/namenode_upgrade.py         |    2 +-
 .../2.1.0.2.0/package/scripts/params_linux.py   |    4 +
 .../HIVE/0.12.0.2.0/metainfo.xml                |   20 +-
 .../package/alerts/alert_hive_metastore.py      |   11 +-
 .../package/alerts/alert_llap_app_status.py     |   12 +-
 .../HIVE/0.12.0.2.0/package/scripts/hive.py     |   11 +-
 .../0.12.0.2.0/package/scripts/params_linux.py  |   11 +
 .../0.12.0.2.0/package/scripts/service_check.py |    3 +-
 .../0.12.0.2.0/package/scripts/status_params.py |    6 +
 .../common-services/JNBG/0.2.0/alerts.json      |   32 +
 .../JNBG/0.2.0/configuration/jnbg-env.xml       |  209 +
 .../common-services/JNBG/0.2.0/kerberos.json    |   59 +
 .../common-services/JNBG/0.2.0/metainfo.xml     |  108 +
 .../JNBG/0.2.0/package/files/jkg_install.sh     |  169 +
 .../JNBG/0.2.0/package/files/jkg_start.sh       |   84 +
 .../JNBG/0.2.0/package/files/log4j_setup.sh     |   79 +
 .../0.2.0/package/files/pyspark_configure.sh    |  104 +
 .../JNBG/0.2.0/package/files/pythonenv_setup.sh |  138 +
 .../JNBG/0.2.0/package/files/toree_configure.sh |  151 +
 .../JNBG/0.2.0/package/files/toree_install.sh   |  176 +
 .../JNBG/0.2.0/package/scripts/jkg_toree.py     |  134 +
 .../0.2.0/package/scripts/jkg_toree_params.py   |  177 +
 .../JNBG/0.2.0/package/scripts/jnbg_helpers.py  |   81 +
 .../JNBG/0.2.0/package/scripts/jnbg_params.py   |   66 +
 .../JNBG/0.2.0/package/scripts/py_client.py     |   63 +
 .../0.2.0/package/scripts/py_client_params.py   |   39 +
 .../JNBG/0.2.0/package/scripts/service_check.py |   44 +
 .../JNBG/0.2.0/package/scripts/status_params.py |   26 +
 .../0.10.0/configuration/ranger-kafka-audit.xml |   58 +
 .../common-services/KAFKA/0.10.0/kerberos.json  |   79 +
 .../common-services/KAFKA/0.10.0/metainfo.xml   |   28 +
 .../KAFKA/0.8.1/package/scripts/kafka.py        |   12 +
 .../KAFKA/0.8.1/package/scripts/params.py       |    3 +
 .../KAFKA/0.9.0/configuration/kafka-broker.xml  |    2 +-
 .../0.5.0.2.2/package/scripts/params_linux.py   |    8 +
 .../KNOX/0.5.0.2.2/package/scripts/upgrade.py   |    2 +-
 .../package/alerts/alert_check_oozie_server.py  |    8 +-
 .../OOZIE/4.0.0.2.0/package/scripts/oozie.py    |   26 +-
 .../4.0.0.2.0/package/scripts/oozie_server.py   |    4 +-
 .../package/scripts/oozie_server_upgrade.py     |   15 +-
 .../4.0.0.2.0/package/scripts/params_linux.py   |   15 +-
 .../PXF/3.0.0/configuration/pxf-profiles.xml    |   23 +
 .../R4ML/0.8.0/configuration/r4ml-env.xml       |   48 +
 .../common-services/R4ML/0.8.0/metainfo.xml     |   92 +
 .../R4ML/0.8.0/package/files/Install.R          |   25 +
 .../R4ML/0.8.0/package/files/ServiceCheck.R     |   28 +
 .../R4ML/0.8.0/package/files/localr.repo        |   22 +
 .../R4ML/0.8.0/package/scripts/__init__.py      |   19 +
 .../R4ML/0.8.0/package/scripts/params.py        |   80 +
 .../R4ML/0.8.0/package/scripts/r4ml_client.py   |  201 +
 .../R4ML/0.8.0/package/scripts/service_check.py |   45 +
 .../RANGER/0.4.0/package/scripts/params.py      |    4 +-
 .../0.4.0/package/scripts/setup_ranger_xml.py   |   12 +-
 .../0.5.0/configuration/ranger-ugsync-site.xml  |    3 +
 .../SPARK/1.2.1/package/scripts/params.py       |   11 +-
 .../SPARK/1.2.1/package/scripts/setup_spark.py  |    6 +-
 .../1.2.1/package/scripts/spark_service.py      |    6 +-
 .../STORM/0.9.1/package/scripts/nimbus.py       |    8 +-
 .../SYSTEMML/0.10.0/metainfo.xml                |   77 +
 .../SYSTEMML/0.10.0/package/scripts/__init__.py |   19 +
 .../SYSTEMML/0.10.0/package/scripts/params.py   |   40 +
 .../0.10.0/package/scripts/service_check.py     |   43 +
 .../0.10.0/package/scripts/systemml_client.py   |   49 +
 .../common-services/TITAN/1.0.0/alerts.json     |   33 +
 .../1.0.0/configuration/gremlin-server.xml      |   85 +
 .../TITAN/1.0.0/configuration/hadoop-gryo.xml   |   94 +
 .../1.0.0/configuration/hadoop-hbase-read.xml   |  102 +
 .../TITAN/1.0.0/configuration/titan-env.xml     |  157 +
 .../1.0.0/configuration/titan-hbase-solr.xml    |   69 +
 .../TITAN/1.0.0/configuration/titan-log4j.xml   |   65 +
 .../common-services/TITAN/1.0.0/kerberos.json   |   52 +
 .../common-services/TITAN/1.0.0/metainfo.xml    |  124 +
 .../package/alerts/alert_check_titan_server.py  |   65 +
 .../package/files/gremlin-server-script.sh      |   86 +
 .../package/files/tinkergraph-empty.properties  |   18 +
 .../TITAN/1.0.0/package/files/titanSmoke.groovy |   20 +
 .../TITAN/1.0.0/package/scripts/params.py       |  202 +
 .../1.0.0/package/scripts/params_server.py      |   37 +
 .../1.0.0/package/scripts/service_check.py      |   88 +
 .../TITAN/1.0.0/package/scripts/titan.py        |  143 +
 .../TITAN/1.0.0/package/scripts/titan_client.py |   61 +
 .../TITAN/1.0.0/package/scripts/titan_server.py |   67 +
 .../1.0.0/package/scripts/titan_service.py      |  150 +
 .../templates/titan_solr_client_jaas.conf.j2    |   23 +
 .../package/templates/titan_solr_jaas.conf.j2   |   26 +
 .../0.6.0.2.5/configuration/zeppelin-config.xml |    7 +
 .../0.6.0.2.5/configuration/zeppelin-env.xml    |    5 +
 .../0.6.0.2.5/package/scripts/master.py         |   21 +-
 .../0.6.0.2.5/package/scripts/params.py         |   23 +
 .../system_action_definitions.xml               |   10 +
 .../scripts/force_remove_packages.py            |   56 +
 .../custom_actions/scripts/install_packages.py  |   13 +-
 .../resources/host_scripts/alert_disk_space.py  |   10 +-
 .../host_scripts/alert_version_select.py        |   16 +-
 .../4.0/blueprints/multinode-default.json       |  182 +
 .../4.0/blueprints/singlenode-default.json      |  133 +
 .../4.0/configuration/cluster-env.xml           |  338 +
 .../4.0/hooks/after-INSTALL/scripts/hook.py     |   38 +
 .../4.0/hooks/after-INSTALL/scripts/params.py   |   88 +
 .../scripts/shared_initialization.py            |   89 +
 .../hooks/before-ANY/files/changeToSecureUid.sh |   63 +
 .../4.0/hooks/before-ANY/scripts/hook.py        |   36 +
 .../4.0/hooks/before-ANY/scripts/params.py      |  226 +
 .../before-ANY/scripts/shared_initialization.py |  242 +
 .../4.0/hooks/before-INSTALL/scripts/hook.py    |   37 +
 .../4.0/hooks/before-INSTALL/scripts/params.py  |  111 +
 .../scripts/repo_initialization.py              |   63 +
 .../scripts/shared_initialization.py            |   34 +
 .../4.0/hooks/before-RESTART/scripts/hook.py    |   29 +
 .../hooks/before-START/files/checkForFormat.sh  |   65 +
 .../before-START/files/fast-hdfs-resource.jar   |  Bin 0 -> 28296598 bytes
 .../before-START/files/task-log4j.properties    |  134 +
 .../hooks/before-START/files/topology_script.py |   66 +
 .../4.0/hooks/before-START/scripts/hook.py      |   40 +
 .../4.0/hooks/before-START/scripts/params.py    |  211 +
 .../before-START/scripts/rack_awareness.py      |   71 +
 .../scripts/shared_initialization.py            |  152 +
 .../templates/commons-logging.properties.j2     |   43 +
 .../templates/exclude_hosts_list.j2             |   21 +
 .../templates/hadoop-metrics2.properties.j2     |   88 +
 .../before-START/templates/health_check.j2      |   81 +
 .../templates/include_hosts_list.j2             |   21 +
 .../templates/topology_mappings.data.j2         |   24 +
 .../stacks/BigInsights/4.0/kerberos.json        |   68 +
 .../stacks/BigInsights/4.0/metainfo.xml         |   22 +
 .../4.0/properties/stack_features.json          |  214 +
 .../BigInsights/4.0/properties/stack_tools.json |   14 +
 .../stacks/BigInsights/4.0/repos/repoinfo.xml   |   35 +
 .../BigInsights/4.0/role_command_order.json     |   70 +
 .../4.0/services/AMBARI_METRICS/alerts.json     |  183 +
 .../AMBARI_METRICS/configuration/ams-env.xml    |  114 +
 .../configuration/ams-hbase-env.xml             |  245 +
 .../configuration/ams-hbase-log4j.xml           |  147 +
 .../configuration/ams-hbase-policy.xml          |   56 +
 .../configuration/ams-hbase-security-site.xml   |  167 +
 .../configuration/ams-hbase-site.xml            |  431 +
 .../AMBARI_METRICS/configuration/ams-log4j.xml  |   66 +
 .../AMBARI_METRICS/configuration/ams-site.xml   |  578 ++
 .../4.0/services/AMBARI_METRICS/kerberos.json   |  122 +
 .../4.0/services/AMBARI_METRICS/metainfo.xml    |  147 +
 .../4.0/services/AMBARI_METRICS/metrics.json    | 2472 +++++
 .../alerts/alert_ambari_metrics_monitor.py      |  104 +
 .../package/files/hbaseSmokeVerify.sh           |   34 +
 .../files/service-metrics/AMBARI_METRICS.txt    |  245 +
 .../package/files/service-metrics/FLUME.txt     |   17 +
 .../package/files/service-metrics/HBASE.txt     |  588 ++
 .../package/files/service-metrics/HDFS.txt      |  277 +
 .../package/files/service-metrics/HOST.txt      |   37 +
 .../package/files/service-metrics/KAFKA.txt     |  190 +
 .../package/files/service-metrics/STORM.txt     |    7 +
 .../package/files/service-metrics/YARN.txt      |  178 +
 .../AMBARI_METRICS/package/scripts/__init__.py  |   19 +
 .../AMBARI_METRICS/package/scripts/ams.py       |  388 +
 .../package/scripts/ams_service.py              |  103 +
 .../AMBARI_METRICS/package/scripts/functions.py |   51 +
 .../AMBARI_METRICS/package/scripts/hbase.py     |  267 +
 .../package/scripts/hbase_master.py             |   70 +
 .../package/scripts/hbase_regionserver.py       |   66 +
 .../package/scripts/hbase_service.py            |   53 +
 .../package/scripts/metrics_collector.py        |  133 +
 .../package/scripts/metrics_monitor.py          |   58 +
 .../AMBARI_METRICS/package/scripts/params.py    |  254 +
 .../package/scripts/params_linux.py             |   50 +
 .../package/scripts/params_windows.py           |   53 +
 .../package/scripts/service_check.py            |  165 +
 .../package/scripts/service_mapping.py          |   22 +
 .../package/scripts/split_points.py             |  236 +
 .../AMBARI_METRICS/package/scripts/status.py    |   46 +
 .../package/scripts/status_params.py            |   39 +
 .../package/templates/ams.conf.j2               |   35 +
 .../templates/ams_collector_jaas.conf.j2        |   26 +
 .../templates/ams_zookeeper_jaas.conf.j2        |   26 +
 .../hadoop-metrics2-hbase.properties.j2         |   63 +
 .../package/templates/hbase_client_jaas.conf.j2 |   23 +
 .../templates/hbase_grant_permissions.j2        |   39 +
 .../package/templates/hbase_master_jaas.conf.j2 |   26 +
 .../templates/hbase_regionserver_jaas.conf.j2   |   26 +
 .../package/templates/metric_groups.conf.j2     |   37 +
 .../package/templates/metric_monitor.ini.j2     |   31 +
 .../package/templates/regionservers.j2          |   20 +
 .../package/templates/smoketest_metrics.json.j2 |   15 +
 .../BigInsights/4.0/services/FLUME/alerts.json  |   27 +
 .../services/FLUME/configuration/flume-conf.xml |   38 +
 .../services/FLUME/configuration/flume-env.xml  |   94 +
 .../BigInsights/4.0/services/FLUME/metainfo.xml |   69 +
 .../BigInsights/4.0/services/FLUME/metrics.json |  430 +
 .../package/alerts/alert_flume_agent_status.py  |  106 +
 .../4.0/services/FLUME/package/scripts/flume.py |  228 +
 .../FLUME/package/scripts/flume_check.py        |   40 +
 .../FLUME/package/scripts/flume_handler.py      |  145 +
 .../FLUME/package/scripts/flume_upgrade.py      |   94 +
 .../services/FLUME/package/scripts/params.py    |  101 +
 .../FLUME/package/scripts/params_linux.py       |   30 +
 .../templates/flume-metrics2.properties.j2      |   26 +
 .../FLUME/package/templates/flume.conf.j2       |   24 +
 .../FLUME/package/templates/log4j.properties.j2 |   67 +
 .../BigInsights/4.0/services/HBASE/alerts.json  |  157 +
 .../services/HBASE/configuration/hbase-env.xml  |  183 +
 .../configuration/hbase-javaopts-properties.xml |   28 +
 .../HBASE/configuration/hbase-log4j.xml         |  144 +
 .../HBASE/configuration/hbase-policy.xml        |   56 +
 .../services/HBASE/configuration/hbase-site.xml |  732 ++
 .../4.0/services/HBASE/kerberos.json            |  159 +
 .../BigInsights/4.0/services/HBASE/metainfo.xml |  161 +
 .../BigInsights/4.0/services/HBASE/metrics.json | 9410 +++++++++++++++++
 .../HBASE/package/files/draining_servers.rb     |  164 +
 .../HBASE/package/files/hbaseSmokeVerify.sh     |   34 +
 .../services/HBASE/package/scripts/__init__.py  |   19 +
 .../services/HBASE/package/scripts/functions.py |   54 +
 .../4.0/services/HBASE/package/scripts/hbase.py |  232 +
 .../HBASE/package/scripts/hbase_client.py       |   65 +
 .../HBASE/package/scripts/hbase_decommission.py |   74 +
 .../HBASE/package/scripts/hbase_master.py       |  129 +
 .../HBASE/package/scripts/hbase_regionserver.py |  131 +
 .../package/scripts/hbase_restgatewayserver.py  |   84 +
 .../HBASE/package/scripts/hbase_service.py      |   51 +
 .../HBASE/package/scripts/hbase_upgrade.py      |   37 +
 .../services/HBASE/package/scripts/params.py    |  197 +
 .../HBASE/package/scripts/service_check.py      |   78 +
 .../HBASE/package/scripts/status_params.py      |   44 +
 .../services/HBASE/package/scripts/upgrade.py   |   52 +
 ...-metrics2-hbase.properties-GANGLIA-MASTER.j2 |  109 +
 ...doop-metrics2-hbase.properties-GANGLIA-RS.j2 |  107 +
 .../HBASE/package/templates/hbase-smoke.sh.j2   |   44 +
 .../HBASE/package/templates/hbase.conf.j2       |   35 +
 .../package/templates/hbase_client_jaas.conf.j2 |   23 +
 .../templates/hbase_grant_permissions.j2        |   39 +
 .../package/templates/hbase_master_jaas.conf.j2 |   26 +
 .../templates/hbase_regionserver_jaas.conf.j2   |   26 +
 .../package/templates/hbase_rest_jaas.conf.j2   |   26 +
 .../HBASE/package/templates/regionservers.j2    |   20 +
 .../BigInsights/4.0/services/HBASE/widgets.json |  510 +
 .../BigInsights/4.0/services/HDFS/alerts.json   |  657 ++
 .../services/HDFS/configuration/core-site.xml   |  203 +
 .../services/HDFS/configuration/hadoop-env.xml  |  322 +
 .../HDFS/configuration/hadoop-policy.xml        |  145 +
 .../services/HDFS/configuration/hdfs-log4j.xml  |  202 +
 .../services/HDFS/configuration/hdfs-site.xml   |  669 ++
 .../services/HDFS/configuration/ssl-client.xml  |   65 +
 .../services/HDFS/configuration/ssl-server.xml  |   72 +
 .../BigInsights/4.0/services/HDFS/kerberos.json |  242 +
 .../BigInsights/4.0/services/HDFS/metainfo.xml  |  234 +
 .../BigInsights/4.0/services/HDFS/metrics.json  | 7769 +++++++++++++++
 .../package/alerts/alert_checkpoint_time.py     |  146 +
 .../package/alerts/alert_ha_namenode_health.py  |  176 +
 .../HDFS/package/files/checkForFormat.sh        |   70 +
 .../services/HDFS/package/files/checkWebUI.py   |   56 +
 .../scripts/balancer-emulator/hdfs-command.py   |   45 +
 .../services/HDFS/package/scripts/datanode.py   |  144 +
 .../HDFS/package/scripts/datanode_upgrade.py    |  114 +
 .../4.0/services/HDFS/package/scripts/hdfs.py   |  129 +
 .../HDFS/package/scripts/hdfs_client.py         |  112 +
 .../HDFS/package/scripts/hdfs_datanode.py       |   75 +
 .../HDFS/package/scripts/hdfs_namenode.py       |  483 +
 .../HDFS/package/scripts/hdfs_nfsgateway.py     |   72 +
 .../HDFS/package/scripts/hdfs_rebalance.py      |  130 +
 .../HDFS/package/scripts/hdfs_snamenode.py      |   50 +
 .../HDFS/package/scripts/journalnode.py         |  169 +
 .../HDFS/package/scripts/journalnode_upgrade.py |  136 +
 .../services/HDFS/package/scripts/namenode.py   |  319 +
 .../HDFS/package/scripts/namenode_ha_state.py   |  205 +
 .../HDFS/package/scripts/namenode_upgrade.py    |  262 +
 .../services/HDFS/package/scripts/nfsgateway.py |  138 +
 .../4.0/services/HDFS/package/scripts/params.py |  326 +
 .../HDFS/package/scripts/service_check.py       |  119 +
 .../services/HDFS/package/scripts/snamenode.py  |  142 +
 .../HDFS/package/scripts/status_params.py       |   42 +
 .../4.0/services/HDFS/package/scripts/utils.py  |  357 +
 .../services/HDFS/package/scripts/zkfc_slave.py |  148 +
 .../package/templates/exclude_hosts_list.j2     |   21 +
 .../HDFS/package/templates/hdfs.conf.j2         |   35 +
 .../services/HDFS/package/templates/slaves.j2   |   21 +
 .../BigInsights/4.0/services/HDFS/widgets.json  |  428 +
 .../BigInsights/4.0/services/HIVE/alerts.json   |  111 +
 .../services/HIVE/configuration/hcat-env.xml    |   58 +
 .../services/HIVE/configuration/hive-env.xml    |  211 +
 .../HIVE/configuration/hive-exec-log4j.xml      |  119 +
 .../services/HIVE/configuration/hive-log4j.xml  |  137 +
 .../services/HIVE/configuration/hive-site.xml   | 1248 +++
 .../services/HIVE/configuration/webhcat-env.xml |   55 +
 .../HIVE/configuration/webhcat-log4j.xml        |   79 +
 .../HIVE/configuration/webhcat-site.xml         |  188 +
 .../HIVE/etc/hive-schema-0.12.0.mysql.sql       |  777 ++
 .../HIVE/etc/hive-schema-0.12.0.oracle.sql      |  717 ++
 .../HIVE/etc/hive-schema-0.12.0.postgres.sql    | 1405 +++
 .../HIVE/etc/hive-schema-0.13.0.mysql.sql       |  889 ++
 .../HIVE/etc/hive-schema-0.13.0.oracle.sql      |  834 ++
 .../HIVE/etc/hive-schema-0.13.0.postgres.sql    | 1537 +++
 .../HIVE/etc/hive-schema-0.14.0.mysql.sql       |  889 ++
 .../HIVE/etc/hive-schema-0.14.0.oracle.sql      |  833 ++
 .../HIVE/etc/hive-schema-0.14.0.postgres.sql    | 1541 +++
 .../etc/upgrade-0.12.0-to-0.13.0.oracle.sql     |  165 +
 .../services/HIVE/etc/upgrade-0.13.0.oracle.sql |   38 +
 .../BigInsights/4.0/services/HIVE/kerberos.json |  112 +
 .../BigInsights/4.0/services/HIVE/metainfo.xml  |  327 +
 .../HIVE/package/alerts/alert_hive_metastore.py |  184 +
 .../package/alerts/alert_hive_thrift_port.py    |  265 +
 .../HIVE/package/alerts/alert_webhcat_server.py |  242 +
 .../package/etc/hive-schema-0.12.0.mysql.sql    |  777 ++
 .../package/etc/hive-schema-0.12.0.oracle.sql   |  717 ++
 .../package/etc/hive-schema-0.12.0.postgres.sql | 1405 +++
 .../services/HIVE/package/files/addMysqlUser.sh |   37 +
 .../services/HIVE/package/files/hcatSmoke.sh    |   36 +
 .../services/HIVE/package/files/hiveSmoke.sh    |   24 +
 .../services/HIVE/package/files/hiveserver2.sql |   23 +
 .../HIVE/package/files/hiveserver2Smoke.sh      |   32 +
 .../4.0/services/HIVE/package/files/pigSmoke.sh |   18 +
 .../HIVE/package/files/removeMysqlUser.sh       |   33 +
 .../HIVE/package/files/startMetastore.sh        |   25 +
 .../HIVE/package/files/templetonSmoke.sh        |  106 +
 .../services/HIVE/package/scripts/__init__.py   |   19 +
 .../4.0/services/HIVE/package/scripts/hcat.py   |   73 +
 .../HIVE/package/scripts/hcat_client.py         |   50 +
 .../HIVE/package/scripts/hcat_service_check.py  |   78 +
 .../4.0/services/HIVE/package/scripts/hive.py   |  393 +
 .../HIVE/package/scripts/hive_client.py         |   81 +
 .../HIVE/package/scripts/hive_metastore.py      |  199 +
 .../HIVE/package/scripts/hive_server.py         |  166 +
 .../HIVE/package/scripts/hive_server_upgrade.py |  174 +
 .../HIVE/package/scripts/hive_service.py        |  139 +
 .../HIVE/package/scripts/mysql_server.py        |   64 +
 .../HIVE/package/scripts/mysql_service.py       |   52 +
 .../HIVE/package/scripts/mysql_users.py         |   69 +
 .../HIVE/package/scripts/mysql_utils.py         |   34 +
 .../4.0/services/HIVE/package/scripts/params.py |  418 +
 .../HIVE/package/scripts/postgresql_server.py   |  109 +
 .../HIVE/package/scripts/postgresql_service.py  |   39 +
 .../HIVE/package/scripts/service_check.py       |   91 +
 .../HIVE/package/scripts/status_params.py       |   87 +
 .../services/HIVE/package/scripts/webhcat.py    |  117 +
 .../HIVE/package/scripts/webhcat_server.py      |  146 +
 .../HIVE/package/scripts/webhcat_service.py     |   60 +
 .../package/scripts/webhcat_service_check.py    |  117 +
 .../HIVE/package/templates/hive.conf.j2         |   35 +
 .../package/templates/startHiveserver2.sh.j2    |   24 +
 .../package/templates/templeton_smoke.pig.j2    |   24 +
 .../BigInsights/4.0/services/KAFKA/alerts.json  |   32 +
 .../KAFKA/configuration/kafka-broker.xml        |  478 +
 .../services/KAFKA/configuration/kafka-env.xml  |   73 +
 .../KAFKA/configuration/kafka-log4j.xml         |  117 +
 .../4.0/services/KAFKA/kerberos.json            |   49 +
 .../BigInsights/4.0/services/KAFKA/metainfo.xml |   83 +
 .../BigInsights/4.0/services/KAFKA/metrics.json |  264 +
 .../4.0/services/KAFKA/package/scripts/kafka.py |  239 +
 .../KAFKA/package/scripts/kafka_broker.py       |  111 +
 .../KAFKA/package/scripts/kafka_upgrade.py      |   38 +
 .../services/KAFKA/package/scripts/params.py    |  115 +
 .../KAFKA/package/scripts/service_check.py      |   59 +
 .../KAFKA/package/scripts/status_params.py      |   26 +
 .../services/KAFKA/package/scripts/upgrade.py   |   88 +
 .../4.0/services/KAFKA/package/scripts/utils.py |   38 +
 .../KAFKA/package/templates/kafka_jaas.conf.j2  |   41 +
 .../KERBEROS/configuration/kerberos-env.xml     |  326 +
 .../KERBEROS/configuration/krb5-conf.xml        |  113 +
 .../4.0/services/KERBEROS/kerberos.json         |   17 +
 .../4.0/services/KERBEROS/metainfo.xml          |  147 +
 .../KERBEROS/package/scripts/kerberos_client.py |   79 +
 .../KERBEROS/package/scripts/kerberos_common.py |  473 +
 .../KERBEROS/package/scripts/kerberos_server.py |  141 +
 .../services/KERBEROS/package/scripts/params.py |  200 +
 .../KERBEROS/package/scripts/service_check.py   |   81 +
 .../KERBEROS/package/scripts/status_params.py   |   32 +
 .../services/KERBEROS/package/scripts/utils.py  |  105 +
 .../KERBEROS/package/templates/kadm5_acl.j2     |   20 +
 .../KERBEROS/package/templates/kdc_conf.j2      |   30 +
 .../KERBEROS/package/templates/krb5_conf.j2     |   55 +
 .../BigInsights/4.0/services/KNOX/alerts.json   |   32 +
 .../KNOX/configuration/gateway-log4j.xml        |   84 +
 .../KNOX/configuration/gateway-site.xml         |   75 +
 .../services/KNOX/configuration/knox-env.xml    |   68 +
 .../services/KNOX/configuration/ldap-log4j.xml  |   67 +
 .../services/KNOX/configuration/topology.xml    |  158 +
 .../services/KNOX/configuration/users-ldif.xml  |  139 +
 .../BigInsights/4.0/services/KNOX/kerberos.json |   62 +
 .../BigInsights/4.0/services/KNOX/metainfo.xml  |   88 +
 .../KNOX/package/files/validateKnoxStatus.py    |   42 +
 .../4.0/services/KNOX/package/scripts/knox.py   |  134 +
 .../KNOX/package/scripts/knox_gateway.py        |  290 +
 .../services/KNOX/package/scripts/knox_ldap.py  |   54 +
 .../4.0/services/KNOX/package/scripts/ldap.py   |   55 +
 .../4.0/services/KNOX/package/scripts/params.py |  172 +
 .../KNOX/package/scripts/service_check.py       |   92 +
 .../KNOX/package/scripts/status_params.py       |   50 +
 .../services/KNOX/package/scripts/upgrade.py    |   72 +
 .../package/templates/krb5JAASLogin.conf.j2     |   29 +
 .../BigInsights/4.0/services/OOZIE/alerts.json  |   45 +
 .../services/OOZIE/configuration/oozie-env.xml  |  201 +
 .../OOZIE/configuration/oozie-log4j.xml         |  147 +
 .../services/OOZIE/configuration/oozie-site.xml |  416 +
 .../4.0/services/OOZIE/kerberos.json            |   70 +
 .../BigInsights/4.0/services/OOZIE/metainfo.xml |  176 +
 .../package/alerts/alert_check_oozie_server.py  |  211 +
 .../services/OOZIE/package/files/oozieSmoke2.sh |   88 +
 .../files/prepareOozieHdfsDirectories.sh        |   45 +
 .../OOZIE/package/files/wrap_ooziedb.sh         |   31 +
 .../4.0/services/OOZIE/package/scripts/oozie.py |  279 +
 .../OOZIE/package/scripts/oozie_client.py       |   76 +
 .../OOZIE/package/scripts/oozie_server.py       |  193 +
 .../package/scripts/oozie_server_upgrade.py     |  300 +
 .../OOZIE/package/scripts/oozie_service.py      |  124 +
 .../services/OOZIE/package/scripts/params.py    |  259 +
 .../OOZIE/package/scripts/service_check.py      |  140 +
 .../OOZIE/package/scripts/status_params.py      |   47 +
 .../OOZIE/package/templates/adminusers.txt.j2   |   28 +
 .../package/templates/oozie-log4j.properties.j2 |   93 +
 .../4.0/services/PIG/configuration/pig-env.xml  |   39 +
 .../services/PIG/configuration/pig-log4j.xml    |   66 +
 .../PIG/configuration/pig-properties.xml        |  632 ++
 .../BigInsights/4.0/services/PIG/kerberos.json  |   17 +
 .../BigInsights/4.0/services/PIG/metainfo.xml   |   86 +
 .../4.0/services/PIG/package/files/pigSmoke.sh  |   18 +
 .../4.0/services/PIG/package/scripts/params.py  |   25 +
 .../PIG/package/scripts/params_linux.py         |   88 +
 .../4.0/services/PIG/package/scripts/pig.py     |   61 +
 .../services/PIG/package/scripts/pig_client.py  |   59 +
 .../PIG/package/scripts/service_check.py        |  123 +
 .../SLIDER/configuration/slider-client.xml      |   61 +
 .../SLIDER/configuration/slider-env.xml         |   44 +
 .../SLIDER/configuration/slider-log4j.xml       |   90 +
 .../4.0/services/SLIDER/metainfo.xml            |  135 +
 .../SLIDER/package/files/hbaseSmokeVerify.sh    |   34 +
 .../services/SLIDER/package/scripts/__init__.py |   19 +
 .../services/SLIDER/package/scripts/params.py   |   53 +
 .../SLIDER/package/scripts/service_check.py     |   42 +
 .../services/SLIDER/package/scripts/slider.py   |   60 +
 .../SLIDER/package/scripts/slider_client.py     |   62 +
 .../package/templates/storm-slider-env.sh.j2    |   38 +
 .../services/SOLR/configuration/solr-env.xml    |  216 +
 .../services/SOLR/configuration/solr-log4j.xml  |   83 +
 .../services/SOLR/configuration/solr-site.xml   |   47 +
 .../BigInsights/4.0/services/SOLR/kerberos.json |   47 +
 .../BigInsights/4.0/services/SOLR/metainfo.xml  |   74 +
 .../services/SOLR/package/scripts/__init__.py   |   19 +
 .../4.0/services/SOLR/package/scripts/params.py |  182 +
 .../SOLR/package/scripts/service_check.py       |   60 +
 .../4.0/services/SOLR/package/scripts/solr.py   |  143 +
 .../SOLR/package/scripts/solr_client.py         |   36 +
 .../SOLR/package/scripts/solr_server.py         |  118 +
 .../SOLR/package/scripts/solr_service.py        |   59 +
 .../SOLR/package/scripts/solr_upgrade.py        |  135 +
 .../SOLR/package/scripts/status_params.py       |   34 +
 .../services/SOLR/package/templates/solr.xml.j2 |   51 +
 .../SOLR/package/templates/solr_jaas.conf.j2    |   26 +
 .../BigInsights/4.0/services/SPARK/alerts.json  |   32 +
 .../SPARK/configuration/spark-defaults.xml      |  175 +
 .../services/SPARK/configuration/spark-env.xml  |  116 +
 .../configuration/spark-javaopts-properties.xml |   28 +
 .../SPARK/configuration/spark-log4j.xml         |   43 +
 .../configuration/spark-metrics-properties.xml  |  161 +
 .../4.0/services/SPARK/kerberos.json            |   55 +
 .../BigInsights/4.0/services/SPARK/metainfo.xml |  187 +
 .../SPARK/package/scripts/job_history_server.py |  167 +
 .../services/SPARK/package/scripts/params.py    |  199 +
 .../SPARK/package/scripts/service_check.py      |   78 +
 .../4.0/services/SPARK/package/scripts/spark.py |  351 +
 .../SPARK/package/scripts/spark_client.py       |   61 +
 .../package/scripts/spark_thrift_server.py      |  125 +
 .../SPARK/package/scripts/status_params.py      |   41 +
 .../package/templates/spark-defaults.conf.j2    |   43 +
 .../services/SQOOP/configuration/sqoop-env.xml  |   62 +
 .../BigInsights/4.0/services/SQOOP/metainfo.xml |   93 +
 .../services/SQOOP/package/scripts/__init__.py  |   19 +
 .../services/SQOOP/package/scripts/params.py    |   95 +
 .../SQOOP/package/scripts/service_check.py      |   44 +
 .../4.0/services/SQOOP/package/scripts/sqoop.py |   84 +
 .../SQOOP/package/scripts/sqoop_client.py       |   57 +
 .../4.0/services/YARN/MAPREDUCE2_metrics.json   | 2596 +++++
 .../4.0/services/YARN/YARN_metrics.json         | 3486 +++++++
 .../4.0/services/YARN/YARN_widgets.json         |  617 ++
 .../BigInsights/4.0/services/YARN/alerts.json   |  398 +
 .../YARN/configuration-mapred/mapred-env.xml    |   87 +
 .../YARN/configuration-mapred/mapred-site.xml   |  519 +
 .../YARN/configuration/capacity-scheduler.xml   |  172 +
 .../services/YARN/configuration/yarn-env.xml    |  253 +
 .../services/YARN/configuration/yarn-log4j.xml  |   72 +
 .../services/YARN/configuration/yarn-site.xml   |  820 ++
 .../BigInsights/4.0/services/YARN/kerberos.json |  208 +
 .../BigInsights/4.0/services/YARN/metainfo.xml  |  264 +
 .../package/alerts/alert_nodemanager_health.py  |  143 +
 .../alerts/alert_nodemanagers_summary.py        |  122 +
 .../files/validateYarnComponentStatus.py        |  170 +
 .../services/YARN/package/scripts/__init__.py   |   20 +
 .../scripts/application_timeline_server.py      |  139 +
 .../YARN/package/scripts/historyserver.py       |  155 +
 .../package/scripts/mapred_service_check.py     |   80 +
 .../YARN/package/scripts/mapreduce2_client.py   |   56 +
 .../YARN/package/scripts/nodemanager.py         |  144 +
 .../YARN/package/scripts/nodemanager_upgrade.py |   74 +
 .../4.0/services/YARN/package/scripts/params.py |  224 +
 .../YARN/package/scripts/resourcemanager.py     |  179 +
 .../services/YARN/package/scripts/service.py    |   76 +
 .../YARN/package/scripts/service_check.py       |   89 +
 .../YARN/package/scripts/status_params.py       |   44 +
 .../4.0/services/YARN/package/scripts/yarn.py   |  277 +
 .../YARN/package/scripts/yarn_client.py         |   56 +
 .../package/templates/container-executor.cfg.j2 |   40 +
 .../package/templates/exclude_hosts_list.j2     |   21 +
 .../YARN/package/templates/mapreduce.conf.j2    |   35 +
 .../package/templates/taskcontroller.cfg.j2     |   38 +
 .../YARN/package/templates/yarn.conf.j2         |   35 +
 .../4.0/services/ZOOKEEPER/alerts.json          |   58 +
 .../ZOOKEEPER/configuration/zoo.cfg.xml         |   91 +
 .../ZOOKEEPER/configuration/zookeeper-env.xml   |   77 +
 .../ZOOKEEPER/configuration/zookeeper-log4j.xml |  102 +
 .../4.0/services/ZOOKEEPER/kerberos.json        |   39 +
 .../4.0/services/ZOOKEEPER/metainfo.xml         |   91 +
 .../services/ZOOKEEPER/package/files/zkEnv.sh   |   96 +
 .../ZOOKEEPER/package/files/zkServer.sh         |  120 +
 .../ZOOKEEPER/package/files/zkService.sh        |   26 +
 .../services/ZOOKEEPER/package/files/zkSmoke.sh |   93 +
 .../ZOOKEEPER/package/scripts/__init__.py       |   20 +
 .../ZOOKEEPER/package/scripts/params.py         |   96 +
 .../ZOOKEEPER/package/scripts/service_check.py  |   53 +
 .../ZOOKEEPER/package/scripts/status_params.py  |   43 +
 .../ZOOKEEPER/package/scripts/zookeeper.py      |  114 +
 .../package/scripts/zookeeper_client.py         |   71 +
 .../package/scripts/zookeeper_server.py         |  161 +
 .../package/scripts/zookeeper_service.py        |   58 +
 .../package/templates/configuration.xsl.j2      |   42 +
 .../ZOOKEEPER/package/templates/zoo.cfg.j2      |   53 +
 .../templates/zookeeper_client_jaas.conf.j2     |   23 +
 .../package/templates/zookeeper_jaas.conf.j2    |   26 +
 .../BigInsights/4.0/services/stack_advisor.py   |   24 +
 .../4.0/stack-advisor/stack_advisor_206.py      | 2006 ++++
 .../4.0/stack-advisor/stack_advisor_21.py       |  259 +
 .../4.0/stack-advisor/stack_advisor_22.py       | 1713 ++++
 .../4.0/stack-advisor/stack_advisor_23.py       |  995 ++
 .../4.0/stack-advisor/stack_advisor_24.py       |   29 +
 .../4.0/stack-advisor/stack_advisor_25.py       | 1939 ++++
 .../stacks/BigInsights/4.0/widgets.json         |   95 +
 .../stacks/BigInsights/4.1/kerberos.json        |   47 +
 .../stacks/BigInsights/4.1/metainfo.xml         |   23 +
 .../stacks/BigInsights/4.1/repos/repoinfo.xml   |   44 +
 .../4.1/repos/repoinfo.xml.amd64_RH6            |   32 +
 .../4.1/repos/repoinfo.xml.amd64_RH7            |   32 +
 .../4.1/repos/repoinfo.xml.amd64_SLES           |   32 +
 .../4.1/repos/repoinfo.xml.ppc64le_RH7          |   32 +
 .../4.1/repos/repoinfo.xml.s390x_RH7            |   32 +
 .../BigInsights/4.1/role_command_order.json     |   22 +
 .../4.1/services/AMBARI_METRICS/metainfo.xml    |   27 +
 .../services/FLUME/configuration/flume-env.xml  |   72 +
 .../BigInsights/4.1/services/FLUME/metainfo.xml |   36 +
 .../BigInsights/4.1/services/HBASE/metainfo.xml |   45 +
 .../4.1/services/HBASE/themes/theme.json        |  367 +
 .../services/HDFS/configuration/hadoop-env.xml  |  168 +
 .../services/HDFS/configuration/hdfs-site.xml   |   48 +
 .../BigInsights/4.1/services/HDFS/metainfo.xml  |  127 +
 .../4.1/services/HDFS/themes/theme.json         |  179 +
 .../BigInsights/4.1/services/HDFS/widgets.json  |  644 ++
 .../services/HIVE/configuration/hive-env.xml    |  196 +
 .../services/HIVE/configuration/hive-site.xml   |  356 +
 .../BigInsights/4.1/services/HIVE/metainfo.xml  |  106 +
 .../4.1/services/HIVE/themes/theme.json         |  327 +
 .../BigInsights/4.1/services/KAFKA/metainfo.xml |   27 +
 .../4.1/services/KERBEROS/metainfo.xml          |   26 +
 .../BigInsights/4.1/services/KNOX/metainfo.xml  |   46 +
 .../services/OOZIE/configuration/oozie-site.xml |   65 +
 .../BigInsights/4.1/services/OOZIE/metainfo.xml |  144 +
 .../BigInsights/4.1/services/PIG/metainfo.xml   |   38 +
 .../4.1/services/SLIDER/metainfo.xml            |   46 +
 .../BigInsights/4.1/services/SOLR/metainfo.xml  |   29 +
 .../BigInsights/4.1/services/SPARK/metainfo.xml |   52 +
 .../BigInsights/4.1/services/SQOOP/metainfo.xml |   45 +
 .../4.1/services/YARN/YARN_widgets.json         |  676 ++
 .../YARN/configuration-mapred/mapred-site.xml   |   53 +
 .../services/YARN/configuration/yarn-site.xml   |   46 +
 .../BigInsights/4.1/services/YARN/metainfo.xml  |   82 +
 .../4.1/services/YARN/themes-mapred/theme.json  |  132 +
 .../4.1/services/YARN/themes/theme.json         |  250 +
 .../4.1/services/ZOOKEEPER/metainfo.xml         |   38 +
 .../BigInsights/4.1/services/stack_advisor.py   |   37 +
 .../4.2.5/hooks/after-INSTALL/scripts/hook.py   |   37 +
 .../4.2.5/hooks/after-INSTALL/scripts/params.py |  101 +
 .../scripts/shared_initialization.py            |  108 +
 .../hooks/before-ANY/files/changeToSecureUid.sh |   63 +
 .../4.2.5/hooks/before-ANY/scripts/hook.py      |   36 +
 .../4.2.5/hooks/before-ANY/scripts/params.py    |  241 +
 .../before-ANY/scripts/shared_initialization.py |  253 +
 .../4.2.5/hooks/before-INSTALL/scripts/hook.py  |   37 +
 .../hooks/before-INSTALL/scripts/params.py      |  113 +
 .../scripts/repo_initialization.py              |   70 +
 .../scripts/shared_initialization.py            |   37 +
 .../4.2.5/hooks/before-RESTART/scripts/hook.py  |   29 +
 .../hooks/before-START/files/checkForFormat.sh  |   65 +
 .../before-START/files/fast-hdfs-resource.jar   |  Bin 0 -> 28296598 bytes
 .../before-START/files/task-log4j.properties    |  134 +
 .../hooks/before-START/files/topology_script.py |   66 +
 .../before-START/scripts/custom_extensions.py   |  168 +
 .../4.2.5/hooks/before-START/scripts/hook.py    |   41 +
 .../4.2.5/hooks/before-START/scripts/params.py  |  318 +
 .../before-START/scripts/rack_awareness.py      |   47 +
 .../scripts/shared_initialization.py            |  177 +
 .../templates/commons-logging.properties.j2     |   43 +
 .../templates/exclude_hosts_list.j2             |   21 +
 .../templates/hadoop-metrics2.properties.j2     |  108 +
 .../before-START/templates/health_check.j2      |   81 +
 .../templates/include_hosts_list.j2             |   21 +
 .../templates/topology_mappings.data.j2         |   24 +
 .../stacks/BigInsights/4.2.5/kerberos.json      |   47 +
 .../stacks/BigInsights/4.2.5/metainfo.xml       |   25 +
 .../stacks/BigInsights/4.2.5/repos/repoinfo.xml |   32 +
 .../4.2.5/repos/repoinfo.xml.amd64_RH6          |   32 +
 .../4.2.5/repos/repoinfo.xml.amd64_RH7          |   32 +
 .../4.2.5/repos/repoinfo.xml.amd64_SLES         |   32 +
 .../4.2.5/repos/repoinfo.xml.ppc64le_RH7        |   32 +
 .../BigInsights/4.2.5/role_command_order.json   |   31 +
 .../4.2.5/services/AMBARI_INFRA/metainfo.xml    |   26 +
 .../AMBARI_INFRA/role_command_order.json        |    7 +
 .../4.2.5/services/AMBARI_METRICS/metainfo.xml  |   27 +
 .../4.2.5/services/FLUME/metainfo.xml           |   39 +
 .../services/HBASE/configuration/hbase-env.xml  |  198 +
 .../services/HBASE/configuration/hbase-site.xml |  391 +
 .../HBASE/configuration/ranger-hbase-audit.xml  |  121 +
 .../ranger-hbase-plugin-properties.xml          |   83 +
 .../ranger-hbase-policymgr-ssl.xml              |   66 +
 .../configuration/ranger-hbase-security.xml     |   68 +
 .../4.2.5/services/HBASE/kerberos.json          |  212 +
 .../4.2.5/services/HBASE/metainfo.xml           |   88 +
 .../4.2.5/services/HBASE/metrics.json           | 9370 +++++++++++++++++
 .../services/HBASE/quicklinks/quicklinks.json   |  121 +
 .../4.2.5/services/HBASE/themes/theme.json      |  411 +
 .../4.2.5/services/HBASE/widgets.json           |  510 +
 .../services/HDFS/configuration/core-site.xml   |   53 +
 .../services/HDFS/configuration/hadoop-env.xml  |  212 +
 .../services/HDFS/configuration/hdfs-log4j.xml  |  225 +
 .../services/HDFS/configuration/hdfs-site.xml   |  148 +
 .../HDFS/configuration/ranger-hdfs-audit.xml    |  121 +
 .../ranger-hdfs-plugin-properties.xml           |   78 +
 .../configuration/ranger-hdfs-policymgr-ssl.xml |   66 +
 .../HDFS/configuration/ranger-hdfs-security.xml |   64 +
 .../4.2.5/services/HDFS/kerberos.json           |  247 +
 .../4.2.5/services/HDFS/metainfo.xml            |  138 +
 .../services/HDFS/quicklinks/quicklinks.json    |   92 +
 .../4.2.5/services/HDFS/themes/theme.json       |  179 +
 .../4.2.5/services/HDFS/widgets.json            |  649 ++
 .../HIVE/configuration/beeline-log4j2.xml       |   80 +
 .../hive-atlas-application.properties.xml       |   61 +
 .../services/HIVE/configuration/hive-env.xml    |  235 +
 .../HIVE/configuration/hive-exec-log4j2.xml     |  101 +
 .../HIVE/configuration/hive-interactive-env.xml |  257 +
 .../configuration/hive-interactive-site.xml     |  513 +
 .../services/HIVE/configuration/hive-log4j2.xml |  108 +
 .../services/HIVE/configuration/hive-site.xml   | 1772 ++++
 .../HIVE/configuration/hivemetastore-site.xml   |   43 +
 .../hiveserver2-interactive-site.xml            |   52 +
 .../HIVE/configuration/hiveserver2-site.xml     |  108 +
 .../HIVE/configuration/llap-cli-log4j2.xml      |  109 +
 .../HIVE/configuration/llap-daemon-log4j.xml    |  176 +
 .../HIVE/configuration/ranger-hive-audit.xml    |  183 +
 .../ranger-hive-plugin-properties.xml           |   62 +
 .../configuration/ranger-hive-policymgr-ssl.xml |   66 +
 .../HIVE/configuration/ranger-hive-security.xml |   68 +
 .../HIVE/configuration/webhcat-site.xml         |  128 +
 .../HIVE/etc/hive-schema-0.13.0.mysql.sql       |  889 ++
 .../HIVE/etc/hive-schema-0.13.0.oracle.sql      |  835 ++
 .../HIVE/etc/hive-schema-0.13.0.postgres.sql    | 1538 +++
 .../etc/upgrade-0.12.0-to-0.13.0.oracle.sql     |  165 +
 .../services/HIVE/etc/upgrade-0.13.0.oracle.sql |   38 +
 .../4.2.5/services/HIVE/kerberos.json           |  149 +
 .../4.2.5/services/HIVE/metainfo.xml            |  291 +
 .../services/HIVE/quicklinks/quicklinks.json    |   68 +
 .../4.2.5/services/HIVE/themes/theme.json       |  327 +
 .../4.2.5/services/JNBG/metainfo.xml            |   26 +
 .../KAFKA/configuration/ranger-kafka-audit.xml  |   58 +
 .../ranger-kafka-policymgr-ssl.xml              |   34 +
 .../4.2.5/services/KAFKA/kerberos.json          |   70 +
 .../4.2.5/services/KAFKA/metainfo.xml           |   56 +
 .../4.2.5/services/KERBEROS/metainfo.xml        |   27 +
 .../KNOX/configuration/gateway-site.xml         |   29 +
 .../services/KNOX/configuration/knox-env.xml    |   35 +
 .../KNOX/configuration/knoxsso-topology.xml     |  126 +
 .../KNOX/configuration/ranger-knox-audit.xml    |  121 +
 .../ranger-knox-plugin-properties.xml           |  157 +
 .../configuration/ranger-knox-policymgr-ssl.xml |   66 +
 .../KNOX/configuration/ranger-knox-security.xml |   58 +
 .../services/KNOX/configuration/topology.xml    |  215 +
 .../4.2.5/services/KNOX/kerberos.json           |   81 +
 .../4.2.5/services/KNOX/metainfo.xml            |   61 +
 .../4.2.5/services/LOGSEARCH/metainfo.xml       |   26 +
 .../services/LOGSEARCH/role_command_order.json  |    9 +
 .../4.2.5/services/OOZIE/metainfo.xml           |   65 +
 .../PIG/configuration/pig-properties.xml        |  632 ++
 .../BigInsights/4.2.5/services/PIG/metainfo.xml |   40 +
 .../4.2.5/services/R4ML/metainfo.xml            |   37 +
 .../configuration/ranger-tagsync-site.xml       |   46 +
 .../RANGER/configuration/ranger-ugsync-site.xml |   46 +
 .../4.2.5/services/RANGER/metainfo.xml          |   76 +
 .../RANGER_KMS/configuration/dbks-site.xml      |  104 +
 .../RANGER_KMS/configuration/kms-env.xml        |   44 +
 .../configuration/ranger-kms-audit.xml          |   85 +
 .../configuration/ranger-kms-policymgr-ssl.xml  |   34 +
 .../4.2.5/services/RANGER_KMS/kerberos.json     |   84 +
 .../4.2.5/services/RANGER_KMS/metainfo.xml      |   56 +
 .../RANGER_KMS/themes/theme_version_2.json      |  303 +
 .../4.2.5/services/SLIDER/metainfo.xml          |   46 +
 .../SPARK/configuration/spark-defaults.xml      |   32 +
 .../configuration/spark-thrift-sparkconf.xml    |   32 +
 .../4.2.5/services/SPARK/kerberos.json          |   70 +
 .../4.2.5/services/SPARK/metainfo.xml           |   67 +
 .../SPARK2/configuration/spark2-defaults.xml    |  130 +
 .../SPARK2/configuration/spark2-env.xml         |  146 +
 .../configuration/spark2-hive-site-override.xml |   67 +
 .../spark2-javaopts-properties.xml              |   29 +
 .../configuration/spark2-thrift-sparkconf.xml   |  168 +
 .../4.2.5/services/SPARK2/metainfo.xml          |  112 +
 .../sqoop-atlas-application.properties.xml      |   47 +
 .../services/SQOOP/configuration/sqoop-site.xml |   30 +
 .../4.2.5/services/SQOOP/kerberos.json          |   20 +
 .../4.2.5/services/SQOOP/metainfo.xml           |   47 +
 .../4.2.5/services/SYSTEMML/metainfo.xml        |   37 +
 .../4.2.5/services/TITAN/metainfo.xml           |   40 +
 .../4.2.5/services/YARN/YARN_widgets.json       |  670 ++
 .../YARN/configuration-mapred/mapred-env.xml    |   50 +
 .../YARN/configuration-mapred/mapred-site.xml   |  139 +
 .../YARN/configuration/capacity-scheduler.xml   |   70 +
 .../YARN/configuration/ranger-yarn-audit.xml    |  121 +
 .../ranger-yarn-plugin-properties.xml           |   82 +
 .../configuration/ranger-yarn-policymgr-ssl.xml |   66 +
 .../YARN/configuration/ranger-yarn-security.xml |   58 +
 .../services/YARN/configuration/yarn-env.xml    |  198 +
 .../services/YARN/configuration/yarn-log4j.xml  |  103 +
 .../services/YARN/configuration/yarn-site.xml   |  762 ++
 .../4.2.5/services/YARN/kerberos.json           |  278 +
 .../4.2.5/services/YARN/metainfo.xml            |  140 +
 .../YARN/quicklinks-mapred/quicklinks.json      |   92 +
 .../services/YARN/quicklinks/quicklinks.json    |   92 +
 .../services/YARN/themes-mapred/theme.json      |  132 +
 .../4.2.5/services/YARN/themes/theme.json       |  250 +
 .../4.2.5/services/ZOOKEEPER/metainfo.xml       |   37 +
 .../BigInsights/4.2.5/services/stack_advisor.py |  180 +
 .../4.2.5/upgrades/config-upgrade.xml           |  214 +
 .../upgrades/nonrolling-upgrade-to-hdp-2.6.xml  |  969 ++
 .../stacks/BigInsights/4.2/metainfo.xml         |   25 +
 .../stacks/BigInsights/4.2/repos/repoinfo.xml   |   44 +
 .../4.2/repos/repoinfo.xml.amd64_RH6            |   32 +
 .../4.2/repos/repoinfo.xml.amd64_RH7            |   32 +
 .../4.2/repos/repoinfo.xml.amd64_SLES           |   32 +
 .../4.2/repos/repoinfo.xml.ppc64le_RH7          |   32 +
 .../4.2/repos/repoinfo.xml.s390x_RH7            |   32 +
 .../BigInsights/4.2/role_command_order.json     |   31 +
 .../4.2/services/AMBARI_METRICS/alerts.json     |  183 +
 .../AMBARI_METRICS/configuration/ams-env.xml    |  114 +
 .../configuration/ams-hbase-env.xml             |  245 +
 .../configuration/ams-hbase-log4j.xml           |  147 +
 .../configuration/ams-hbase-policy.xml          |   56 +
 .../configuration/ams-hbase-security-site.xml   |  167 +
 .../configuration/ams-hbase-site.xml            |  431 +
 .../AMBARI_METRICS/configuration/ams-log4j.xml  |   66 +
 .../AMBARI_METRICS/configuration/ams-site.xml   |  578 ++
 .../4.2/services/AMBARI_METRICS/kerberos.json   |  122 +
 .../4.2/services/AMBARI_METRICS/metainfo.xml    |  147 +
 .../4.2/services/AMBARI_METRICS/metrics.json    | 2472 +++++
 .../alerts/alert_ambari_metrics_monitor.py      |  104 +
 .../package/files/hbaseSmokeVerify.sh           |   34 +
 .../files/service-metrics/AMBARI_METRICS.txt    |  245 +
 .../package/files/service-metrics/FLUME.txt     |   17 +
 .../package/files/service-metrics/HBASE.txt     |  588 ++
 .../package/files/service-metrics/HDFS.txt      |  277 +
 .../package/files/service-metrics/HOST.txt      |   37 +
 .../package/files/service-metrics/KAFKA.txt     |  190 +
 .../package/files/service-metrics/STORM.txt     |    7 +
 .../package/files/service-metrics/YARN.txt      |  178 +
 .../AMBARI_METRICS/package/scripts/__init__.py  |   19 +
 .../AMBARI_METRICS/package/scripts/ams.py       |  388 +
 .../package/scripts/ams_service.py              |  103 +
 .../AMBARI_METRICS/package/scripts/functions.py |   51 +
 .../AMBARI_METRICS/package/scripts/hbase.py     |  267 +
 .../package/scripts/hbase_master.py             |   70 +
 .../package/scripts/hbase_regionserver.py       |   66 +
 .../package/scripts/hbase_service.py            |   53 +
 .../package/scripts/metrics_collector.py        |  133 +
 .../package/scripts/metrics_monitor.py          |   59 +
 .../AMBARI_METRICS/package/scripts/params.py    |  257 +
 .../package/scripts/params_linux.py             |   50 +
 .../package/scripts/params_windows.py           |   53 +
 .../package/scripts/service_check.py            |  166 +
 .../package/scripts/service_mapping.py          |   22 +
 .../package/scripts/split_points.py             |  236 +
 .../AMBARI_METRICS/package/scripts/status.py    |   46 +
 .../package/scripts/status_params.py            |   39 +
 .../package/templates/ams.conf.j2               |   35 +
 .../templates/ams_collector_jaas.conf.j2        |   26 +
 .../templates/ams_zookeeper_jaas.conf.j2        |   26 +
 .../hadoop-metrics2-hbase.properties.j2         |   63 +
 .../package/templates/hbase_client_jaas.conf.j2 |   23 +
 .../templates/hbase_grant_permissions.j2        |   39 +
 .../package/templates/hbase_master_jaas.conf.j2 |   26 +
 .../templates/hbase_regionserver_jaas.conf.j2   |   26 +
 .../package/templates/metric_groups.conf.j2     |   37 +
 .../package/templates/metric_monitor.ini.j2     |   31 +
 .../package/templates/regionservers.j2          |   20 +
 .../package/templates/smoketest_metrics.json.j2 |   15 +
 .../BigInsights/4.2/services/FLUME/alerts.json  |   27 +
 .../services/FLUME/configuration/flume-conf.xml |   38 +
 .../services/FLUME/configuration/flume-env.xml  |  103 +
 .../4.2/services/FLUME/kerberos.json            |   51 +
 .../BigInsights/4.2/services/FLUME/metainfo.xml |   69 +
 .../BigInsights/4.2/services/FLUME/metrics.json |  430 +
 .../package/alerts/alert_flume_agent_status.py  |  106 +
 .../4.2/services/FLUME/package/scripts/flume.py |  229 +
 .../FLUME/package/scripts/flume_check.py        |   40 +
 .../FLUME/package/scripts/flume_handler.py      |  145 +
 .../FLUME/package/scripts/flume_upgrade.py      |   88 +
 .../services/FLUME/package/scripts/params.py    |  101 +
 .../FLUME/package/scripts/params_linux.py       |   30 +
 .../templates/flume-metrics2.properties.j2      |   26 +
 .../FLUME/package/templates/flume.conf.j2       |   24 +
 .../FLUME/package/templates/log4j.properties.j2 |   67 +
 .../BigInsights/4.2/services/HBASE/alerts.json  |  157 +
 .../services/HBASE/configuration/hbase-env.xml  |  205 +
 .../configuration/hbase-javaopts-properties.xml |   29 +
 .../HBASE/configuration/hbase-log4j.xml         |  147 +
 .../HBASE/configuration/hbase-policy.xml        |   56 +
 .../services/HBASE/configuration/hbase-site.xml |  816 ++
 .../HBASE/configuration/ranger-hbase-audit.xml  |  193 +
 .../ranger-hbase-plugin-properties.xml          |  234 +
 .../ranger-hbase-policymgr-ssl.xml              |   67 +
 .../configuration/ranger-hbase-security.xml     |   75 +
 .../4.2/services/HBASE/kerberos.json            |  188 +
 .../BigInsights/4.2/services/HBASE/metainfo.xml |  176 +
 .../BigInsights/4.2/services/HBASE/metrics.json | 9420 ++++++++++++++++++
 .../HBASE/package/files/draining_servers.rb     |  164 +
 .../HBASE/package/files/hbaseSmokeVerify.sh     |   34 +
 .../services/HBASE/package/scripts/__init__.py  |   19 +
 .../services/HBASE/package/scripts/functions.py |   54 +
 .../4.2/services/HBASE/package/scripts/hbase.py |  236 +
 .../HBASE/package/scripts/hbase_client.py       |   72 +
 .../HBASE/package/scripts/hbase_decommission.py |   74 +
 .../HBASE/package/scripts/hbase_master.py       |  131 +
 .../HBASE/package/scripts/hbase_regionserver.py |  132 +
 .../package/scripts/hbase_restgatewayserver.py  |   83 +
 .../HBASE/package/scripts/hbase_service.py      |   51 +
 .../HBASE/package/scripts/hbase_upgrade.py      |   37 +
 .../services/HBASE/package/scripts/params.py    |  363 +
 .../package/scripts/phoenix_queryserver.py      |   77 +
 .../HBASE/package/scripts/phoenix_service.py    |   50 +
 .../HBASE/package/scripts/service_check.py      |   79 +
 .../HBASE/package/scripts/setup_ranger_hbase.py |   84 +
 .../HBASE/package/scripts/status_params.py      |   46 +
 .../services/HBASE/package/scripts/upgrade.py   |   52 +
 ...-metrics2-hbase.properties-GANGLIA-MASTER.j2 |  109 +
 ...doop-metrics2-hbase.properties-GANGLIA-RS.j2 |  107 +
 .../HBASE/package/templates/hbase-smoke.sh.j2   |   44 +
 .../HBASE/package/templates/hbase.conf.j2       |   35 +
 .../package/templates/hbase_client_jaas.conf.j2 |   23 +
 .../templates/hbase_grant_permissions.j2        |   39 +
 .../package/templates/hbase_master_jaas.conf.j2 |   26 +
 .../templates/hbase_queryserver_jaas.conf.j2    |   26 +
 .../templates/hbase_regionserver_jaas.conf.j2   |   26 +
 .../package/templates/hbase_rest_jaas.conf.j2   |   26 +
 .../HBASE/package/templates/regionservers.j2    |   20 +
 .../services/HBASE/quicklinks/quicklinks.json   |  121 +
 .../BigInsights/4.2/services/HBASE/widgets.json |  510 +
 .../BigInsights/4.2/services/HDFS/alerts.json   |  760 ++
 .../services/HDFS/configuration/core-site.xml   |  250 +
 .../services/HDFS/configuration/hadoop-env.xml  |  415 +
 .../HDFS/configuration/hadoop-policy.xml        |  145 +
 .../services/HDFS/configuration/hdfs-log4j.xml  |  226 +
 .../services/HDFS/configuration/hdfs-site.xml   |  752 ++
 .../HDFS/configuration/ranger-hdfs-audit.xml    |  193 +
 .../ranger-hdfs-plugin-properties.xml           |  244 +
 .../configuration/ranger-hdfs-policymgr-ssl.xml |   67 +
 .../HDFS/configuration/ranger-hdfs-security.xml |   71 +
 .../services/HDFS/configuration/ssl-client.xml  |   65 +
 .../services/HDFS/configuration/ssl-server.xml  |   72 +
 .../BigInsights/4.2/services/HDFS/kerberos.json |  230 +
 .../BigInsights/4.2/services/HDFS/metainfo.xml  |  283 +
 .../BigInsights/4.2/services/HDFS/metrics.json  | 7899 +++++++++++++++
 .../package/alerts/alert_checkpoint_time.py     |  223 +
 .../alerts/alert_datanode_unmounted_data_dir.py |  164 +
 .../package/alerts/alert_ha_namenode_health.py  |  261 +
 .../package/alerts/alert_upgrade_finalized.py   |  171 +
 .../HDFS/package/files/checkForFormat.sh        |   71 +
 .../services/HDFS/package/files/checkWebUI.py   |   54 +
 .../services/HDFS/package/scripts/__init__.py   |   20 +
 .../scripts/balancer-emulator/hdfs-command.py   |   45 +
 .../services/HDFS/package/scripts/datanode.py   |  158 +
 .../HDFS/package/scripts/datanode_upgrade.py    |  141 +
 .../4.2/services/HDFS/package/scripts/hdfs.py   |  131 +
 .../HDFS/package/scripts/hdfs_client.py         |  113 +
 .../HDFS/package/scripts/hdfs_datanode.py       |   76 +
 .../HDFS/package/scripts/hdfs_namenode.py       |  488 +
 .../HDFS/package/scripts/hdfs_nfsgateway.py     |   72 +
 .../HDFS/package/scripts/hdfs_rebalance.py      |  130 +
 .../HDFS/package/scripts/hdfs_snamenode.py      |   53 +
 .../HDFS/package/scripts/install_params.py      |   39 +
 .../HDFS/package/scripts/journalnode.py         |  169 +
 .../HDFS/package/scripts/journalnode_upgrade.py |  140 +
 .../services/HDFS/package/scripts/namenode.py   |  334 +
 .../HDFS/package/scripts/namenode_ha_state.py   |  216 +
 .../HDFS/package/scripts/namenode_upgrade.py    |  269 +
 .../services/HDFS/package/scripts/nfsgateway.py |  137 +
 .../4.2/services/HDFS/package/scripts/params.py |  463 +
 .../HDFS/package/scripts/service_check.py       |  109 +
 .../HDFS/package/scripts/setup_ranger_hdfs.py   |   90 +
 .../services/HDFS/package/scripts/snamenode.py  |  142 +
 .../HDFS/package/scripts/status_params.py       |   44 +
 .../4.2/services/HDFS/package/scripts/utils.py  |  407 +
 .../services/HDFS/package/scripts/zkfc_slave.py |  150 +
 .../package/templates/exclude_hosts_list.j2     |   21 +
 .../HDFS/package/templates/hdfs.conf.j2         |   35 +
 .../services/HDFS/package/templates/slaves.j2   |   21 +
 .../services/HDFS/quicklinks/quicklinks.json    |   92 +
 .../BigInsights/4.2/services/HDFS/widgets.json  |  644 ++
 .../BigInsights/4.2/services/HIVE/alerts.json   |  111 +
 .../services/HIVE/configuration/hcat-env.xml    |   58 +
 .../services/HIVE/configuration/hive-env.xml    |  351 +
 .../HIVE/configuration/hive-exec-log4j.xml      |  122 +
 .../services/HIVE/configuration/hive-log4j.xml  |  140 +
 .../services/HIVE/configuration/hive-site.xml   | 1961 ++++
 .../HIVE/configuration/hiveserver2-site.xml     |   77 +
 .../HIVE/configuration/ranger-hive-audit.xml    |  193 +
 .../ranger-hive-plugin-properties.xml           |  223 +
 .../configuration/ranger-hive-policymgr-ssl.xml |   67 +
 .../HIVE/configuration/ranger-hive-security.xml |   76 +
 .../services/HIVE/configuration/webhcat-env.xml |   55 +
 .../HIVE/configuration/webhcat-log4j.xml        |   82 +
 .../HIVE/configuration/webhcat-site.xml         |  184 +
 .../HIVE/etc/hive-schema-0.12.0.mysql.sql       |  777 ++
 .../HIVE/etc/hive-schema-0.12.0.oracle.sql      |  718 ++
 .../HIVE/etc/hive-schema-0.12.0.postgres.sql    | 1406 +++
 .../HIVE/etc/hive-schema-0.13.0.mysql.sql       |  889 ++
 .../HIVE/etc/hive-schema-0.13.0.oracle.sql      |  835 ++
 .../HIVE/etc/hive-schema-0.13.0.postgres.sql    | 1538 +++
 .../HIVE/etc/hive-schema-0.14.0.mysql.sql       |  889 ++
 .../HIVE/etc/hive-schema-0.14.0.oracle.sql      |  833 ++
 .../HIVE/etc/hive-schema-0.14.0.postgres.sql    | 1541 +++
 .../etc/upgrade-0.12.0-to-0.13.0.oracle.sql     |  165 +
 .../services/HIVE/etc/upgrade-0.13.0.oracle.sql |   38 +
 .../BigInsights/4.2/services/HIVE/kerberos.json |  114 +
 .../BigInsights/4.2/services/HIVE/metainfo.xml  |  351 +
 .../HIVE/package/alerts/alert_hive_metastore.py |  193 +
 .../package/alerts/alert_hive_thrift_port.py    |  269 +
 .../HIVE/package/alerts/alert_webhcat_server.py |  228 +
 .../package/etc/hive-schema-0.12.0.mysql.sql    |  777 ++
 .../package/etc/hive-schema-0.12.0.oracle.sql   |  718 ++
 .../package/etc/hive-schema-0.12.0.postgres.sql | 1406 +++
 .../services/HIVE/package/files/addMysqlUser.sh |   37 +
 .../services/HIVE/package/files/hcatSmoke.sh    |   41 +
 .../services/HIVE/package/files/hiveSmoke.sh    |   24 +
 .../services/HIVE/package/files/hiveserver2.sql |   23 +
 .../HIVE/package/files/hiveserver2Smoke.sh      |   32 +
 .../4.2/services/HIVE/package/files/pigSmoke.sh |   18 +
 .../HIVE/package/files/removeMysqlUser.sh       |   33 +
 .../HIVE/package/files/startMetastore.sh        |   25 +
 .../HIVE/package/files/templetonSmoke.sh        |   93 +
 .../services/HIVE/package/scripts/__init__.py   |   19 +
 .../4.2/services/HIVE/package/scripts/hcat.py   |   73 +
 .../HIVE/package/scripts/hcat_client.py         |   50 +
 .../HIVE/package/scripts/hcat_service_check.py  |   78 +
 .../4.2/services/HIVE/package/scripts/hive.py   |  432 +
 .../HIVE/package/scripts/hive_client.py         |   83 +
 .../HIVE/package/scripts/hive_metastore.py      |  234 +
 .../HIVE/package/scripts/hive_server.py         |  177 +
 .../HIVE/package/scripts/hive_server_upgrade.py |  138 +
 .../HIVE/package/scripts/hive_service.py        |  143 +
 .../HIVE/package/scripts/mysql_server.py        |   64 +
 .../HIVE/package/scripts/mysql_service.py       |   57 +
 .../HIVE/package/scripts/mysql_users.py         |   70 +
 .../HIVE/package/scripts/mysql_utils.py         |   35 +
 .../4.2/services/HIVE/package/scripts/params.py |   29 +
 .../HIVE/package/scripts/params_linux.py        |  565 ++
 .../HIVE/package/scripts/params_windows.py      |   76 +
 .../HIVE/package/scripts/postgresql_server.py   |  109 +
 .../HIVE/package/scripts/postgresql_service.py  |   39 +
 .../HIVE/package/scripts/service_check.py       |   91 +
 .../HIVE/package/scripts/setup_ranger_hive.py   |   77 +
 .../HIVE/package/scripts/status_params.py       |   96 +
 .../services/HIVE/package/scripts/webhcat.py    |  111 +
 .../HIVE/package/scripts/webhcat_server.py      |  147 +
 .../HIVE/package/scripts/webhcat_service.py     |   75 +
 .../package/scripts/webhcat_service_check.py    |  120 +
 .../HIVE/package/templates/hive.conf.j2         |   36 +
 .../package/templates/startHiveserver2.sh.j2    |   24 +
 .../package/templates/templeton_smoke.pig.j2    |   24 +
 .../BigInsights/4.2/services/KAFKA/alerts.json  |   32 +
 .../KAFKA/configuration/kafka-broker.xml        |  475 +
 .../services/KAFKA/configuration/kafka-env.xml  |   86 +
 .../KAFKA/configuration/kafka-log4j.xml         |  118 +
 .../4.2/services/KAFKA/kerberos.json            |   50 +
 .../BigInsights/4.2/services/KAFKA/metainfo.xml |   84 +
 .../BigInsights/4.2/services/KAFKA/metrics.json |  239 +
 .../4.2/services/KAFKA/package/scripts/kafka.py |  243 +
 .../KAFKA/package/scripts/kafka_broker.py       |  107 +
 .../services/KAFKA/package/scripts/params.py    |  157 +
 .../KAFKA/package/scripts/service_check.py      |   65 +
 .../KAFKA/package/scripts/status_params.py      |   26 +
 .../services/KAFKA/package/scripts/upgrade.py   |   78 +
 .../4.2/services/KAFKA/package/scripts/utils.py |   38 +
 .../KAFKA/package/templates/kafka.conf.j2       |   35 +
 .../package/templates/kafka_client_jaas.conf.j2 |   29 +
 .../KAFKA/package/templates/kafka_jaas.conf.j2  |   41 +
 .../package/templates/tools-log4j.properties.j2 |   21 +
 .../KERBEROS/configuration/kerberos-env.xml     |  326 +
 .../KERBEROS/configuration/krb5-conf.xml        |  113 +
 .../4.2/services/KERBEROS/kerberos.json         |   17 +
 .../4.2/services/KERBEROS/metainfo.xml          |  147 +
 .../KERBEROS/package/scripts/kerberos_client.py |   79 +
 .../KERBEROS/package/scripts/kerberos_common.py |  473 +
 .../KERBEROS/package/scripts/kerberos_server.py |  141 +
 .../services/KERBEROS/package/scripts/params.py |  200 +
 .../KERBEROS/package/scripts/service_check.py   |   81 +
 .../KERBEROS/package/scripts/status_params.py   |   32 +
 .../services/KERBEROS/package/scripts/utils.py  |  105 +
 .../KERBEROS/package/templates/kadm5_acl.j2     |   20 +
 .../KERBEROS/package/templates/kdc_conf.j2      |   30 +
 .../KERBEROS/package/templates/krb5_conf.j2     |   55 +
 .../BigInsights/4.2/services/KNOX/alerts.json   |   32 +
 .../KNOX/configuration/gateway-log4j.xml        |   84 +
 .../KNOX/configuration/gateway-site.xml         |   79 +
 .../services/KNOX/configuration/knox-env.xml    |   81 +
 .../services/KNOX/configuration/ldap-log4j.xml  |   67 +
 .../KNOX/configuration/ranger-knox-audit.xml    |  193 +
 .../ranger-knox-plugin-properties.xml           |  241 +
 .../configuration/ranger-knox-policymgr-ssl.xml |   67 +
 .../KNOX/configuration/ranger-knox-security.xml |   65 +
 .../services/KNOX/configuration/topology.xml    |  182 +
 .../services/KNOX/configuration/users-ldif.xml  |  139 +
 .../BigInsights/4.2/services/KNOX/kerberos.json |   63 +
 .../BigInsights/4.2/services/KNOX/metainfo.xml  |   92 +
 .../KNOX/package/files/validateKnoxStatus.py    |   43 +
 .../4.2/services/KNOX/package/scripts/knox.py   |  162 +
 .../KNOX/package/scripts/knox_gateway.py        |  307 +
 .../services/KNOX/package/scripts/knox_ldap.py  |   54 +
 .../4.2/services/KNOX/package/scripts/ldap.py   |   55 +
 .../4.2/services/KNOX/package/scripts/params.py |  459 +
 .../KNOX/package/scripts/service_check.py       |   92 +
 .../KNOX/package/scripts/setup_ranger_knox.py   |   77 +
 .../KNOX/package/scripts/status_params.py       |   50 +
 .../services/KNOX/package/scripts/upgrade.py    |   93 +
 .../package/templates/krb5JAASLogin.conf.j2     |   30 +
 .../BigInsights/4.2/services/OOZIE/alerts.json  |   45 +
 .../services/OOZIE/configuration/oozie-env.xml  |  201 +
 .../OOZIE/configuration/oozie-log4j.xml         |  147 +
 .../services/OOZIE/configuration/oozie-site.xml |  416 +
 .../4.2/services/OOZIE/kerberos.json            |   70 +
 .../BigInsights/4.2/services/OOZIE/metainfo.xml |  172 +
 .../package/alerts/alert_check_oozie_server.py  |  211 +
 .../services/OOZIE/package/files/oozieSmoke2.sh |   90 +
 .../files/prepareOozieHdfsDirectories.sh        |   46 +
 .../OOZIE/package/files/wrap_ooziedb.sh         |   31 +
 .../4.2/services/OOZIE/package/scripts/oozie.py |  279 +
 .../OOZIE/package/scripts/oozie_client.py       |   76 +
 .../OOZIE/package/scripts/oozie_server.py       |  193 +
 .../package/scripts/oozie_server_upgrade.py     |  300 +
 .../OOZIE/package/scripts/oozie_service.py      |  124 +
 .../services/OOZIE/package/scripts/params.py    |  262 +
 .../OOZIE/package/scripts/service_check.py      |  140 +
 .../OOZIE/package/scripts/status_params.py      |   47 +
 .../OOZIE/package/templates/adminusers.txt.j2   |   28 +
 .../package/templates/oozie-log4j.properties.j2 |   93 +
 .../services/OOZIE/quicklinks/quicklinks.json   |   48 +
 .../4.2/services/PIG/configuration/pig-env.xml  |   39 +
 .../services/PIG/configuration/pig-log4j.xml    |   66 +
 .../PIG/configuration/pig-properties.xml        |  632 ++
 .../BigInsights/4.2/services/PIG/kerberos.json  |   17 +
 .../BigInsights/4.2/services/PIG/metainfo.xml   |   87 +
 .../4.2/services/PIG/package/files/pigSmoke.sh  |   18 +
 .../4.2/services/PIG/package/scripts/params.py  |   25 +
 .../PIG/package/scripts/params_linux.py         |   88 +
 .../4.2/services/PIG/package/scripts/pig.py     |   61 +
 .../services/PIG/package/scripts/pig_client.py  |   59 +
 .../PIG/package/scripts/service_check.py        |  123 +
 .../BigInsights/4.2/services/RANGER/alerts.json |   74 +
 .../RANGER/configuration/admin-properties.xml   |  287 +
 .../RANGER/configuration/ranger-admin-site.xml  |  571 ++
 .../RANGER/configuration/ranger-env.xml         |  465 +
 .../RANGER/configuration/ranger-site.xml        |   76 +
 .../RANGER/configuration/ranger-ugsync-site.xml |  496 +
 .../configuration/usersync-properties.xml       |  126 +
 .../4.2/services/RANGER/metainfo.xml            |  107 +
 .../alerts/alert_ranger_admin_passwd_check.py   |  180 +
 .../services/RANGER/package/scripts/params.py   |  208 +
 .../RANGER/package/scripts/ranger_admin.py      |  194 +
 .../RANGER/package/scripts/ranger_service.py    |   47 +
 .../RANGER/package/scripts/ranger_usersync.py   |   82 +
 .../RANGER/package/scripts/service_check.py     |   51 +
 .../RANGER/package/scripts/setup_ranger.py      |  137 +
 .../RANGER/package/scripts/setup_ranger_xml.py  |  467 +
 .../services/RANGER/package/scripts/upgrade.py  |   30 +
 .../4.2/services/RANGER/themes/theme.json       | 1397 +++
 .../4.2/services/RANGER_KMS/alerts.json         |   32 +
 .../RANGER_KMS/configuration/dbks-site.xml      |  101 +
 .../RANGER_KMS/configuration/kms-env.xml        |   55 +
 .../RANGER_KMS/configuration/kms-log4j.xml      |   69 +
 .../RANGER_KMS/configuration/kms-properties.xml |  104 +
 .../RANGER_KMS/configuration/kms-site.xml       |  146 +
 .../configuration/ranger-kms-audit.xml          |  153 +
 .../configuration/ranger-kms-policymgr-ssl.xml  |   73 +
 .../configuration/ranger-kms-security.xml       |   65 +
 .../configuration/ranger-kms-site.xml           |   66 +
 .../4.2/services/RANGER_KMS/kerberos.json       |   49 +
 .../4.2/services/RANGER_KMS/metainfo.xml        |   89 +
 .../services/RANGER_KMS/package/scripts/kms.py  |  489 +
 .../RANGER_KMS/package/scripts/kms_server.py    |   96 +
 .../RANGER_KMS/package/scripts/kms_service.py   |   49 +
 .../RANGER_KMS/package/scripts/params.py        |  246 +
 .../RANGER_KMS/package/scripts/service_check.py |   41 +
 .../RANGER_KMS/package/scripts/upgrade.py       |   29 +
 .../SLIDER/configuration/slider-client.xml      |   61 +
 .../SLIDER/configuration/slider-env.xml         |   44 +
 .../SLIDER/configuration/slider-log4j.xml       |   93 +
 .../4.2/services/SLIDER/kerberos.json           |   17 +
 .../4.2/services/SLIDER/metainfo.xml            |  128 +
 .../SLIDER/package/files/hbaseSmokeVerify.sh    |   34 +
 .../services/SLIDER/package/scripts/__init__.py |   19 +
 .../services/SLIDER/package/scripts/params.py   |   65 +
 .../SLIDER/package/scripts/params_linux.py      |   75 +
 .../SLIDER/package/scripts/params_windows.py    |   45 +
 .../SLIDER/package/scripts/service_check.py     |   59 +
 .../services/SLIDER/package/scripts/slider.py   |   97 +
 .../SLIDER/package/scripts/slider_client.py     |   71 +
 .../package/templates/storm-slider-env.sh.j2    |   38 +
 .../services/SOLR/configuration/solr-env.xml    |  244 +
 .../services/SOLR/configuration/solr-log4j.xml  |   83 +
 .../services/SOLR/configuration/solr-site.xml   |   47 +
 .../BigInsights/4.2/services/SOLR/kerberos.json |   53 +
 .../BigInsights/4.2/services/SOLR/metainfo.xml  |   82 +
 .../services/SOLR/package/scripts/__init__.py   |   19 +
 .../4.2/services/SOLR/package/scripts/params.py |  205 +
 .../SOLR/package/scripts/service_check.py       |   61 +
 .../4.2/services/SOLR/package/scripts/solr.py   |  100 +
 .../SOLR/package/scripts/solr_client.py         |   36 +
 .../SOLR/package/scripts/solr_server.py         |  107 +
 .../SOLR/package/scripts/solr_service.py        |   72 +
 .../SOLR/package/scripts/solr_upgrade.py        |  135 +
 .../SOLR/package/scripts/status_params.py       |   32 +
 .../services/SOLR/package/templates/solr.xml.j2 |   51 +
 .../SOLR/package/templates/solr_jaas.conf.j2    |   26 +
 .../BigInsights/4.2/services/SPARK/alerts.json  |   32 +
 .../SPARK/configuration/spark-defaults.xml      |  175 +
 .../services/SPARK/configuration/spark-env.xml  |  120 +
 .../configuration/spark-javaopts-properties.xml |   28 +
 .../SPARK/configuration/spark-log4j.xml         |   43 +
 .../configuration/spark-metrics-properties.xml  |  161 +
 .../4.2/services/SPARK/kerberos.json            |   55 +
 .../BigInsights/4.2/services/SPARK/metainfo.xml |  197 +
 .../SPARK/package/scripts/job_history_server.py |  167 +
 .../services/SPARK/package/scripts/params.py    |  219 +
 .../SPARK/package/scripts/service_check.py      |  132 +
 .../4.2/services/SPARK/package/scripts/spark.py |  353 +
 .../SPARK/package/scripts/spark_check.py        |   76 +
 .../SPARK/package/scripts/spark_client.py       |   62 +
 .../package/scripts/spark_thrift_server.py      |  119 +
 .../SPARK/package/scripts/status_params.py      |   36 +
 .../package/templates/spark-defaults.conf.j2    |   43 +
 .../services/SPARK/quicklinks/quicklinks.json   |   47 +
 .../services/SQOOP/configuration/sqoop-env.xml  |   62 +
 .../BigInsights/4.2/services/SQOOP/metainfo.xml |   95 +
 .../services/SQOOP/package/scripts/__init__.py  |   19 +
 .../services/SQOOP/package/scripts/params.py    |   95 +
 .../SQOOP/package/scripts/service_check.py      |   44 +
 .../4.2/services/SQOOP/package/scripts/sqoop.py |   85 +
 .../SQOOP/package/scripts/sqoop_client.py       |   57 +
 .../4.2/services/SYSTEMML/metainfo.xml          |   77 +
 .../SYSTEMML/package/scripts/__init__.py        |   19 +
 .../services/SYSTEMML/package/scripts/params.py |   40 +
 .../SYSTEMML/package/scripts/service_check.py   |   43 +
 .../SYSTEMML/package/scripts/systemml_client.py |   49 +
 .../services/TITAN/configuration/titan-env.xml  |   48 +
 .../TITAN/configuration/titan-hbase-solr.xml    |   67 +
 .../TITAN/configuration/titan-log4j.xml         |   66 +
 .../4.2/services/TITAN/kerberos.json            |   17 +
 .../BigInsights/4.2/services/TITAN/metainfo.xml |   88 +
 .../TITAN/package/files/titanSmoke.groovy       |   20 +
 .../services/TITAN/package/scripts/params.py    |  128 +
 .../TITAN/package/scripts/service_check.py      |   64 +
 .../4.2/services/TITAN/package/scripts/titan.py |   70 +
 .../TITAN/package/scripts/titan_client.py       |   58 +
 .../4.2/services/YARN/MAPREDUCE2_metrics.json   | 2596 +++++
 .../4.2/services/YARN/YARN_metrics.json         | 3486 +++++++
 .../4.2/services/YARN/YARN_widgets.json         |  617 ++
 .../BigInsights/4.2/services/YARN/alerts.json   |  414 +
 .../YARN/configuration-mapred/mapred-env.xml    |  103 +
 .../YARN/configuration-mapred/mapred-site.xml   |  585 ++
 .../YARN/configuration/capacity-scheduler.xml   |  172 +
 .../YARN/configuration/ranger-yarn-audit.xml    |  193 +
 .../ranger-yarn-plugin-properties.xml           |   86 +
 .../configuration/ranger-yarn-policymgr-ssl.xml |   67 +
 .../YARN/configuration/ranger-yarn-security.xml |   65 +
 .../services/YARN/configuration/yarn-env.xml    |  262 +
 .../services/YARN/configuration/yarn-log4j.xml  |   84 +
 .../services/YARN/configuration/yarn-site.xml   | 1160 +++
 .../BigInsights/4.2/services/YARN/kerberos.json |  224 +
 .../BigInsights/4.2/services/YARN/metainfo.xml  |  286 +
 .../package/alerts/alert_nodemanager_health.py  |  201 +
 .../alerts/alert_nodemanagers_summary.py        |  197 +
 .../files/validateYarnComponentStatus.py        |  170 +
 .../services/YARN/package/scripts/__init__.py   |   20 +
 .../scripts/application_timeline_server.py      |  139 +
 .../YARN/package/scripts/historyserver.py       |  158 +
 .../package/scripts/mapred_service_check.py     |   80 +
 .../YARN/package/scripts/mapreduce2_client.py   |   56 +
 .../YARN/package/scripts/nodemanager.py         |  144 +
 .../YARN/package/scripts/nodemanager_upgrade.py |   76 +
 .../4.2/services/YARN/package/scripts/params.py |  412 +
 .../YARN/package/scripts/resourcemanager.py     |  181 +
 .../services/YARN/package/scripts/service.py    |   81 +
 .../YARN/package/scripts/service_check.py       |   91 +
 .../YARN/package/scripts/setup_ranger_yarn.py   |   67 +
 .../YARN/package/scripts/status_params.py       |   44 +
 .../4.2/services/YARN/package/scripts/yarn.py   |  445 +
 .../YARN/package/scripts/yarn_client.py         |   56 +
 .../package/templates/container-executor.cfg.j2 |   40 +
 .../package/templates/exclude_hosts_list.j2     |   21 +
 .../YARN/package/templates/mapreduce.conf.j2    |   35 +
 .../package/templates/taskcontroller.cfg.j2     |   38 +
 .../YARN/package/templates/yarn.conf.j2         |   35 +
 .../YARN/quicklinks-mapred/quicklinks.json      |   92 +
 .../services/YARN/quicklinks/quicklinks.json    |   92 +
 .../4.2/services/ZOOKEEPER/alerts.json          |   58 +
 .../ZOOKEEPER/configuration/zoo.cfg.xml         |   91 +
 .../ZOOKEEPER/configuration/zookeeper-env.xml   |   77 +
 .../ZOOKEEPER/configuration/zookeeper-log4j.xml |  102 +
 .../4.2/services/ZOOKEEPER/kerberos.json        |   39 +
 .../4.2/services/ZOOKEEPER/metainfo.xml         |   91 +
 .../services/ZOOKEEPER/package/files/zkEnv.sh   |   96 +
 .../ZOOKEEPER/package/files/zkServer.sh         |  120 +
 .../ZOOKEEPER/package/files/zkService.sh        |   26 +
 .../services/ZOOKEEPER/package/files/zkSmoke.sh |   93 +
 .../ZOOKEEPER/package/scripts/__init__.py       |   20 +
 .../ZOOKEEPER/package/scripts/params.py         |   96 +
 .../ZOOKEEPER/package/scripts/service_check.py  |   53 +
 .../ZOOKEEPER/package/scripts/status_params.py  |   43 +
 .../ZOOKEEPER/package/scripts/zookeeper.py      |  114 +
 .../package/scripts/zookeeper_client.py         |   71 +
 .../package/scripts/zookeeper_server.py         |  161 +
 .../package/scripts/zookeeper_service.py        |   58 +
 .../package/templates/configuration.xsl.j2      |   42 +
 .../ZOOKEEPER/package/templates/zoo.cfg.j2      |   53 +
 .../templates/zookeeper_client_jaas.conf.j2     |   23 +
 .../package/templates/zookeeper_jaas.conf.j2    |   26 +
 .../BigInsights/4.2/services/stack_advisor.py   |  105 +
 .../BigInsights/4.2/upgrades/config-upgrade.xml |  240 +
 .../upgrades/nonrolling-upgrade-to-hdp-2.6.xml  |  995 ++
 .../HDP/2.0.6/configuration/cluster-env.xml     |   16 +-
 .../templates/hadoop-metrics2.properties.j2     |   14 +-
 .../HDP/2.0.6/properties/stack_features.json    |  848 +-
 .../HDP/2.0.6/properties/stack_tools.json       |   16 +-
 .../stacks/HDP/2.5/services/KAFKA/metainfo.xml  |    1 +
 .../stacks/HDP/2.5/services/stack_advisor.py    |   11 +-
 .../stacks/HDP/2.5/upgrades/config-upgrade.xml  |   14 +-
 .../HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml |   13 +-
 .../stacks/HDP/2.5/upgrades/upgrade-2.6.xml     |    6 +-
 .../resources/stacks/HDP/2.6/repos/repoinfo.xml |    6 +-
 .../configuration/application-properties.xml    |   17 +
 .../services/ATLAS/configuration/atlas-env.xml  |    6 +
 .../services/HIVE/configuration/hive-env.xml    |   78 +-
 .../HIVE/configuration/hive-interactive-env.xml |   62 +-
 .../services/HIVE/configuration/hive-site.xml   |   35 +
 .../HIVE/configuration/parquet-logging.xml      |  106 +
 .../HIVE/configuration/ranger-hive-security.xml |   33 +
 .../stacks/HDP/2.6/services/HIVE/kerberos.json  |  151 +
 .../services/STORM/configuration/storm-site.xml |   61 +
 .../stacks/HDP/2.6/services/YARN/kerberos.json  |    2 +-
 .../stacks/HDP/2.6/services/stack_advisor.py    |   21 +
 .../stacks/HDP/2.6/upgrades/config-upgrade.xml  |   23 +
 .../HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml |   14 +
 .../stacks/HDP/2.6/upgrades/upgrade-2.6.xml     |    3 +
 .../PERF/1.0/configuration/cluster-env.xml      |   16 +-
 .../PERF/1.0/properties/stack_features.json     |   38 +-
 .../stacks/PERF/1.0/properties/stack_tools.json |   16 +-
 .../src/main/resources/stacks/stack_advisor.py  |   18 +
 .../actionmanager/TestActionDBAccessorImpl.java |   44 +-
 .../ComponentVersionAlertRunnableTest.java      |    5 +-
 .../creator/UpgradeEventCreatorTest.java        |    6 +-
 .../checks/ComponentExistsInRepoCheckTest.java  |  329 +
 .../checks/PreviousUpgradeCompletedTest.java    |   13 +-
 .../AmbariCustomCommandExecutionHelperTest.java |    6 +-
 .../BlueprintConfigurationProcessorTest.java    |   38 +-
 ...ClusterStackVersionResourceProviderTest.java |  276 +-
 .../PreUpgradeCheckResourceProviderTest.java    |   12 +-
 .../internal/RequestResourceProviderTest.java   |   11 +-
 .../StackUpgradeConfigurationMergeTest.java     |   35 +-
 .../UpgradeResourceProviderHDP22Test.java       |   15 +-
 .../internal/UpgradeResourceProviderTest.java   |  130 +-
 .../UpgradeSummaryResourceProviderTest.java     |   22 +-
 .../metrics/JMXPropertyProviderTest.java        |    9 +
 .../upgrade/StackVersionListenerTest.java       |   98 +-
 .../dispatchers/AlertScriptDispatcherTest.java  |   28 +-
 .../ambari/server/orm/DBAccessorImplTest.java   |   91 +
 .../ambari/server/orm/dao/UpgradeDAOTest.java   |   32 +-
 .../ChangeStackReferencesActionTest.java        |  103 +
 .../ComponentVersionCheckActionTest.java        |  121 +-
 .../upgrades/UpgradeActionTest.java             |  267 +-
 .../UpgradeUserKerberosDescriptorTest.java      |   19 +-
 .../server/state/ServiceComponentTest.java      |   31 +-
 .../ambari/server/state/UpgradeHelperTest.java  |  319 +-
 .../state/alerts/AlertDefinitionHashTest.java   |    4 +-
 .../cluster/ClusterEffectiveVersionTest.java    |   68 +-
 .../server/state/cluster/ClustersTest.java      |   13 +-
 .../KerberosDescriptorUpdateHelperTest.java     |   70 +
 .../AlertNoticeDispatchServiceTest.java         |   23 +
 .../services/RetryUpgradeActionServiceTest.java |   30 +-
 .../stack/upgrade/StageWrapperBuilderTest.java  |   32 +-
 .../server/topology/BlueprintImplTest.java      |   30 +-
 .../ClusterConfigurationRequestTest.java        |  113 +-
 .../server/topology/LogicalRequestTest.java     |   94 +-
 .../RequiredConfigPropertiesValidatorTest.java  |  234 +
 .../src/test/python/TestAmbariServer.py         |   35 +-
 .../src/test/python/TestStackFeature.py         |   44 +-
 .../common-services/configs/hawq_default.json   |    6 +-
 .../python/custom_actions/test_ru_set_all.py    |    6 +-
 .../python/host_scripts/TestAlertDiskSpace.py   |   16 +-
 .../2.0.6/HBASE/test_phoenix_queryserver.py     |   23 -
 .../stacks/2.0.6/HIVE/test_hive_server.py       |    2 +
 .../stacks/2.0.6/YARN/test_historyserver.py     |   21 +-
 .../configs/ha_bootstrap_standby_node.json      |    2 +-
 ...ha_bootstrap_standby_node_initial_start.json |    2 +-
 ...dby_node_initial_start_dfs_nameservices.json |    2 +-
 .../test/python/stacks/2.0.6/configs/nn_eu.json |    2 +-
 .../stacks/2.0.6/configs/nn_eu_standby.json     |    2 +-
 .../stacks/2.1/STORM/test_storm_nimbus.py       |   60 +-
 .../stacks/2.1/configs/default-storm-start.json |   14 +
 .../test/python/stacks/2.1/configs/default.json |   13 +
 .../2.1/configs/hive-metastore-upgrade.json     |    2 +-
 .../stacks/2.1/configs/secured-storm-start.json |   13 +
 .../test/python/stacks/2.1/configs/secured.json |   15 +-
 .../python/stacks/2.2/configs/knox_upgrade.json |    2 +-
 .../stacks/2.5/common/test_stack_advisor.py     |  150 +-
 .../2.5/configs/ranger-admin-default.json       |  990 +-
 .../2.5/configs/ranger-admin-secured.json       | 1108 +-
 .../stacks/2.5/configs/ranger-kms-default.json  | 1158 +--
 .../stacks/2.5/configs/ranger-kms-secured.json  | 1320 +--
 .../stacks/2.6/common/test_stack_advisor.py     |  123 +-
 .../test/python/stacks/2.6/configs/default.json |    3 +-
 .../2.6/configs/ranger-admin-default.json       |  953 +-
 .../2.6/configs/ranger-admin-secured.json       | 1066 +-
 .../src/test/python/stacks/utils/RMFTestCase.py |    8 +-
 .../controllers/global/cluster_controller.js    |    1 +
 .../main/admin/stack_and_upgrade_controller.js  |   59 +-
 ambari-web/app/controllers/main/host/details.js |   83 +-
 ambari-web/app/controllers/main/service.js      |   13 +-
 ambari-web/app/mappers/hosts_mapper.js          |   12 +-
 ambari-web/app/messages.js                      |    5 +
 .../mixins/common/configs/enhanced_configs.js   |    3 +-
 .../stack_version/stack_upgrade_history.js      |   17 +-
 .../modal_popups/install_repo_confirmation.hbs  |   27 +
 .../admin/stack_upgrade/upgrade_history.hbs     |    4 +-
 .../main/admin/stack_upgrade/versions.hbs       |    7 +
 ambari-web/app/utils/ajax/ajax.js               |   49 +-
 .../admin/stack_upgrade/upgrade_history_view.js |    2 +-
 .../stack_upgrade/upgrade_version_box_view.js   |    8 +-
 .../main/admin/stack_upgrade/versions_view.js   |   35 +-
 .../admin/stack_and_upgrade_controller_test.js  |   48 +-
 .../test/controllers/main/host/details_test.js  |   64 +-
 .../test/controllers/main/service_test.js       |    4 +-
 .../upgrade_version_box_view_test.js            |   11 +-
 .../admin/stack_upgrade/version_view_test.js    |  152 +-
 .../view/filebrowser/DownloadService.java       |    4 +-
 .../hive2/resources/uploads/UploadService.java  |    4 +-
 .../src/main/resources/ui/app/styles/app.scss   |    1 +
 .../slider/src/main/resources/ui/package.json   |   19 +-
 .../ui/app/components/flow-designer.js          |   27 +
 .../resources/ui/app/components/hdfs-browser.js |    8 +-
 .../ui/app/domain/actionjob_hanlder.js          |    3 +
 .../ui/app/domain/workflow-xml-generator.js     |    3 +
 .../ui/app/services/workspace-manager.js        |    5 +
 .../src/main/resources/ui/app/styles/app.less   |    5 +-
 .../app/templates/components/flow-designer.hbs  |    9 +-
 .../app/templates/components/hdfs-browser.hbs   |   16 +-
 .../app/templates/components/spark-action.hbs   |    7 +-
 .../main/resources/ui/app/utils/constants.js    |    3 +-
 .../main/resources/ui/app/utils/hdfsviewer.js   |    3 +
 .../wfmanager/src/main/resources/ui/bower.json  |    5 +-
 .../addon/components/directory-viewer.js        |  325 +-
 .../hdfs-directory-viewer/addon/styles/app.css  |  190 +
 .../templates/components/directory-viewer.hbs   |  146 +
 .../addon/utils/viewer-config.js                |    3 +
 .../hdfs-directory-viewer/bower.json            |    1 -
 .../hdfs-directory-viewer/index.js              |    3 -
 .../hdfs-directory-viewer/package.json          |    5 +-
 .../src/main/resources/ui/package.json          |    3 +-
 .../wfmanager/src/main/resources/ui/yarn.lock   |   68 +-
 dev-support/docker/docker/bin/ambaribuild.py    |   21 +-
 .../docker/docker/bin/test/ambaribuild_test.py  |   17 +
 pom.xml                                         |    4 +
 1518 files changed, 254548 insertions(+), 6497 deletions(-)
----------------------------------------------------------------------



[22/50] [abbrv] ambari git commit: AMBARI-21436 - Add a Prerequisite Check To Ensure that the Target Stack Has All Existing Components (jonathanhurley)

Posted by jo...@apache.org.
AMBARI-21436 - Add a Prerequisite Check To Ensure that the Target Stack Has All Existing Components (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/113b381e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/113b381e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/113b381e

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 113b381ecc530e7b1daf9a4cd7e546c1631df451
Parents: 7dbcb75
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Thu Jul 13 15:59:45 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Thu Jul 13 17:34:26 2017 -0400

----------------------------------------------------------------------
 .../ambari/server/checks/CheckDescription.java  |  38 ++-
 .../checks/ComponentsExistInRepoCheck.java      | 142 ++++++++
 .../org/apache/ambari/server/state/Host.java    |   4 +-
 .../ambari/server/state/host/HostImpl.java      |  29 +-
 .../checks/ComponentExistsInRepoCheckTest.java  | 329 +++++++++++++++++++
 5 files changed, 514 insertions(+), 28 deletions(-)
----------------------------------------------------------------------
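
For orientation before the diff: the new ComponentsExistInRepoCheck walks every installed service, asks the target stack's metainfo whether the service and each of its components still exist, and fails the upgrade with a sorted list of offenders. Below is a minimal, JDK-only sketch of that classification and message-assembly flow; plain maps and a null return stand in for Ambari's AmbariMetaInfo lookups and StackAccessException, and the version-advertising and deleted/invalid-service filters are omitted for brevity, so treat this as an illustration of the control flow, not the real API. The "ZOOKEEPER_REST" and "FOO" names are hypothetical.

import java.text.MessageFormat;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;

public class ComponentsExistSketch {
  public static void main(String[] args) {
    // Hypothetical cluster layout: service -> installed components.
    Map<String, Set<String>> cluster = new HashMap<>();
    cluster.put("ZOOKEEPER", new HashSet<>(
        Arrays.asList("ZOOKEEPER_SERVER", "ZOOKEEPER_CLIENT", "ZOOKEEPER_REST")));
    cluster.put("FOO", new HashSet<>(Arrays.asList("FOO_MASTER")));

    // Hypothetical target stack: service -> components it still ships.
    // "FOO" has been dropped from the target stack entirely, and
    // "ZOOKEEPER_REST" is no longer shipped with ZOOKEEPER.
    Map<String, Set<String>> targetStack = new HashMap<>();
    targetStack.put("ZOOKEEPER", new HashSet<>(
        Arrays.asList("ZOOKEEPER_SERVER", "ZOOKEEPER_CLIENT")));

    // TreeSets keep the failure lists sorted, as in the patch.
    Set<String> failedServices = new TreeSet<>();
    Set<String> failedComponents = new TreeSet<>();

    for (Map.Entry<String, Set<String>> entry : cluster.entrySet()) {
      Set<String> stackComponents = targetStack.get(entry.getKey());
      if (stackComponents == null) {          // stands in for StackAccessException
        failedServices.add(entry.getKey());
        continue;
      }
      for (String component : entry.getValue()) {
        if (!stackComponents.contains(component)) {
          failedComponents.add(component);
        }
      }
    }

    if (failedServices.isEmpty() && failedComponents.isEmpty()) {
      System.out.println("PASS");
      return;
    }

    // Services first, then components: the patch uses a LinkedHashSet for this.
    Set<String> failedOn = new LinkedHashSet<>();
    failedOn.addAll(failedServices);
    failedOn.addAll(failedComponents);

    String fragment = failedServices.isEmpty() ? "components"
        : failedComponents.isEmpty() ? "services" : "services and components";
    System.out.println("FAIL " + failedOn + ": " + MessageFormat.format(
        "The following {0} exist in {1} but are not included in {2}.",
        fragment, "HDP-0.1", "HDP-2.2.0"));
  }
}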


http://git-wip-us.apache.org/repos/asf/ambari/blob/113b381e/ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDescription.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDescription.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDescription.java
index 2be42fc..640de58 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDescription.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDescription.java
@@ -272,17 +272,17 @@ public class CheckDescription {
           "After upgrading, Atlas can be reinstalled").build());
 
   public static CheckDescription SERVICE_PRESENCE_CHECK = new CheckDescription("SERVICE_PRESENCE_CHECK",
-      PrereqCheckType.SERVICE,
-      "Service Is Not Supported For Upgrades",
-      new ImmutableMap.Builder<String, String>()
-        .put(AbstractCheckDescriptor.DEFAULT,
-            "The %s service is currently installed on the cluster. " +
-            "This service does not support upgrades and must be removed before the upgrade can continue. " +
-            "After upgrading, %s can be reinstalled")
-        .put(ServicePresenceCheck.KEY_SERVICE_REMOVED,
-            "The %s service is currently installed on the cluster. " +
-            "This service is removed from the new release and must be removed before the upgrade can continue. " +
-            "After upgrading, %s can be installed").build());
+    PrereqCheckType.SERVICE,
+    "Service Is Not Supported For Upgrades",
+    new ImmutableMap.Builder<String, String>()
+      .put(AbstractCheckDescriptor.DEFAULT,
+          "The %s service is currently installed on the cluster. " +
+          "This service does not support upgrades and must be removed before the upgrade can continue. " +
+          "After upgrading, %s can be reinstalled")
+      .put(ServicePresenceCheck.KEY_SERVICE_REMOVED,
+          "The %s service is currently installed on the cluster. " +
+          "This service is removed from the new release and must be removed before the upgrade can continue. " +
+          "After upgrading, %s can be installed").build());
 
   public static CheckDescription RANGER_SERVICE_AUDIT_DB_CHECK = new CheckDescription("RANGER_SERVICE_AUDIT_DB_CHECK",
     PrereqCheckType.SERVICE,
@@ -325,17 +325,23 @@ public class CheckDescription {
     PrereqCheckType.SERVICE,
     "Change Ranger SSL configuration path for Keystore and Truststore.",
     new ImmutableMap.Builder<String, String>()
-            .put(AbstractCheckDescriptor.DEFAULT,
-              "As Ranger is SSL enabled, Ranger SSL configurations will need to be changed from default value of /etc/ranger/*/conf folder to /etc/ranger/security. " +
-              "Since the certificates/keystores/truststores in this path may affect the upgrade/downgrade process, it is recommended to manually move the certificates/keystores/truststores out of the conf folders and change the appropriate config values before proceeding.").build());
+      .put(AbstractCheckDescriptor.DEFAULT,
+        "As Ranger is SSL enabled, Ranger SSL configurations will need to be changed from default value of /etc/ranger/*/conf folder to /etc/ranger/security. " +
+        "Since the certificates/keystores/truststores in this path may affect the upgrade/downgrade process, it is recommended to manually move the certificates/keystores/truststores out of the conf folders and change the appropriate config values before proceeding.").build());
 
   public static CheckDescription JAVA_VERSION = new CheckDescription("JAVA_VERSION",
       PrereqCheckType.CLUSTER,
       "Verify Java version requirement",
       new ImmutableMap.Builder<String, String>()
         .put(AbstractCheckDescriptor.DEFAULT, "Ambari requires JDK with minimum version %s. Reconfigure Ambari with a JDK that meets the version requirement.")
-        .build()
-    );
+          .build());
+
+  public static CheckDescription COMPONENTS_EXIST_IN_TARGET_REPO = new CheckDescription("COMPONENTS_EXIST_IN_TARGET_REPO",
+      PrereqCheckType.CLUSTER,
+      "Verify Cluster Components Exist In Target Repository",
+      new ImmutableMap.Builder<String, String>()
+        .put(AbstractCheckDescriptor.DEFAULT, "The following components do not exist in the target repository's stack. They must be removed from the cluster before upgrading.")
+          .build());
 
   private String m_name;
   private PrereqCheckType m_type;
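
Aside from re-indentation, the substantive change in this file is the new COMPONENTS_EXIST_IN_TARGET_REPO entry, which follows the class's existing pattern: a check name paired with an immutable map of failure-reason templates built via Guava's ImmutableMap.Builder. A stripped-down sketch of that pattern follows (a sketch only: it assumes Guava on the classpath, and the real CheckDescription constructor also takes a PrereqCheckType):

import java.util.Map;

import com.google.common.collect.ImmutableMap;

public class CheckDescriptionSketch {
  static final String DEFAULT = "default";

  private final String m_name;
  private final Map<String, String> m_failureReasons;

  CheckDescriptionSketch(String name, Map<String, String> failureReasons) {
    m_name = name;
    m_failureReasons = failureReasons;
  }

  public static void main(String[] args) {
    // One immutable template map per check; keys select the failure flavor.
    CheckDescriptionSketch check = new CheckDescriptionSketch(
        "COMPONENTS_EXIST_IN_TARGET_REPO",
        new ImmutableMap.Builder<String, String>()
            .put(DEFAULT,
                "The following components do not exist in the target repository's stack. "
                + "They must be removed from the cluster before upgrading.")
            .build());

    System.out.println(check.m_name + " -> " + check.m_failureReasons.get(DEFAULT));
  }
}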

http://git-wip-us.apache.org/repos/asf/ambari/blob/113b381e/ambari-server/src/main/java/org/apache/ambari/server/checks/ComponentsExistInRepoCheck.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/ComponentsExistInRepoCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/ComponentsExistInRepoCheck.java
new file mode 100644
index 0000000..d60433d
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/ComponentsExistInRepoCheck.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.checks;
+
+import java.text.MessageFormat;
+import java.util.LinkedHashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.ambari.annotations.Experimental;
+import org.apache.ambari.annotations.ExperimentalFeature;
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.StackAccessException;
+import org.apache.ambari.server.controller.PrereqCheckRequest;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.ComponentInfo;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponent;
+import org.apache.ambari.server.state.ServiceInfo;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.stack.PrereqCheckStatus;
+import org.apache.ambari.server.state.stack.PrerequisiteCheck;
+import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
+import org.apache.commons.lang.StringUtils;
+
+import com.google.inject.Singleton;
+
+/**
+ * The {@link ComponentsExistInRepoCheck} is used to determine if any of the
+ * components scheduled for upgrade do not exist in the target repository or
+ * stack.
+ */
+@Singleton
+@UpgradeCheck(
+    group = UpgradeCheckGroup.TOPOLOGY,
+    required = { UpgradeType.ROLLING, UpgradeType.NON_ROLLING, UpgradeType.HOST_ORDERED })
+public class ComponentsExistInRepoCheck extends AbstractCheckDescriptor {
+
+  /**
+   * Constructor.
+   */
+  public ComponentsExistInRepoCheck() {
+    super(CheckDescription.COMPONENTS_EXIST_IN_TARGET_REPO);
+  }
+
+  @Override
+  public void perform(PrerequisiteCheck prerequisiteCheck, PrereqCheckRequest request)
+      throws AmbariException {
+    final String clusterName = request.getClusterName();
+    final Cluster cluster = clustersProvider.get().getCluster(clusterName);
+    StackId sourceStack = request.getSourceStackId();
+    StackId targetStack = request.getTargetStackId();
+
+    Set<String> failedServices = new TreeSet<>();
+    Set<String> failedComponents = new TreeSet<>();
+
+    @Experimental(
+        feature = ExperimentalFeature.PATCH_UPGRADES,
+        comment = "Assumes all services participate in the upgrade")
+    Map<String, Service> servicesInUpgrade = cluster.getServices();
+    for (String serviceName : servicesInUpgrade.keySet()) {
+      try {
+        ServiceInfo serviceInfo = ambariMetaInfo.get().getService(targetStack.getStackName(),
+            targetStack.getStackVersion(), serviceName);
+
+        if (serviceInfo.isDeleted() || !serviceInfo.isValid()) {
+          failedServices.add(serviceName);
+          continue;
+        }
+
+        Service service = servicesInUpgrade.get(serviceName);
+        Map<String, ServiceComponent> componentsInUpgrade = service.getServiceComponents();
+        for (String componentName : componentsInUpgrade.keySet()) {
+          try {
+            ComponentInfo componentInfo = ambariMetaInfo.get().getComponent(
+                targetStack.getStackName(), targetStack.getStackVersion(), serviceName,
+                componentName);
+
+            // if this component isn't included in the upgrade, then skip it
+            if (!componentInfo.isVersionAdvertised()) {
+              continue;
+            }
+
+            if (componentInfo.isDeleted()) {
+              failedComponents.add(componentName);
+            }
+
+          } catch (StackAccessException stackAccessException) {
+            failedComponents.add(componentName);
+          }
+        }
+      } catch (StackAccessException stackAccessException) {
+        failedServices.add(serviceName);
+      }
+    }
+
+    if( failedServices.isEmpty() && failedComponents.isEmpty() ){
+      prerequisiteCheck.setStatus(PrereqCheckStatus.PASS);
+      return;
+    }
+
+    LinkedHashSet<String> failedOn = new LinkedHashSet<>();
+    failedOn.addAll(failedServices);
+    failedOn.addAll(failedComponents);
+
+    prerequisiteCheck.setFailedOn(failedOn);
+    prerequisiteCheck.setStatus(PrereqCheckStatus.FAIL);
+
+    String message = "The following {0} exist in {1} but are not included in {2}. They must be removed before upgrading.";
+    String messageFragment = "";
+    if (!failedServices.isEmpty()) {
+      messageFragment = "services";
+    }
+
+    if( !failedComponents.isEmpty() ){
+      if(!StringUtils.isEmpty(messageFragment)){
+        messageFragment += " and ";
+      }
+
+      messageFragment += "components";
+    }
+
+    message = MessageFormat.format(message, messageFragment, sourceStack, targetStack);
+    prerequisiteCheck.setFailReason(message);
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/113b381e/ambari-server/src/main/java/org/apache/ambari/server/state/Host.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Host.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Host.java
index 241659a..78587af 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/Host.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Host.java
@@ -395,7 +395,9 @@ public interface Host extends Comparable {
   List<HostVersionEntity> getAllHostVersions();
 
   /**
-   * Gets whether this host has components which advertise their version.
+   * Gets whether this host has components which advertise their version for the
+   * given stack. If the components on the host do not exist in the given stack,
+   * then it is assumed they do not advertise their version.
    *
    * @param stackId
    *          the version of the stack to use when checking version

http://git-wip-us.apache.org/repos/asf/ambari/blob/113b381e/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
index 328fe22..42ec945 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
@@ -30,6 +30,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.HostNotFoundException;
+import org.apache.ambari.server.StackAccessException;
 import org.apache.ambari.server.agent.AgentEnv;
 import org.apache.ambari.server.agent.DiskInfo;
 import org.apache.ambari.server.agent.HostInfo;
@@ -148,7 +149,7 @@ public class HostImpl implements Host {
 
   private long lastHeartbeatTime = 0L;
   private AgentEnv lastAgentEnv = null;
-  private List<DiskInfo> disksInfo = new CopyOnWriteArrayList<DiskInfo>();
+  private List<DiskInfo> disksInfo = new CopyOnWriteArrayList<>();
   private RecoveryReport recoveryReport = new RecoveryReport();
   private Integer currentPingPort = null;
 
@@ -481,7 +482,7 @@ public class HostImpl implements Host {
     // FIXME add all other information into host attributes
     setAgentVersion(new AgentVersion(hostInfo.getAgentUserId()));
 
-    Map<String, String> attrs = new HashMap<String, String>();
+    Map<String, String> attrs = new HashMap<>();
     if (hostInfo.getHardwareIsa() != null) {
       attrs.put(HARDWAREISA, hostInfo.getHardwareIsa());
     }
@@ -828,7 +829,7 @@ public class HostImpl implements Host {
     Map<String, String> hostAttrs = gson.fromJson(hostEntity.getHostAttributes(), hostAttributesType);
 
     if (hostAttrs == null) {
-      hostAttrs = new ConcurrentHashMap<String, String>();
+      hostAttrs = new ConcurrentHashMap<>();
     }
 
     hostAttrs.putAll(hostAttributes);
@@ -1024,7 +1025,7 @@ public class HostImpl implements Host {
 
   @Override
   public Map<String, DesiredConfig> getDesiredConfigs(long clusterId) {
-    Map<String, DesiredConfig> map = new HashMap<String, DesiredConfig>();
+    Map<String, DesiredConfig> map = new HashMap<>();
 
     for (HostConfigMapping e : hostConfigMappingDAO.findSelected(
         clusterId, getHostId())) {
@@ -1045,10 +1046,10 @@ public class HostImpl implements Host {
   @Override
   public Map<String, HostConfig> getDesiredHostConfigs(Cluster cluster,
       Map<String, DesiredConfig> clusterDesiredConfigs) throws AmbariException {
-    Map<String, HostConfig> hostConfigMap = new HashMap<String, HostConfig>();
+    Map<String, HostConfig> hostConfigMap = new HashMap<>();
 
     if( null == cluster ){
-      clusterDesiredConfigs = new HashMap<String, DesiredConfig>();
+      clusterDesiredConfigs = new HashMap<>();
     }
 
     // per method contract, fetch if not supplied
@@ -1173,12 +1174,18 @@ public class HostImpl implements Host {
     HostEntity hostEntity = getHostEntity();
 
     for (HostComponentStateEntity componentState : hostEntity.getHostComponentStateEntities()) {
-      ComponentInfo component = ambariMetaInfo.getComponent(stackId.getStackName(),
-          stackId.getStackVersion(), componentState.getServiceName(),
-          componentState.getComponentName());
+      try {
+        ComponentInfo component = ambariMetaInfo.getComponent(stackId.getStackName(),
+            stackId.getStackVersion(), componentState.getServiceName(),
+            componentState.getComponentName());
 
-      if (component.isVersionAdvertised()) {
-        return true;
+        if (component.isVersionAdvertised()) {
+          return true;
+        }
+      } catch( StackAccessException stackAccessException ){
+        LOG.info("{}/{} does not exist in {} and will not advertise its version for that stack.",
+            componentState.getServiceName(), componentState.getComponentName(),
+            stackId.getStackId());
       }
     }
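
The HostImpl hunk above changes the host's "has components advertising versions" lookup from an unguarded metainfo call into one that tolerates components unknown to the requested stack: a StackAccessException is now logged and the component skipped rather than failing the whole call, matching the updated Host.java Javadoc. A minimal sketch of that defensive-lookup pattern, with a hypothetical lookup method and exception standing in for AmbariMetaInfo.getComponent and StackAccessException:

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class VersionAdvertisingSketch {

  // Stand-in for org.apache.ambari.server.StackAccessException.
  static class StackLookupException extends Exception {}

  // Stand-in for AmbariMetaInfo.getComponent(...): throws when the
  // component is unknown to the requested stack.
  static boolean advertisesVersion(Map<String, Boolean> stack, String component)
      throws StackLookupException {
    Boolean advertises = stack.get(component);
    if (advertises == null) {
      throw new StackLookupException();
    }
    return advertises;
  }

  // Mirrors the patched loop: a component missing from the stack no longer
  // aborts the whole host check; it is logged and skipped instead.
  static boolean hostAdvertisesVersions(Map<String, Boolean> stack,
      List<String> hostComponents) {
    for (String component : hostComponents) {
      try {
        if (advertisesVersion(stack, component)) {
          return true;
        }
      } catch (StackLookupException e) {
        System.out.println(component
            + " does not exist in the stack and will not advertise its version");
      }
    }
    return false;
  }

  public static void main(String[] args) {
    Map<String, Boolean> targetStack = new HashMap<>();
    targetStack.put("ZOOKEEPER_SERVER", Boolean.TRUE);
    // FOO_MASTER is intentionally absent from the target stack.
    System.out.println(hostAdvertisesVersions(targetStack,
        Arrays.asList("FOO_MASTER", "ZOOKEEPER_SERVER")));  // prints true
  }
}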
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/113b381e/ambari-server/src/test/java/org/apache/ambari/server/checks/ComponentExistsInRepoCheckTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/ComponentExistsInRepoCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/ComponentExistsInRepoCheckTest.java
new file mode 100644
index 0000000..76e8cc9
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/ComponentExistsInRepoCheckTest.java
@@ -0,0 +1,329 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.checks;
+
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.expect;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.ambari.server.StackAccessException;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.controller.PrereqCheckRequest;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.ComponentInfo;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponent;
+import org.apache.ambari.server.state.ServiceInfo;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.stack.PrereqCheckStatus;
+import org.apache.ambari.server.state.stack.PrerequisiteCheck;
+import org.apache.commons.lang.StringUtils;
+import org.easymock.EasyMockSupport;
+import org.easymock.Mock;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.google.inject.Provider;
+
+/**
+ * Tests for {@link ComponentsExistInRepoCheck}
+ */
+public class ComponentExistsInRepoCheckTest extends EasyMockSupport {
+
+  private final ComponentsExistInRepoCheck m_check = new ComponentsExistInRepoCheck();
+
+  @Mock
+  private Clusters m_clusters;
+
+  @Mock
+  private Cluster m_cluster;
+
+  // pick two stacks which have different services
+  private final StackId SOURCE_STACK = new StackId("HDP", "0.1");
+  private final StackId TARGET_STACK = new StackId("HDP", "2.2.0");
+
+  private final Map<String, Service> CLUSTER_SERVICES = new HashMap<>();
+  private final Map<String, ServiceComponent> FOO_SERVICE_COMPONENTS = new HashMap<>();
+  private final Map<String, ServiceComponent> ZK_SERVICE_COMPONENTS = new HashMap<>();
+
+  @Mock
+  private AmbariMetaInfo m_ambariMetaInfo;
+
+  @Mock
+  private Service m_fooService;
+
+  @Mock
+  private Service m_zookeeperService;
+
+  @Mock
+  private ServiceInfo m_fooInfo;
+
+  @Mock
+  private ServiceInfo m_zookeeperInfo;
+
+  @Mock
+  private ComponentInfo m_fooComponentInfo;
+
+  @Mock
+  private ComponentInfo m_zookeeperServerInfo;
+
+  @Mock
+  private ServiceComponent m_fooComponent;
+
+  @Mock
+  private ServiceComponent m_zookeeperServer;
+
+  @Before
+  public void before() throws Exception {
+
+    EasyMockSupport.injectMocks(this);
+
+    m_check.clustersProvider = new Provider<Clusters>() {
+      @Override
+      public Clusters get() {
+        return m_clusters;
+      }
+    };
+
+    m_check.ambariMetaInfo = new Provider<AmbariMetaInfo>() {
+      @Override
+      public AmbariMetaInfo get() {
+        return m_ambariMetaInfo;
+      }
+    };
+
+    expect(m_cluster.getServices()).andReturn(CLUSTER_SERVICES).atLeastOnce();
+    expect(m_clusters.getCluster((String) anyObject())).andReturn(m_cluster).anyTimes();
+
+    ZK_SERVICE_COMPONENTS.put("ZOOKEEPER_SERVER", m_zookeeperServer);
+    FOO_SERVICE_COMPONENTS.put("FOO_COMPONENT", m_fooComponent);
+
+    expect(m_zookeeperService.getServiceComponents()).andReturn(ZK_SERVICE_COMPONENTS).anyTimes();
+    expect(m_fooService.getServiceComponents()).andReturn(FOO_SERVICE_COMPONENTS).anyTimes();
+
+    expect(m_zookeeperInfo.getComponentByName("ZOOKEEPER_SERVER")).andReturn(
+        m_zookeeperServerInfo).anyTimes();
+
+    expect(m_fooInfo.getComponentByName("FOO_COMPONENT")).andReturn(m_fooComponentInfo).anyTimes();
+
+    expect(m_ambariMetaInfo.getService(TARGET_STACK.getStackName(), TARGET_STACK.getStackVersion(),
+        "ZOOKEEPER")).andReturn(m_zookeeperInfo).anyTimes();
+
+    expect(m_ambariMetaInfo.getComponent(TARGET_STACK.getStackName(),
+        TARGET_STACK.getStackVersion(), "ZOOKEEPER", "ZOOKEEPER_SERVER")).andReturn(
+            m_zookeeperServerInfo).anyTimes();
+  }
+
+  /**
+   * Tests that the check passes when services and components exist.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testCheckPassesWhenServiceAndComponentsExist() throws Exception {
+    PrerequisiteCheck check = new PrerequisiteCheck(CheckDescription.COMPONENTS_EXIST_IN_TARGET_REPO, "c1");
+    PrereqCheckRequest request = new PrereqCheckRequest("cluster");
+    request.setSourceStackId(SOURCE_STACK);
+    request.setTargetStackId(TARGET_STACK);
+
+    CLUSTER_SERVICES.put("ZOOKEEPER", m_zookeeperService);
+    expect(m_zookeeperInfo.isValid()).andReturn(true).atLeastOnce();
+    expect(m_zookeeperInfo.isDeleted()).andReturn(false).atLeastOnce();
+    expect(m_zookeeperServerInfo.isVersionAdvertised()).andReturn(true).atLeastOnce();
+    expect(m_zookeeperServerInfo.isDeleted()).andReturn(false).atLeastOnce();
+
+    replayAll();
+
+    Assert.assertTrue(m_check.isApplicable(request));
+
+    m_check.perform(check, request);
+
+    Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus());
+    Assert.assertTrue(StringUtils.isBlank(check.getFailReason()));
+  }
+
+  /**
+   * Tests that the check passes when a component does not advertise its
+   * version, even though it is marked as deleted.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testCheckPassesWhenComponentNotAdvertisingVersion() throws Exception {
+    PrerequisiteCheck check = new PrerequisiteCheck(CheckDescription.COMPONENTS_EXIST_IN_TARGET_REPO, "c1");
+    PrereqCheckRequest request = new PrereqCheckRequest("cluster");
+    request.setSourceStackId(SOURCE_STACK);
+    request.setTargetStackId(TARGET_STACK);
+
+    CLUSTER_SERVICES.put("FOO_SERVICE", m_fooService);
+
+    expect(m_ambariMetaInfo.getService(TARGET_STACK.getStackName(), TARGET_STACK.getStackVersion(),
+        "FOO_SERVICE")).andReturn(m_fooInfo).anyTimes();
+
+    expect(m_ambariMetaInfo.getComponent(TARGET_STACK.getStackName(),
+        TARGET_STACK.getStackVersion(), "FOO_SERVICE", "FOO_COMPONENT")).andReturn(
+            m_fooComponentInfo).atLeastOnce();
+
+    expect(m_fooInfo.isValid()).andReturn(true).atLeastOnce();
+    expect(m_fooInfo.isDeleted()).andReturn(false).atLeastOnce();
+    expect(m_fooComponentInfo.isVersionAdvertised()).andReturn(false).atLeastOnce();
+    expect(m_fooComponentInfo.isDeleted()).andReturn(true).atLeastOnce();
+
+    replayAll();
+
+    Assert.assertTrue(m_check.isApplicable(request));
+
+    m_check.perform(check, request);
+
+    Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus());
+    Assert.assertTrue(StringUtils.isBlank(check.getFailReason()));
+  }
+
+  /**
+   * Tests that the check fails when the service exists but was deleted.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testCheckFailsWhenServiceExistsButIsDeleted() throws Exception {
+    PrerequisiteCheck check = new PrerequisiteCheck(CheckDescription.COMPONENTS_EXIST_IN_TARGET_REPO, "c1");
+    PrereqCheckRequest request = new PrereqCheckRequest("cluster");
+    request.setSourceStackId(SOURCE_STACK);
+    request.setTargetStackId(TARGET_STACK);
+
+    CLUSTER_SERVICES.put("ZOOKEEPER", m_zookeeperService);
+    expect(m_zookeeperInfo.isValid()).andReturn(true).atLeastOnce();
+    expect(m_zookeeperInfo.isDeleted()).andReturn(true).atLeastOnce();
+
+    replayAll();
+
+    Assert.assertTrue(m_check.isApplicable(request));
+
+    m_check.perform(check, request);
+
+    Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus());
+    Assert.assertTrue(check.getFailedOn().contains("ZOOKEEPER"));
+  }
+
+  /**
+   * Tests that the check fails when the component exists but was deleted.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testCheckFailsWhenComponentExistsButIsDeleted() throws Exception {
+    PrerequisiteCheck check = new PrerequisiteCheck(CheckDescription.COMPONENTS_EXIST_IN_TARGET_REPO, "c1");
+    PrereqCheckRequest request = new PrereqCheckRequest("cluster");
+    request.setSourceStackId(SOURCE_STACK);
+    request.setTargetStackId(TARGET_STACK);
+
+    CLUSTER_SERVICES.put("ZOOKEEPER", m_zookeeperService);
+    expect(m_zookeeperInfo.isValid()).andReturn(true).atLeastOnce();
+    expect(m_zookeeperInfo.isDeleted()).andReturn(false).atLeastOnce();
+    expect(m_zookeeperServerInfo.isVersionAdvertised()).andReturn(true).atLeastOnce();
+    expect(m_zookeeperServerInfo.isDeleted()).andReturn(true).atLeastOnce();
+
+    replayAll();
+
+    Assert.assertTrue(m_check.isApplicable(request));
+
+    m_check.perform(check, request);
+
+    Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus());
+    Assert.assertTrue(check.getFailedOn().contains("ZOOKEEPER_SERVER"));
+  }
+
+  /**
+   * Tests that the check fails when a service is missing from the target stack.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testCheckFailsWhenServiceIsMissing() throws Exception {
+    PrerequisiteCheck check = new PrerequisiteCheck(
+        CheckDescription.COMPONENTS_EXIST_IN_TARGET_REPO, "c1");
+    PrereqCheckRequest request = new PrereqCheckRequest("cluster");
+    request.setSourceStackId(SOURCE_STACK);
+    request.setTargetStackId(TARGET_STACK);
+
+    CLUSTER_SERVICES.put("ZOOKEEPER", m_zookeeperService);
+    CLUSTER_SERVICES.put("FOO_SERVICE", m_fooService);
+
+    expect(m_ambariMetaInfo.getService(TARGET_STACK.getStackName(), TARGET_STACK.getStackVersion(),
+        "FOO_SERVICE")).andThrow(new StackAccessException(""));
+
+    expect(m_zookeeperInfo.isValid()).andReturn(true).atLeastOnce();
+    expect(m_zookeeperInfo.isDeleted()).andReturn(false).atLeastOnce();
+    expect(m_zookeeperServerInfo.isVersionAdvertised()).andReturn(true).atLeastOnce();
+    expect(m_zookeeperServerInfo.isDeleted()).andReturn(false).atLeastOnce();
+
+    replayAll();
+
+    Assert.assertTrue(m_check.isApplicable(request));
+
+    m_check.perform(check, request);
+
+    Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus());
+    Assert.assertTrue(check.getFailedOn().contains("FOO_SERVICE"));
+  }
+
+  /**
+   * Tests that the check fails when a component is missing from the target stack.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testCheckFailsWhenComponentIsMissing() throws Exception {
+    PrerequisiteCheck check = new PrerequisiteCheck(
+        CheckDescription.COMPONENTS_EXIST_IN_TARGET_REPO, "c1");
+    PrereqCheckRequest request = new PrereqCheckRequest("cluster");
+    request.setSourceStackId(SOURCE_STACK);
+    request.setTargetStackId(TARGET_STACK);
+
+    CLUSTER_SERVICES.put("FOO_SERVICE", m_fooService);
+
+    expect(m_ambariMetaInfo.getService(TARGET_STACK.getStackName(), TARGET_STACK.getStackVersion(),
+        "FOO_SERVICE")).andReturn(m_fooInfo).anyTimes();
+
+    expect(m_ambariMetaInfo.getComponent(TARGET_STACK.getStackName(),
+        TARGET_STACK.getStackVersion(), "FOO_SERVICE", "FOO_COMPONENT")).andThrow(
+            new StackAccessException(""));
+
+    expect(m_zookeeperInfo.isValid()).andReturn(true).atLeastOnce();
+    expect(m_zookeeperInfo.isDeleted()).andReturn(false).atLeastOnce();
+    expect(m_zookeeperServerInfo.isVersionAdvertised()).andReturn(true).atLeastOnce();
+    expect(m_zookeeperServerInfo.isDeleted()).andReturn(false).atLeastOnce();
+
+    expect(m_fooInfo.isValid()).andReturn(true).atLeastOnce();
+    expect(m_fooInfo.isDeleted()).andReturn(false).atLeastOnce();
+
+    replayAll();
+
+    Assert.assertTrue(m_check.isApplicable(request));
+
+    m_check.perform(check, request);
+
+    Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus());
+    Assert.assertTrue(check.getFailedOn().contains("FOO_COMPONENT"));
+  }
+
+}
\ No newline at end of file

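A stylistic aside on the two anonymous Provider classes in before(): com.google.inject.Provider<T> declares a single abstract method, so on Java 8+ the same wiring can be written as lambdas (equivalent behavior; not part of the patch):

    m_check.clustersProvider = () -> m_clusters;
    m_check.ambariMetaInfo = () -> m_ambariMetaInfo;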

[04/50] [abbrv] ambari git commit: Merge branch 'branch-2.5' into branch-feature-AMBARI-21348

Posted by jo...@apache.org.
Merge branch 'branch-2.5' into branch-feature-AMBARI-21348


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/267cd8b0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/267cd8b0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/267cd8b0

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 267cd8b0cee17dc84be2b075b7168cd2518f02b6
Parents: d852928 a6ac40b
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Wed Jul 12 14:46:40 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Wed Jul 12 14:46:40 2017 -0400

----------------------------------------------------------------------
 .../libraries/functions/stack_features.py       |  28 ++--
 .../actionmanager/ExecutionCommandWrapper.java  |   3 +-
 .../ambari/server/agent/ExecutionCommand.java   |  14 +-
 .../controller/ActionExecutionContext.java      |  30 +++--
 .../controller/AmbariActionExecutionHelper.java |   8 +-
 .../ClusterStackVersionResourceProvider.java    |   2 +-
 .../internal/UpgradeResourceProvider.java       | 107 +++++++++-------
 .../upgrades/FinalizeUpgradeAction.java         |  18 +--
 .../upgrades/UpgradeUserKerberosDescriptor.java |  41 ++----
 .../ambari/server/state/UpgradeContext.java     |  31 +++--
 .../ambari/server/topology/AmbariContext.java   |  30 ++++-
 .../HBASE/0.96.0.2.0/package/scripts/hbase.py   |  12 +-
 .../0.96.0.2.0/package/scripts/params_linux.py  |   3 +
 .../HIVE/0.12.0.2.0/package/scripts/hive.py     |   3 +-
 .../0.12.0.2.0/package/scripts/params_linux.py  |   2 +
 .../SPARK/1.2.1/package/scripts/params.py       |  11 +-
 .../SPARK/1.2.1/package/scripts/setup_spark.py  |   6 +-
 .../1.2.1/package/scripts/spark_service.py      |   6 +-
 .../BigInsights/4.2.5/role_command_order.json   |  12 +-
 .../BigInsights/4.2.5/services/stack_advisor.py |  53 --------
 .../BigInsights/4.2/role_command_order.json     |   3 +-
 .../4.2/services/SYSTEMML/metainfo.xml          |  77 -----------
 .../SYSTEMML/package/scripts/__init__.py        |  19 ---
 .../services/SYSTEMML/package/scripts/params.py |  40 ------
 .../SYSTEMML/package/scripts/service_check.py   |  43 -------
 .../SYSTEMML/package/scripts/systemml_client.py |  49 -------
 .../services/TITAN/configuration/titan-env.xml  |  48 -------
 .../TITAN/configuration/titan-hbase-solr.xml    |  67 ----------
 .../TITAN/configuration/titan-log4j.xml         |  66 ----------
 .../4.2/services/TITAN/kerberos.json            |  17 ---
 .../BigInsights/4.2/services/TITAN/metainfo.xml |  88 -------------
 .../TITAN/package/files/titanSmoke.groovy       |  20 ---
 .../services/TITAN/package/scripts/params.py    | 128 -------------------
 .../TITAN/package/scripts/service_check.py      |  64 ----------
 .../4.2/services/TITAN/package/scripts/titan.py |  70 ----------
 .../TITAN/package/scripts/titan_client.py       |  58 ---------
 .../resources/stacks/HDP/2.6/repos/repoinfo.xml |   6 +-
 .../services/HIVE/configuration/hive-site.xml   |  35 +++++
 .../ComponentVersionCheckActionTest.java        |   1 -
 .../upgrades/UpgradeActionTest.java             |   2 -
 .../UpgradeUserKerberosDescriptorTest.java      |  19 +--
 .../src/test/python/TestStackFeature.py         |  44 +++++--
 .../python/custom_actions/test_ru_set_all.py    |   6 +-
 .../stacks/2.0.6/HIVE/test_hive_server.py       |   2 +
 .../test/python/stacks/2.0.6/configs/nn_eu.json |   2 +-
 .../stacks/2.0.6/configs/nn_eu_standby.json     |   2 +-
 .../2.1/configs/hive-metastore-upgrade.json     |   2 +-
 .../python/stacks/2.2/configs/knox_upgrade.json |   2 +-
 ambari-web/app/controllers/main/service.js      |  13 +-
 ambari-web/app/utils/ajax/ajax.js               |  22 ++++
 .../test/controllers/main/service_test.js       |   4 +-
 51 files changed, 321 insertions(+), 1118 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/267cd8b0/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
----------------------------------------------------------------------


[43/50] [abbrv] ambari git commit: AMBARI-21490 Ambari Schema Upgrade fails : Unknown column 'from_version' in 'field list' (dgrinenko)

Posted by jo...@apache.org.
AMBARI-21490 Ambari Schema Upgrade fails : Unknown column 'from_version' in 'field list' (dgrinenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/651fe3d7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/651fe3d7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/651fe3d7

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 651fe3d706185992e107486ee69059ec8eccf909
Parents: 606c5ca
Author: Dmytro Grinenko <ha...@apache.org>
Authored: Mon Jul 17 18:51:26 2017 +0300
Committer: Dmytro Grinenko <ha...@apache.org>
Committed: Mon Jul 17 18:51:26 2017 +0300

----------------------------------------------------------------------
 .../apache/ambari/server/orm/DBAccessor.java    |  14 ++
 .../ambari/server/orm/DBAccessorImpl.java       |  24 +++
 .../server/upgrade/UpgradeCatalog220.java       | 197 +------------------
 .../server/upgrade/UpgradeCatalog252.java       |  11 +-
 4 files changed, 48 insertions(+), 198 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/651fe3d7/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessor.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessor.java
index 899426e..0313698 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessor.java
@@ -680,6 +680,20 @@ public interface DBAccessor {
   void moveColumnToAnotherTable(String sourceTableName, DBColumnInfo sourceColumn, String sourceIDFieldName,
        String targetTableName, DBColumnInfo targetColumn, String targetIDFieldName, Object initialValue) throws SQLException;
 
+  /**
+   * Remove all rows from the table
+   * @param tableName name of the table
+   */
+  void clearTable(String tableName) throws SQLException;
+
+  /**
+   * Set the given column to {@code value} for every row in the table
+   * @param tableName name of the table
+   * @param columnName name of the column to be updated
+   * @param value data to use for the update
+   */
+  void clearTableColumn(String tableName, String columnName, Object value) throws SQLException;
+
   enum DbType {
     ORACLE,
     MYSQL,

http://git-wip-us.apache.org/repos/asf/ambari/blob/651fe3d7/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
index 2256d44..38d60e9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
@@ -1442,4 +1442,28 @@ public class DBAccessorImpl implements DBAccessor {
       dropColumn(sourceTableName, sourceColumn.getName());
     }
   }
+
+  /**
+   * Remove all rows from the table
+   *
+   * @param tableName name of the table
+   */
+  @Override
+  public void clearTable(String tableName) throws SQLException {
+    String sqlQuery = "DELETE FROM " + convertObjectName(tableName);
+    executeQuery(sqlQuery);
+  }
+
+  /**
+   * Set the given column to {@code value} for every row in the table
+   *
+   * @param tableName  name of the table
+   * @param columnName name of the column to be updated
+   * @param value      data to use for the update
+   */
+  @Override
+  public void clearTableColumn(String tableName, String columnName, Object value) throws SQLException {
+    String sqlQuery = String.format("UPDATE %s SET %s = ?", convertObjectName(tableName), convertObjectName(columnName));
+    executePreparedUpdate(sqlQuery, value);
+  }
 }

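In plain JDBC terms the two new methods reduce to an unconditional DELETE and a parameterized UPDATE; note that clearTableColumn() binds the value as a prepared-statement parameter instead of splicing it into the SQL string, which is what makes passing null safe. A minimal sketch of the equivalent calls (the table and column names are illustrative, and connection is an assumed java.sql.Connection):

    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.sql.SQLException;
    import java.sql.Statement;

    static void clearExamples(Connection connection) throws SQLException {
      try (Statement stmt = connection.createStatement()) {
        stmt.executeUpdate("DELETE FROM upgrade_item");  // clearTable("upgrade_item")
      }
      try (PreparedStatement ps =
               connection.prepareStatement("UPDATE clusters SET upgrade_id = ?")) {
        // clearTableColumn("clusters", "upgrade_id", null); some drivers may
        // require setNull(1, java.sql.Types.BIGINT) instead of setObject with null
        ps.setObject(1, null);
        ps.executeUpdate();
      }
    }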
http://git-wip-us.apache.org/repos/asf/ambari/blob/651fe3d7/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog220.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog220.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog220.java
index 8ee7943..0bf60af 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog220.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog220.java
@@ -455,22 +455,10 @@ public class UpgradeCatalog220 extends AbstractUpgradeCatalog {
   protected void executeStackUpgradeDDLUpdates() throws SQLException, AmbariException {
     final Configuration.DatabaseType databaseType = configuration.getDatabaseType();
 
-    // Add columns
-    if (!dbAccessor.tableHasColumn(UPGRADE_TABLE, UPGRADE_PACKAGE_COL)) {
-      LOG.info("Adding upgrade_package column to upgrade table.");
-      dbAccessor.addColumn(UPGRADE_TABLE, new DBColumnInfo(UPGRADE_PACKAGE_COL, String.class, 255, null, true));
-    }
-    if (!dbAccessor.tableHasColumn(UPGRADE_TABLE, UPGRADE_TYPE_COL)) {
-      LOG.info("Adding upgrade_type column to upgrade table.");
-      dbAccessor.addColumn(UPGRADE_TABLE, new DBColumnInfo(UPGRADE_TYPE_COL, String.class, 32, null, true));
-    }
-
-    // Populate values in upgrade table.
-    boolean success = populateUpgradeTable();
+    dbAccessor.addColumn(UPGRADE_TABLE, new DBColumnInfo(UPGRADE_PACKAGE_COL, String.class, 255, null, true));
+    dbAccessor.addColumn(UPGRADE_TABLE, new DBColumnInfo(UPGRADE_TYPE_COL, String.class, 32, null, true));
 
-    if (!success) {
-      throw new AmbariException("Errors found while populating the upgrade table with values for columns upgrade_type and upgrade_package.");
-    }
+    // Note: skipping the data population here since this table is cleared by the UpgradeCatalog252 DDL updates
 
     if (dbAccessor.tableHasColumn(REPO_VERSION_TABLE, UPGRADE_PACKAGE_COL)) {
       LOG.info("Dropping upgrade_package column from repo_version table.");
@@ -500,184 +488,7 @@ public class UpgradeCatalog220 extends AbstractUpgradeCatalog {
     }
   }
 
-  /**
-   * Populate the upgrade table with values for the columns upgrade_type and upgrade_package.
-   * The upgrade_type will default to {@code org.apache.ambari.server.state.stack.upgrade.UpgradeType.ROLLING}
-   * whereas the upgrade_package will be calculated.
-   * @return {@code} true on success, and {@code} false otherwise.
-   */
-  private boolean populateUpgradeTable() {
-    boolean success = true;
-    Statement statement = null;
-    ResultSet rs = null;
-    try {
-      statement = dbAccessor.getConnection().createStatement();
-      if (statement != null) {
-        // Need to use SQL since the schema is changing and some of the columns have not yet been added..
-        rs = statement.executeQuery("SELECT upgrade_id, cluster_id, from_version, to_version, direction, upgrade_package, upgrade_type FROM upgrade");
-        if (rs != null) {
-          try {
-            while (rs.next()) {
-              final long upgradeId = rs.getLong("upgrade_id");
-              final long clusterId = rs.getLong("cluster_id");
-              final String fromVersion = rs.getString("from_version");
-              final String toVersion = rs.getString("to_version");
-              final Direction direction = Direction.valueOf(rs.getString("direction"));
-              // These two values are likely null.
-              String upgradePackage = rs.getString("upgrade_package");
-              String upgradeType = rs.getString("upgrade_type");
-
-              LOG.info(MessageFormat.format("Populating rows for the upgrade table record with " +
-                  "upgrade_id: {0,number,#}, cluster_id: {1,number,#}, from_version: {2}, to_version: {3}, direction: {4}",
-                upgradeId, clusterId, fromVersion, toVersion, direction));
-
-              // Set all upgrades that have been done so far to type "rolling"
-              if (StringUtils.isEmpty(upgradeType)) {
-                LOG.info("Updating the record's upgrade_type to " + UpgradeType.ROLLING);
-                dbAccessor.executeQuery("UPDATE upgrade SET upgrade_type = '" + UpgradeType.ROLLING + "' WHERE upgrade_id = " + upgradeId);
-              }
-
-              if (StringUtils.isEmpty(upgradePackage)) {
-                String version = null;
-                StackEntity stack = null;
-
-                if (direction == Direction.UPGRADE) {
-                  version = toVersion;
-                } else if (direction == Direction.DOWNGRADE) {
-                  // TODO AMBARI-12698, this is going to be a problem.
-                  // During a downgrade, the "to_version" is overwritten to the source version, but the "from_version"
-                  // doesn't swap. E.g.,
-                  //  upgrade_id | from_version |  to_version  | direction
-                  // ------------+--------------+--------------+----------
-                  //           1 | 2.2.6.0-2800 | 2.3.0.0-2557 | UPGRADE
-                  //           2 | 2.2.6.0-2800 | 2.2.6.0-2800 | DOWNGRADE
-                  version = fromVersion;
-                }
-
-                ClusterEntity cluster = clusterDAO.findById(clusterId);
-
-                if (null != cluster) {
-                  stack = cluster.getDesiredStack();
-                  upgradePackage = calculateUpgradePackage(stack, version);
-                } else {
-                  LOG.error("Could not find a cluster with cluster_id " + clusterId);
-                }
-
-                if (!StringUtils.isEmpty(upgradePackage)) {
-                  LOG.info("Updating the record's upgrade_package to " + upgradePackage);
-                  dbAccessor.executeQuery("UPDATE upgrade SET upgrade_package = '" + upgradePackage + "' WHERE upgrade_id = " + upgradeId);
-                } else {
-                  success = false;
-                  LOG.error("Unable to populate column upgrade_package for record in table upgrade with id " + upgradeId);
-                }
-              }
-            }
-          } catch (Exception e) {
-            success = false;
-            e.printStackTrace();
-            LOG.error("Unable to populate the upgrade_type and upgrade_package columns of the upgrade table. " + e);
-          }
-        }
-      }
-    } catch (Exception e) {
-      success = false;
-      e.printStackTrace();
-      LOG.error("Failed to retrieve records from the upgrade table to populate the upgrade_type and upgrade_package columns. Exception: " + e);
-    } finally {
-      try {
-        if (rs != null) {
-          rs.close();
-        }
-        if (statement != null) {
-          statement.close();
-        }
-      } catch (SQLException e) {
-        ;
-      }
-    }
-    return success;
-  }
-
-  /**
-   * Find the single Repo Version for the given stack and version, and return
-   * its upgrade_package column. Because the upgrade_package column is going to
-   * be removed from this entity, must use raw SQL instead of the entity class.
-   * <p/>
-   * It's possible that there is an invalid version listed in the upgrade table.
-   * For example:
-   *
-   * <pre>
-   * upgrade
-   * 1 2 1295  2.2.0.0-2041  2.2.4.2-2     UPGRADE
-   * 2 2 1296  2.2.0.0-2041  2.2.0.0-2041  DOWNGRADE
-   * 3 2 1299  2.2.0.0-2041  2.2.4.2       UPGRADE
-   *
-   * repo_version
-   * 1  2.2.0.0-2041  HDP-2.2.0.0-2041  upgrade-2.2
-   * 2  2.2.4.2-2     HDP-2.2.4.2-2     upgrade-2.2
-   * </pre>
-   *
-   * Notice that it's possible for the {@code upgrade} table to include entries
-   * for a repo version which does not exist; {@code 2.2.4.2}. In these cases,
-   * this method will attempt a "best match".
-   *
-   * @param stack
-   *          Stack
-   * @param version
-   *          Stack version
-   * @return The value of the upgrade_package column, or null if not found.
-   */
-
-  private String calculateUpgradePackage(StackEntity stack, String version) {
-    String upgradePackage = null;
-    // Find the corresponding repo_version, and extract its upgrade_package
-    if (null != version && null != stack) {
-      RepositoryVersionEntity repoVersion = repositoryVersionDAO.findByStackNameAndVersion(stack.getStackName(), version);
-
-      // a null repoVersion means there's mismatch between the upgrade and repo_version table;
-      // use a best-guess approach based on the Stack
-      if( null == repoVersion ){
-        List<RepositoryVersionEntity> bestMatches = repositoryVersionDAO.findByStack(new StackId(stack));
-        if (!bestMatches.isEmpty()) {
-          repoVersion = bestMatches.get(0);
-        }
-      }
-
-      // our efforts have failed; we have no idea what to use; return null as per the contract of the method
-      if( null == repoVersion ) {
-        return null;
-      }
-
-      Statement statement = null;
-      ResultSet rs = null;
-      try {
-        statement = dbAccessor.getConnection().createStatement();
-        if (statement != null) {
-          // Need to use SQL since the schema is changing and the entity will no longer have the upgrade_package column.
-          rs = statement.executeQuery("SELECT upgrade_package FROM repo_version WHERE repo_version_id = " + repoVersion.getId());
-          if (rs != null && rs.next()) {
-            upgradePackage = rs.getString("upgrade_package");
-          }
-        }
-      } catch (Exception e) {
-        LOG.error("Failed to retrieve upgrade_package for repo_version record with id " + repoVersion.getId() + ". Exception: " + e.getMessage());
-      } finally {
-        try {
-          if (rs != null) {
-            rs.close();
-          }
-          if (statement != null) {
-            statement.close();
-          }
-        } catch (SQLException e) {
-          ;
-        }
-      }
-    }
-    return upgradePackage;
-  }
-
   /**
    * {@inheritDoc}
    */
   @Override

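One observation on the deleted helpers: the manual finally blocks with empty catch clauses (the bare ; statements above) are the pre-Java-7 pattern for JDBC cleanup. Had the code been kept, try-with-resources would shrink the same lookup to a few lines (a sketch; dbAccessor, repoVersionId and upgradePackage stand in for the fields and locals of the removed calculateUpgradePackage()):

    try (Statement statement = dbAccessor.getConnection().createStatement();
         ResultSet rs = statement.executeQuery(
             "SELECT upgrade_package FROM repo_version WHERE repo_version_id = " + repoVersionId)) {
      if (rs.next()) {
        upgradePackage = rs.getString("upgrade_package");
      }
    }  // both resources close automatically, even when an exception is thrown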
http://git-wip-us.apache.org/repos/asf/ambari/blob/651fe3d7/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
index 6d2ab84..3c8686c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
@@ -127,11 +127,11 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
    * @throws SQLException
    */
   private void addRepositoryColumnsToUpgradeTable() throws SQLException {
-    dbAccessor.executeQuery(String.format("UPDATE %s SET %s = NULL", CLUSTERS_TABLE, UPGRADE_ID_COLUMN));
-    dbAccessor.executeQuery(String.format("DELETE FROM %s", SERVICE_COMPONENT_HISTORY_TABLE));
-    dbAccessor.executeQuery(String.format("DELETE FROM %s", UPGRADE_ITEM_TABLE));
-    dbAccessor.executeQuery(String.format("DELETE FROM %s", UPGRADE_GROUP_TABLE));
-    dbAccessor.executeQuery(String.format("DELETE FROM %s", UPGRADE_TABLE));
+    dbAccessor.clearTableColumn(CLUSTERS_TABLE, UPGRADE_ID_COLUMN, null);
+    dbAccessor.clearTable(SERVICE_COMPONENT_HISTORY_TABLE);
+    dbAccessor.clearTable(UPGRADE_ITEM_TABLE);
+    dbAccessor.clearTable(UPGRADE_GROUP_TABLE);
+    dbAccessor.clearTable(UPGRADE_TABLE);
 
     dbAccessor.dropColumn(UPGRADE_TABLE, "to_version");
     dbAccessor.dropColumn(UPGRADE_TABLE, "from_version");


[38/50] [abbrv] ambari git commit: AMBARI-21486 Remove IOP Select button declares success even if command fails. (atkach)

Posted by jo...@apache.org.
AMBARI-21486 Remove IOP Select button declares success even if command fails. (atkach)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/061467ba
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/061467ba
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/061467ba

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 061467bae47c4218437b0fed97d814ce4d43222b
Parents: 133baa5
Author: Andrii Tkach <at...@apache.org>
Authored: Mon Jul 17 14:22:56 2017 +0300
Committer: Andrii Tkach <at...@apache.org>
Committed: Mon Jul 17 14:22:56 2017 +0300

----------------------------------------------------------------------
 ambari-web/app/messages.js                      |  1 -
 .../main/admin/stack_upgrade/versions_view.js   | 24 ++++++++------------
 2 files changed, 10 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/061467ba/ambari-web/app/messages.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index a51f896..9ec64ef 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -1710,7 +1710,6 @@ Em.I18n.translations = {
   'admin.stackVersions.version.notInstalled': "Not Installed",
   'admin.stackVersions.version.hostsInfoTooltip': "There are {0} hosts that do not need packages installed:<li>{1} Maintenance Mode</li><li>{2} Not Required</li>",
   'admin.stackVersions.removeIopSelect': "Remove IOP select",
-  'admin.stackVersions.removeIopSelect.done': "IOP select successfully removed.",
   'admin.stackVersions.removeIopSelect.fail': "IOP select remove failed",
   'admin.stackVersions.manageVersions': "Manage Versions",
   'admin.stackVersions.manageVersions.popup.body': 'You are about to leave the <b>Cluster Management</b> interface' +

http://git-wip-us.apache.org/repos/asf/ambari/blob/061467ba/ambari-web/app/views/main/admin/stack_upgrade/versions_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/admin/stack_upgrade/versions_view.js b/ambari-web/app/views/main/admin/stack_upgrade/versions_view.js
index 2b57e04..699ee5f 100644
--- a/ambari-web/app/views/main/admin/stack_upgrade/versions_view.js
+++ b/ambari-web/app/views/main/admin/stack_upgrade/versions_view.js
@@ -257,20 +257,16 @@ App.MainAdminStackVersionsView = Em.View.extend({
         data: {
           hosts: App.get('allHostNames').join(',')
         }
-      }).always(function(xhr, status) {
-        if (status === 'success') {
-          App.showAlertPopup(Em.I18n.t('admin.stackVersions.removeIopSelect'), Em.I18n.t('admin.stackVersions.removeIopSelect.done'));
-        } else if (status === 'error') {
-          App.ModalPopup.show({
-            header: Em.I18n.t('admin.stackVersions.removeIopSelect.fail'),
-            secondary: false,
-            bodyClass: App.AjaxDefaultErrorPopupBodyView.extend({
-              type: 'POST',
-              status: xhr.status,
-              message: xhr.responseText
-            })
-          });
-        }
+      }).fail(function(xhr) {
+        App.ModalPopup.show({
+          header: Em.I18n.t('admin.stackVersions.removeIopSelect.fail'),
+          secondary: false,
+          bodyClass: App.AjaxDefaultErrorPopupBodyView.extend({
+            type: 'POST',
+            status: xhr.status,
+            message: xhr.responseText
+          })
+        });
       });
     });
   }


[12/50] [abbrv] ambari git commit: AMBARI-21462. Readd TITAN, R4ML, SYSTEMML, JNBG to BigInsights and fix HBase backup during EU and imports (alejandro)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/py_client_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/py_client_params.py b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/py_client_params.py
new file mode 100755
index 0000000..5dcc8e4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/py_client_params.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.format import format
+from jkg_toree_params import py_executable, py_venv_pathprefix, py_venv_restrictive, venv_owner, ambarisudo
+import jnbg_helpers as helpers
+
+# Server configurations
+config = Script.get_config()
+stack_root = Script.get_stack_root()
+
+package_dir = helpers.package_dir()
+cmd_file_name = "pythonenv_setup.sh"
+cmd_file_path = format("{package_dir}files/{cmd_file_name}")
+
+# Sequence of commands executed in py_client.py
+commands = [ambarisudo + ' ' +
+            cmd_file_path + ' ' +
+            py_executable + ' ' +
+            py_venv_pathprefix + ' ' +
+            venv_owner]

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/service_check.py
new file mode 100755
index 0000000..d4d5f42
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/service_check.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.format import format
+from resource_management.core.resources.system import Execute
+
+class JupyterKernelGatewayServiceCheck(Script):
+    def service_check(self, env):
+        import jkg_toree_params as params
+        env.set_params(params)
+
+        if params.security_enabled:
+          jnbg_kinit_cmd = format("{kinit_path_local} -kt {jnbg_kerberos_keytab} {jnbg_kerberos_principal}; ")
+          Execute(jnbg_kinit_cmd, user=params.user)
+
+        scheme = "https" if params.ui_ssl_enabled else "http"
+        Execute(format("curl -s -o /dev/null -w'%{{http_code}}' --negotiate -u: -k {scheme}://{jkg_host}:{jkg_port}/api/kernelspecs | grep 200"),
+                tries = 10,
+                try_sleep=3,
+                logoutput=True)
+        Execute(format("curl -s --negotiate -u: -k {scheme}://{jkg_host}:{jkg_port}/api/kernelspecs | grep Scala"),
+                tries = 10,
+                try_sleep=3,
+                logoutput=True)
+
+if __name__ == "__main__":
+    JupyterKernelGatewayServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/status_params.py
new file mode 100755
index 0000000..78d7a8a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/status_params.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.format import format
+
+config = Script.get_config()
+jkg_pid_dir = config['configurations']['jnbg-env']['jkg_pid_dir_prefix']
+jkg_pid_file = format("{jkg_pid_dir}/jupyter_kernel_gateway.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/R4ML/0.8.0/configuration/r4ml-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/R4ML/0.8.0/configuration/r4ml-env.xml b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/configuration/r4ml-env.xml
new file mode 100755
index 0000000..25261a3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/configuration/r4ml-env.xml
@@ -0,0 +1,48 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <!-- Local/offline install R baseurl -->
+  <property>
+    <name>Baseurl for local install of R and R packages dependencies</name>
+    <description>The baseurl of the repository for R and R packages. This is only needed during local or offline install.</description>
+    <value>http://</value>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <!-- .Renviron -->
+  <property>
+    <name>Renviron</name>
+    <description>This is the jinja template for Renviron file</description>
+    <value>
+SPARK_HOME={{spark_home}}
+R4ML_SPARK_DRIVER_MEMORY={{spark_driver_memory}}
+SPARKR_SUBMIT_ARGS={{spark_submit_args}}
+R4ML_YARN_KEYTAB=
+R4ML_YARN_PRINCIPAL=
+R4ML_SYSML_JAR={{systemml_jar}}
+R_LIBS={{spark_home}}/R/lib:{{r4ml_home}}/R/lib
+    </value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/R4ML/0.8.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/R4ML/0.8.0/metainfo.xml b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/metainfo.xml
new file mode 100755
index 0000000..214d577
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/metainfo.xml
@@ -0,0 +1,92 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>R4ML</name>
+      <displayName>R4ML</displayName>
+      <comment>A scalable, hybrid approach to ML/Stats using R, SystemML and Apache Spark.</comment>
+      <version>0.8.0</version>
+      <components>
+        <component>
+          <name>R4ML</name>
+          <displayName>R4ML</displayName>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>SPARK2/SPARK2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>SYSTEMML/SYSTEMML</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/r4ml_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configFiles>
+          </configFiles>          
+        </component>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>r4ml_4_3_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>SPARK2</service>
+        <service>SYSTEMML</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+      </configuration-dependencies>
+
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/files/Install.R
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/files/Install.R b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/files/Install.R
new file mode 100755
index 0000000..b7c9d1c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/files/Install.R
@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+args <- commandArgs(trailingOnly = TRUE)
+options(repos=c("http://cran.rstudio.com"))
+tryCatch({
+  if (suppressWarnings(!require(args[1], character.only=T))) install.packages(args[1])
+},
+warning = function(w) {print(w); ifelse(grepl("unable to resolve", w) || grepl("non-zero exit status", w), quit(save="no", status=1), quit(save="no", status=0))},
+error = function(e) quit(save="no", status=2))
+quit(save="no", status=0)

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/files/ServiceCheck.R
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/files/ServiceCheck.R b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/files/ServiceCheck.R
new file mode 100755
index 0000000..c5ab359
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/files/ServiceCheck.R
@@ -0,0 +1,28 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+tryCatch({
+  lib_loc <- file.path("/usr/iop/current/r4ml-client" , "R", "lib")
+  .libPaths(c(lib_loc, .libPaths()))
+  lib_loc <- file.path("/usr/iop/current/spark2-client" , "R", "lib")
+  .libPaths(c(lib_loc, .libPaths()))
+  library(R4ML)
+  r4ml.session()
+  r4ml.session.stop()
+}, warning = function(w) ifelse(grepl("validateTransformOptions", w), quit(save="no", status=0), quit(save="no", status=1)),
+error = function(e) {print(e); quit(save="no", status=2)})
+quit(save="no", status=0)

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/files/localr.repo
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/files/localr.repo b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/files/localr.repo
new file mode 100755
index 0000000..d9117db
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/files/localr.repo
@@ -0,0 +1,22 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+[RREPO]
+name=RREPO
+baseurl=URLXXXX
+enabled=1
+gpgcheck=0
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/scripts/__init__.py b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/scripts/__init__.py
new file mode 100755
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/scripts/params.py
new file mode 100755
index 0000000..f23c3b1
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/scripts/params.py
@@ -0,0 +1,80 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management import *
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.script.script import Script
+import os
+
+# temp directory
+exec_tmp_dir = Script.get_tmp_dir()
+
+# server configurations
+config = Script.get_config()
+stack_root = Script.get_stack_root()
+
+r4ml_home = format("{stack_root}/current/r4ml-client")
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+stack_version = format_stack_version(stack_version_unformatted)
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+version = default("/commandParams/version", None)
+stack_name = default("/hostLevelParams/stack_name", None)
+
+java_home = config['hostLevelParams']['java_home']
+r4ml_conf_dir = "/etc/r4ml/conf"
+if stack_version and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version):
+  r4ml_conf_dir = format("{stack_root}/current/r4ml-client/conf")
+
+# environment variables
+spark_home = os.path.join(stack_root, "current", 'spark2-client')
+spark_driver_memory = "4G"
+spark_submit_args = "--num-executors 4 sparkr-shell"
+r4ml_auto_start = 0
+Renviron_template = config['configurations']['r4ml-env']['Renviron']
+
+# rpm links
+epel = ""
+centos = ""
+if System.get_instance().os_family == "redhat" :
+  if System.get_instance().os_major_version == "7" :
+    epel = "https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm"
+    if System.get_instance().machine == "x86_64" :
+      centos = "http://mirror.centos.org/centos/7/os/x86_64/Packages/"
+  else :
+    epel = "https://dl.fedoraproject.org/pub/epel/epel-release-latest-6.noarch.rpm"
+    if System.get_instance().machine == "x86_64" :
+      centos = "http://mirror.centos.org/centos/6/os/x86_64/Packages/"
+
+# local R and R packages baseurl
+baseurl = config['configurations']['r4ml-env']['Baseurl for local install of R and R packages dependencies']
+rrepo = "/etc/yum.repos.d/localr.repo"
+
+# systemml jar path
+systemml_jar = os.path.join(stack_root, "current", "systemml-client", "lib", "systemml.jar")
+if not os.path.isfile(systemml_jar) or not os.access(systemml_jar, os.R_OK) :
+  systemml_jar = ""
+
+smokeuser = config['configurations']['cluster-env']['smokeuser']
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/scripts/r4ml_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/scripts/r4ml_client.py b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/scripts/r4ml_client.py
new file mode 100755
index 0000000..3dbce5c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/scripts/r4ml_client.py
@@ -0,0 +1,201 @@
+#!/usr/bin/python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import subprocess
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.core.exceptions import ClientComponentHasNoStatus
+from resource_management.core.logger import Logger
+
+class R4MLClient(Script):
+
+  def configure(self, env, config_dir=None):
+    import params
+    env.set_params(params)
+
+  def get_component_name(self):
+    return "r4ml-client"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      Logger.info("Executing R4ML Client Stack Upgrade pre-restart")
+      conf_select.select(params.stack_name, "r4ml", params.version)
+      stack_select.select("r4ml-client", params.version)
+
+  def stack_upgrade_save_new_config(self, env):
+    import params
+    env.set_params(params)
+
+    conf_select_name = "r4ml"
+    base_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+    config_dir = self.get_config_dir_during_stack_upgrade(env, base_dir, conf_select_name)
+
+    if config_dir:
+      Logger.info("stack_upgrade_save_new_config(): Calling conf-select on %s using version %s" % (conf_select_name, str(params.version)))
+
+      # Because this script was called from ru_execute_tasks.py which already enters an Environment with its own basedir,
+      # must change it now so this function can find the Jinja Templates for the service.
+      env.config.basedir = base_dir
+      conf_select.select(params.stack_name, conf_select_name, params.version)
+      self.configure(env, config_dir=config_dir)
+
+  def checkPackage(self, packages):
+    try :
+      checked_call("sudo yum list " + packages)
+    except Exception as e:
+      # ignore
+      print e
+      return 1
+    return 0
+
+  def setupEpelRepo(self, params):
+    epel_installed = False
+    import urllib
+    code = 0
+    try :
+      code = subprocess.call(["sudo", "which", "R"])
+    except Exception as e :
+      Logger.error(str(e))
+    if code != 0 :
+      # try to set up R repo
+      code = self.checkPackage("R")
+      if code != 0 :
+        # R does not exist in any repo
+        code = self.checkPackage("epel-release")
+        if code != 0 :
+          if params.epel != "" :
+            # proceed to install EPEL
+            try :
+              urllib.urlretrieve(params.epel, "/tmp/epel.rpm")
+              Execute(("yum", "install", "/tmp/epel.rpm", "-y"), sudo=True)
+              epel_installed = True
+            except Exception as e :
+              Logger.error(str(e))
+              # it is ok to fail to download as it can be an offline install case
+        else :
+          Execute(("yum", "install", "epel-release", "-y"), sudo=True)
+          epel_installed = True
+
+      # check another two dependencies
+      code = self.checkPackage("texinfo-tex texlive-epsf")
+      if code != 0 :
+        # download from centos mirror
+        if params.centos != "" :
+          try :
+            import re
+            urllib.urlretrieve(params.centos, "/tmp/index")
+            s = open("/tmp/index", "r").read()
+            tex = re.search('texinfo-tex(.+)rpm(?=\")', s).group(0)
+            epsf = re.search('texlive-epsf-svn(.+)rpm(?=\")', s).group(0)
+            urllib.urlretrieve(params.centos + tex, "/tmp/tex.rpm")
+            urllib.urlretrieve(params.centos + epsf, "/tmp/epsf.rpm")
+            Execute(("yum", "install", "/tmp/epsf.rpm", "/tmp/tex.rpm", "-y"), sudo=True)
+          except Exception as e :
+            Logger.error(str(e))
+        else :
+          Logger.error("Dependent packages texinfo-tex and texlive-epsf are not found in any repos. Enable RedHat Optional Packages repo or install these two packages manually before retry.")
+          exit(1)
+      # install R now
+      Execute(("yum", "install", "R", "-y"), sudo=True)
+    return epel_installed
+
+  def setupRrepo(self, params):
+    import re
+    if params.baseurl != "http://" :
+      # assume this is a local install
+      File(format(params.rrepo),
+           action="delete")
+
+      File(format(params.rrepo),
+           content = StaticFile("localr.repo"),
+           mode = 0644)
+      Execute(("sed", "-i", "s/URLXXXX/" + re.sub('\$', '\$', re.sub('/', '\/', params.baseurl)) + "/g ", params.rrepo),
+              sudo=True)
+      Logger.info("Local install R from %s." %params.baseurl)
+      # install R now
+      Execute(("yum", "install", "R", "-y"), sudo=True)
+      return False
+    else :
+      return self.setupEpelRepo(params)
+
+  def install(self, env):
+    import params
+    env.set_params(params)
+
+    # set up R repo
+    epel_installed = self.setupRrepo(params)
+
+    # install R and R4ML
+    self.install_packages(env)
+
+    # remove epel-release repo installed above as R has been installed
+    if epel_installed :
+      Execute(("yum", "remove", "epel-release", "-y"), sudo=True)
+    else :
+      if (os.path.exists(params.rrepo)) :
+        File(format(params.rrepo),
+             action="delete")
+
+    # install several R packages that will be used by R4ML functions
+    installR = params.exec_tmp_dir + "/Install.R"
+    File(format(installR),
+         content = StaticFile("Install.R"),
+         mode = 0755)
+
+    if (params.baseurl != "http://"):
+      import re
+      Execute(("sed", "-i", "s/repos=c(.*/repos=c(\"" + re.sub('\$', '\$', re.sub('/', '\/', params.baseurl)) + "\"))/g", installR), sudo=True)
+
+    # install the dependent packages
+    packages = ["R6", "uuid", "survival"]
+    for pkg in packages :
+      Execute(("Rscript", installR, pkg), sudo=True)
+
+    # set up configuration file
+    Directory(params.r4ml_conf_dir,
+              create_parents=True,
+              action="create",
+              mode=0755)
+
+    File(format("{r4ml_conf_dir}/Renviron"),
+         mode=0755,
+         content = InlineTemplate(params.Renviron_template))
+
+    # install R4ML package to /usr/iop/current/r4ml-client/R/lib directory
+    Directory(format(params.r4ml_home + "/R/lib"),
+              action="create",
+              create_parents=True,
+              mode=0755)
+
+    checked_call(format("sudo R_LIBS={spark_home}/R/lib R CMD INSTALL --install-tests --library={r4ml_home}/R/lib {r4ml_home}/R4ML_*.tar.gz"))
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  R4MLClient().execute()
+

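A note on the escaping used twice above (in setupRrepo() and install()): params.baseurl is spliced into a sed "s///" expression, so '/' and '$' are backslash-escaped first. A minimal standalone sketch of that transformation, using a hypothetical URL:

import re

def sed_escape(url):
    # backslash-escape '/' and '$' so the URL can sit inside a
    # sed "s/URLXXXX/<url>/g" replacement expression
    return re.sub(r'\$', r'\$', re.sub(r'/', r'\/', url))

print(sed_escape("http://repo-mirror.example.com/R/"))
# -> http:\/\/repo-mirror.example.com\/R\/
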
http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/scripts/service_check.py
new file mode 100755
index 0000000..2acb4d2
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/scripts/service_check.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions.format import format
+import subprocess
+import os
+
+class R4MLServiceCheck(Script):
+    def service_check(self, env):
+        import params
+        env.set_params(params)
+
+        # generate the service check file
+        scR = os.path.join(params.exec_tmp_dir, "ServiceCheck.R")
+        File(format(scR),
+             content = StaticFile("ServiceCheck.R"),
+             mode = 0755)
+
+        Execute(("Rscript", scR),
+                tries=120,
+                try_sleep=20,
+                path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+                logoutput=True,
+                user=params.smokeuser)
+
+if __name__ == "__main__":
+    R4MLServiceCheck().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/metainfo.xml b/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/metainfo.xml
new file mode 100755
index 0000000..ecd503f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/metainfo.xml
@@ -0,0 +1,77 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SYSTEMML</name>
+      <displayName>SystemML</displayName>
+      <comment>Apache SystemML is a distributed and declarative machine learning platform.</comment>
+      <version>0.10.0</version>
+      <components>
+        <component>
+          <name>SYSTEMML</name>
+          <displayName>SystemML</displayName>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/systemml_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configFiles>
+          </configFiles>
+        </component>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>apache_systemml*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>HDFS</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+      </configuration-dependencies>
+
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/package/scripts/__init__.py b/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/package/scripts/__init__.py
new file mode 100755
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/package/scripts/params.py
new file mode 100755
index 0000000..dd7e46c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/package/scripts/params.py
@@ -0,0 +1,40 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.script.script import Script
+
+# server configurations
+config = Script.get_config()
+stack_root = Script.get_stack_root()
+
+systemml_home_dir = format("{stack_root}/current/systemml-client")
+systemml_lib_dir = format("{systemml_home_dir}/lib")
+systemml_scripts_dir = format("{systemml_home_dir}/scripts")
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+stack_version = format_stack_version(stack_version_unformatted)
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+version = default("/commandParams/version", None)
+stack_name = default("/hostLevelParams/stack_name", None)
+
+java_home = config['hostLevelParams']['java_home']

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/package/scripts/service_check.py
new file mode 100755
index 0000000..c15b907
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/package/scripts/service_check.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions.format import format
+import subprocess
+import os
+
+class SystemMLServiceCheck(Script):
+    def service_check(self, env):
+        import params
+        env.set_params(params)
+
+        if os.path.exists(params.systemml_lib_dir):
+            cp = format("{params.stack_root}/current/hadoop-client/*:{params.stack_root}/current/hadoop-mapreduce-client/*:{params.stack_root}/current/hadoop-client/lib/*:{params.systemml_lib_dir}/systemml.jar")
+            java = format("{params.java_home}/bin/java")
+            command = [java, "-cp", cp, "org.apache.sysml.api.DMLScript", "-s", "print('Apache SystemML');"]
+            process = subprocess.Popen(command, stdout=subprocess.PIPE)
+            output = process.communicate()[0]
+            print output
+
+            if 'Apache SystemML' not in output:
+                raise Fail("Expected output Apache SystemML not found.")
+
+if __name__ == "__main__":
+    SystemMLServiceCheck().execute()

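For reference, the service check above amounts to running the following on a client host (classpath per the params.py above; /usr/iop is shown as an example stack root, matching the comments elsewhere in this patch) and expecting "Apache SystemML" on stdout:

$JAVA_HOME/bin/java -cp "/usr/iop/current/hadoop-client/*:/usr/iop/current/hadoop-mapreduce-client/*:/usr/iop/current/hadoop-client/lib/*:/usr/iop/current/systemml-client/lib/systemml.jar" \
    org.apache.sysml.api.DMLScript -s "print('Apache SystemML');"
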
http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/package/scripts/systemml_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/package/scripts/systemml_client.py b/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/package/scripts/systemml_client.py
new file mode 100755
index 0000000..2d45b68
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/package/scripts/systemml_client.py
@@ -0,0 +1,52 @@
+#!/usr/bin/python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+#from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.core.exceptions import ClientComponentHasNoStatus
+
+
+class SystemMLClient(Script):
+
+  def get_component_name(self):
+    return "systemml-client"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      #conf_select.select(params.stack_name, "systemml", params.version)
+      stack_select.select("systemml-client", params.version)
+
+  def install(self, env):
+    self.install_packages(env)
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+
+if __name__ == "__main__":
+  SystemMLClient().execute()
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/alerts.json b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/alerts.json
new file mode 100755
index 0000000..5afab9c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/alerts.json
@@ -0,0 +1,33 @@
+{
+  "TITAN": {
+    "TITAN_SERVER": [
+      {
+        "name": "titan",
+        "label": "titan server",
+        "description": "This host-level alert is triggered if the Titan Server Instance is unreachable.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "SCRIPT",
+          "path": "TITAN/1.0.0/package/alerts/alert_check_titan_server.py",
+          "parameters": [
+            {
+              "name": "titan.run.dir",
+              "display_name": "Run Directory",
+              "value": "/var/run/titan",
+              "type": "STRING",
+              "description": "The directory where the Titan Server process places its PID files."
+            },
+            {
+              "name": "titan.user",
+              "display_name": "Titan User",
+              "value": "titan",
+              "type": "STRING",
+              "description": "User who starts the Titan Server process"
+            }
+          ]
+        }
+      }
+    ]
+  }
+}

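The referenced alert_check_titan_server.py is not included in this hunk. As a rough sketch only, assuming Ambari's standard script-alert interface (module-level get_tokens()/execute()) and a hypothetical PID file name, a check driven by the two parameters above could look like this:

import os

RESULT_CODE_OK = 'OK'
RESULT_CODE_CRITICAL = 'CRITICAL'

def get_tokens():
  # no {{config}} tokens needed; everything comes from the alert parameters
  return ()

def execute(configurations={}, parameters={}, host_name=None):
  run_dir = parameters.get('titan.run.dir', '/var/run/titan')
  pid_file = os.path.join(run_dir, 'titan.pid')  # hypothetical file name
  if not os.path.isfile(pid_file):
    return (RESULT_CODE_CRITICAL, ['Titan Server PID file %s not found' % pid_file])
  return (RESULT_CODE_OK, ['Titan Server PID file %s present' % pid_file])
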
http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/gremlin-server.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/gremlin-server.xml b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/gremlin-server.xml
new file mode 100755
index 0000000..1b33e6a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/gremlin-server.xml
@@ -0,0 +1,85 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+
+  <property>
+    <name>content</name>
+    <description>Configuration for Titan Server</description>
+    <value>
+host: {{titan_host}}
+port: {{titan_server_port}}
+threadPoolWorker: 1
+gremlinPool: 8
+scriptEvaluationTimeout: 300000
+serializedResponseTimeout: 300000
+channelizer: org.apache.tinkerpop.gremlin.server.channel.HttpChannelizer
+graphs: {
+  graph: conf/titan-hbase-solr.properties,
+  graphSpark: conf/hadoop-graph/hadoop-gryo.properties}
+plugins:
+  - aurelius.titan
+  - tinkerpop.spark
+  - tinkerpop.hadoop
+  - tinkerpop.tinkergraph
+scriptEngines: {
+  gremlin-groovy: {
+    imports: [java.lang.Math],
+    staticImports: [java.lang.Math.PI],
+    scripts: [scripts/empty-sample.groovy]},
+  nashorn: {
+      imports: [java.lang.Math],
+      staticImports: [java.lang.Math.PI]}}
+serializers:
+  - { className: org.apache.tinkerpop.gremlin.driver.ser.GryoMessageSerializerV1d0, config: { useMapperFromGraph: graph }}
+  - { className: org.apache.tinkerpop.gremlin.driver.ser.GryoMessageSerializerV1d0, config: { serializeResultToString: true }}
+  - { className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerGremlinV1d0, config: { useMapperFromGraph: graph }}
+  - { className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV1d0, config: { useMapperFromGraph: graph }}
+processors:
+  - { className: org.apache.tinkerpop.gremlin.server.op.session.SessionOpProcessor, config: { sessionTimeout: 28800000 }}
+metrics: {
+  consoleReporter: {enabled: true, interval: 180000},
+  csvReporter: {enabled: true, interval: 180000, fileName: /tmp/gremlin-server-metrics.csv},
+  jmxReporter: {enabled: true},
+  slf4jReporter: {enabled: true, interval: 180000},
+  gangliaReporter: {enabled: false, interval: 180000, addressingMode: MULTICAST},
+  graphiteReporter: {enabled: false, interval: 180000}}
+threadPoolBoss: 1
+maxInitialLineLength: 4096
+maxHeaderSize: 8192
+maxChunkSize: 8192
+maxContentLength: 65536
+maxAccumulationBufferComponents: 1024
+resultIterationBatchSize: 64
+writeBufferLowWaterMark: 32768
+writeBufferHighWaterMark: 65536
+ssl: {
+  enabled: {{titan_server_ssl}}{{titan_server_ssl_key_file}}{{titan_server_ssl_key_password}}{{titan_server_ssl_key_cert_file}}
+}
+{{titan_server_simple_authenticator}}
+    </value>
+    <value-attributes>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+</configuration>

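Since the template above configures the HttpChannelizer, the endpoint can be smoke-tested over plain HTTP once the server is up (port 8182 by default, per titan_server_port in titan-env.xml below). A minimal Python 2 sketch, with the hostname as a placeholder:

import json
import urllib2

req = urllib2.Request("http://titan-host.example.com:8182/",
                      json.dumps({"gremlin": "1+1"}),
                      {"Content-Type": "application/json"})
print(urllib2.urlopen(req).read())
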
http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/hadoop-gryo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/hadoop-gryo.xml b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/hadoop-gryo.xml
new file mode 100755
index 0000000..677fa2d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/hadoop-gryo.xml
@@ -0,0 +1,94 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false" supports_adding_forbidden="true">
+
+  <property>
+    <name>content</name>
+    <description>Configuration of hadoop-gryo.properties</description>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+gremlin.graph=org.apache.tinkerpop.gremlin.hadoop.structure.HadoopGraph
+gremlin.hadoop.graphInputFormat=org.apache.tinkerpop.gremlin.hadoop.structure.io.gryo.GryoInputFormat
+gremlin.hadoop.graphOutputFormat=org.apache.tinkerpop.gremlin.hadoop.structure.io.gryo.GryoOutputFormat
+gremlin.hadoop.jarsInDistributedCache=true
+
+gremlin.hadoop.inputLocation=data/tinkerpop-modern.kryo
+gremlin.hadoop.outputLocation=output
+
+#####################################
+# GiraphGraphComputer Configuration #
+#####################################
+giraph.minWorkers=2
+giraph.maxWorkers=2
+giraph.useOutOfCoreGraph=true
+giraph.useOutOfCoreMessages=true
+mapred.map.child.java.opts=-Xmx1024m
+mapred.reduce.child.java.opts=-Xmx1024m
+giraph.numInputThreads=4
+giraph.numComputeThreads=4
+# giraph.maxPartitionsInMemory=1
+# giraph.userPartitionCount=2
+
+####################################
+# SparkGraphComputer Configuration #
+####################################
+spark.master=yarn
+spark.submit.deployMode=client
+spark.yarn.jars={{default_fs}}/user/spark/share/lib/spark/*.jar
+
+# the Spark YARN ApplicationMaster needs this to resolve the classpath it sends to the executors
+spark.yarn.appMasterEnv.JAVA_HOME={{java64_home}}
+spark.yarn.appMasterEnv.HADOOP_CONF_DIR={{hadoop_config_dir}}
+spark.yarn.appMasterEnv.SPARK_CONF_DIR={{spark_config_dir}}
+spark.yarn.am.extraJavaOptions=-Diop.version={{full_stack_version}} -Djava.library.path={{hadoop_lib_native_dir}}
+
+# the Spark executors (on the worker nodes) need this to resolve the classpath to run Spark tasks
+spark.executorEnv.JAVA_HOME={{java64_home}}
+spark.executorEnv.HADOOP_CONF_DIR={{hadoop_config_dir}}
+spark.executorEnv.SPARK_CONF_DIR={{spark_config_dir}}
+spark.executor.memory=2g
+spark.executor.extraClassPath={{hbase_config_dir}}
+spark.serializer=org.apache.spark.serializer.KryoSerializer
+    </value>
+    <value-attributes>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/hadoop-hbase-read.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/hadoop-hbase-read.xml b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/hadoop-hbase-read.xml
new file mode 100755
index 0000000..10b1d99
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/hadoop-hbase-read.xml
@@ -0,0 +1,102 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false" supports_adding_forbidden="true">
+
+  <property>
+    <name>content</name>
+    <description>Configuration of hadoop-hbase-read.properties</description>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+gremlin.graph=org.apache.tinkerpop.gremlin.hadoop.structure.HadoopGraph
+gremlin.hadoop.graphInputFormat=com.thinkaurelius.titan.hadoop.formats.hbase.HBaseInputFormat
+gremlin.hadoop.graphOutputFormat=org.apache.hadoop.mapreduce.lib.output.NullOutputFormat
+gremlin.hadoop.jarsInDistributedCache=true
+gremlin.hadoop.deriveMemory=false
+gremlin.hadoop.memoryOutputFormat=org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat
+
+gremlin.hadoop.inputLocation=none
+gremlin.hadoop.outputLocation=output
+
+titanmr.ioformat.conf.storage.backend=hbase
+titanmr.ioformat.conf.storage.hbase.table=titan_solr
+titanmr.ioformat.conf.storage.hostname={{storage_host}}
+zookeeper.znode.parent={{hbase_zookeeper_parent}}
+
+#####################################
+# GiraphGraphComputer Configuration #
+#####################################
+giraph.minWorkers=2
+giraph.maxWorkers=2
+giraph.useOutOfCoreGraph=true
+giraph.useOutOfCoreMessages=true
+mapred.map.child.java.opts=-Xmx1024m
+mapred.reduce.child.java.opts=-Xmx1024m
+giraph.numInputThreads=4
+giraph.numComputeThreads=4
+# giraph.maxPartitionsInMemory=1
+# giraph.userPartitionCount=2
+
+####################################
+# SparkGraphComputer Configuration #
+####################################
+spark.master=yarn
+spark.submit.deployMode=client
+spark.yarn.jars={{default_fs}}/user/spark/share/lib/spark/*.jar
+
+# the Spark YARN ApplicationMaster needs this to resolve the classpath it sends to the executors
+spark.yarn.appMasterEnv.JAVA_HOME={{java64_home}}
+spark.yarn.appMasterEnv.HADOOP_CONF_DIR={{hadoop_config_dir}}
+spark.yarn.appMasterEnv.SPARK_CONF_DIR={{spark_config_dir}}
+spark.yarn.am.extraJavaOptions=-Diop.version={{full_stack_version}} -Djava.library.path={{hadoop_lib_native_dir}}
+
+# the Spark executors (on the worker nodes) need this to resolve the classpath to run Spark tasks
+spark.executorEnv.JAVA_HOME={{java64_home}}
+spark.executorEnv.HADOOP_CONF_DIR={{hadoop_config_dir}}
+spark.executorEnv.SPARK_CONF_DIR={{spark_config_dir}}
+spark.executor.memory=2g
+spark.executor.extraClassPath={{hbase_config_dir}}
+
+spark.serializer=org.apache.spark.serializer.KryoSerializer
+    </value>
+    <value-attributes>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/titan-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/titan-env.xml b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/titan-env.xml
new file mode 100755
index 0000000..9611764
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/titan-env.xml
@@ -0,0 +1,157 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>titan_user</name>
+    <display-name>Titan User</display-name>
+    <description>User to run Titan as</description>
+    <property-type>USER</property-type>
+    <value>titan</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>titan_pid_dir</name>
+    <value>/var/run/titan</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>titan_log_dir</name>
+    <value>/var/log/titan</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>titan_server_port</name>
+    <value>8182</value>
+    <description>The port Titan Server binds to; the default is 8182.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>titan_hdfs_home_dir</name>
+    <value>/apps/titan/data</value>
+    <description>A root location in HDFS for Titan to write collection data to. Rather than specifying separate HDFS locations for the data directory and the update log directory, use this to specify one root location and have everything automatically created under it in HDFS.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>SimpleAuthenticator</name>
+    <value>false</value>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <description>Set whether to enable SimpleAuthenticator (PLAIN SASL); the credentials database is located at /usr/iop/current/titan-client/data/credentials.kryo. Note: this option will not take effect if Knox is installed.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ssl.enabled</name>
+    <value>false</value>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <description>Set whether to enable ssl</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ssl.keyCertChainFile</name>
+    <value/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <description>The X.509 certificate chain file in PEM format. If this value is not present and ssl.enabled is true, a self-signed certificate will be used (not suitable for production).</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ssl.keyFile</name>
+    <value/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <description>The PKCS#8 private key file in PEM format. If this value is not present and ssl.enabled is true, a self-signed certificate will be used (not suitable for production).</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ssl.keyPassword</name>
+    <value/>
+    <property-type>PASSWORD</property-type>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <description>The password of the keyFile, or empty if the keyFile is not password-protected.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>titan-env template</display-name>
+    <description>This is the template for titan-env.sh file</description>
+    <on-ambari-upgrade add="false"/>
+    <value>
+# Set JAVA HOME
+export JAVA_HOME={{java64_home}}
+
+# Add hadoop configuration directory into classpath
+export HADOOP_CONF_DIR={{hadoop_config_dir}}
+
+
+# Setup the environment for SparkGraphComputer
+# Add yarn and spark lib and config into classpath for SparkGraphComputer.
+export YARN_HOME={{yarn_home_dir}}
+export YARN_CONF_DIR=$HADOOP_CONF_DIR
+export SPARK_HOME={{spark_home_dir}}
+export SPARK_CONF_DIR={{spark_config_dir}}
+export TITAN_HOME={{titan_home_dir}}
+export CLASSPATH=$CLASSPATH:$HADOOP_CONF_DIR:$SPARK_CONF_DIR:$TITAN_HOME/conf
+
+#add hbase configuration directory into classpath
+if ([ -d "{{hbase_config_dir}}" ]); then
+   export HBASE_CONF_DIR={{hbase_config_dir}}
+   export CLASSPATH=$CLASSPATH:$HBASE_CONF_DIR
+fi
+
+if ([[ ! -d "{{titan_ext_spark_plugin_dir}}" ]] &amp;&amp; [[ -d "$SPARK_HOME/jars" ]]); then
+  for jar in $SPARK_HOME/jars/*.jar; do
+    if ([[ $jar != */guava*.jar ]] &amp;&amp; [[ $jar != */slf4j-log4j12*.jar ]] &amp;&amp; [[ $jar != */spark-core*.jar ]]) ;then
+      CLASSPATH=$CLASSPATH:$jar
+    fi
+  done
+fi
+
+export CLASSPATH
+
+# Add iop.version and native lib in java opt for hadoop config.
+export IOP_JAVA_OPTIONS="$JAVA_OPTIONS -D{{platform_name}}.version={{full_stack_version}} -Djava.library.path={{hadoop_lib_native_dir}}"
+
+{% if security_enabled -%}
+export JVMFLAGS="-Djava.security.auth.login.config={{titan_solr_client_jaas_file}}"
+export IOP_JAVA_OPTIONS="$IOP_JAVA_OPTIONS $JVMFLAGS"
+{% endif %}
+
+source "$HADOOP_CONF_DIR"/hadoop-env.sh
+export HADOOP_GREMLIN_LIBS=$TITAN_HOME/lib
+export TITAN_LOGDIR={{titan_log_dir}}
+    </value>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/titan-hbase-solr.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/titan-hbase-solr.xml b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/titan-hbase-solr.xml
new file mode 100755
index 0000000..49e3f80
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/titan-hbase-solr.xml
@@ -0,0 +1,69 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false" supports_adding_forbidden="true">
+
+  <property>
+    <name>content</name>
+    <description>Describe the configurations for Solr</description>
+    <on-ambari-upgrade add="false"/>
+    <value># Titan configuration sample: HBase and Solr
+# ATTENTION: If you would like to use these properties, manually execute titan-solr-connection.sh before building the index.
+
+# This file connects to HBase using a Zookeeper quorum
+# (storage.hostname) consisting solely of localhost. It also
+# connects to Solr running on localhost using Solr's HTTP API.
+# Zookeeper, the HBase services, and Solr must already be
+# running and available before starting Titan with this file.
+gremlin.graph=com.thinkaurelius.titan.core.TitanFactory
+storage.backend=hbase
+storage.hostname={{storage_host}}
+storage.hbase.table=titan_solr
+storage.hbase.ext.zookeeper.znode.parent={{hbase_zookeeper_parent}}
+
+cache.db-cache = true
+cache.db-cache-clean-wait = 20
+cache.db-cache-time = 180000
+cache.db-cache-size = 0.5
+
+# The indexing backend used to extend and optimize Titan's query
+# functionality. This setting is optional. Titan can use multiple
+# heterogeneous index backends. Hence, this option can appear more than
+# once, so long as the user-defined name between "index" and "backend" is
+# unique among appearances. Similar to the storage backend, this should be
+# set to one of Titan's built-in shorthand names for its standard index
+# backends (shorthands: lucene, elasticsearch, es, solr) or to the full
+# package and classname of a custom/third-party IndexProvider
+# implementation.
+
+index.search.backend=solr
+index.search.solr.mode=cloud
+index.search.solr.zookeeper-url={{zookeeper_solr_for_titan_hostname}}
+index.search.solr.configset=titan
+{{titan_solr_client_jaas_config}}
+    </value>
+    <value-attributes>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/titan-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/titan-log4j.xml b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/titan-log4j.xml
new file mode 100755
index 0000000..c32a9e4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/titan-log4j.xml
@@ -0,0 +1,65 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <description>Custom log4j-console.properties</description>
+    <on-ambari-upgrade add="false"/>
+    <value>
+      # Used by gremlin.sh
+
+      log4j.appender.A2=org.apache.log4j.ConsoleAppender
+      log4j.appender.A2.Threshold=TRACE
+      log4j.appender.A2.layout=org.apache.log4j.PatternLayout
+      log4j.appender.A2.layout.ConversionPattern=%d{HH:mm:ss} %-5p %c %x - %m%n
+      log4j.rootLogger=${gremlin.log4j.level}, A2
+
+      #log4j.logger.com.thinkaurelius.titan.graphdb.database.idassigner.placement=DEBUG
+      #log4j.logger.com.thinkaurelius.titan.diskstorage.hbase.HBaseStoreManager=DEBUG
+
+      # Disable spurious Hadoop config deprecation warnings under 2.2.0.
+      #
+      # See https://issues.apache.org/jira/browse/HADOOP-10178
+      #
+      # This can and should be deleted when we upgrade our Hadoop 2.2.0
+      # dependency to 2.3.0 or 3.0.0.
+      log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=OFF
+
+      # Configure MR at its own loglevel. We usually want MR at INFO,
+      # even if the rest of the loggers are at WARN or ERROR or FATAL,
+      # because job progress information is at INFO.
+      log4j.logger.org.apache.hadoop.mapred=${gremlin.mr.log4j.level}
+      log4j.logger.org.apache.hadoop.mapreduce=${gremlin.mr.log4j.level}
+
+      # This generates 3 INFO lines per jar on the classpath -- usually more
+      # noise than desirable in the REPL. Switching it to the default
+      # log4j level means it will be at WARN by default, which is ideal.
+      log4j.logger.org.apache.hadoop.mapred.LocalDistributedCacheManager=${gremlin.log4j.level}
+    </value>
+    <value-attributes>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/kerberos.json b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/kerberos.json
new file mode 100755
index 0000000..a25382e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/kerberos.json
@@ -0,0 +1,52 @@
+{
+  "services": [
+    {
+      "name": "TITAN",
+      "components": [
+        {
+          "name": "TITAN_SERVER",
+          "identities": [
+            {
+              "name": "titan_principal",
+              "principal": {
+                "value": "${titan-env/titan_user}/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "titan-env/titan_principal_name",
+                "local_username": "${titan-env/titan_user}"
+
+              },
+              "keytab": {
+                "file": "${keytab_dir}/titan.service.keytab",
+                "owner": {
+                  "name": "${titan-env/titan_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "titan-env/titan_keytab_path"
+              }
+            }
+          ]
+        },
+        {
+          "name": "TITAN_CLIENT",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            }
+          ]
+        }
+      ],
+      "configurations": [
+        {
+          "core-site": {
+            "hadoop.proxyuser.titan.groups": "${hadoop-env/proxyuser_group}",
+            "hadoop.proxyuser.titan.hosts": "*"
+          }
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/metainfo.xml b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/metainfo.xml
new file mode 100755
index 0000000..75696c1
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/metainfo.xml
@@ -0,0 +1,124 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>TITAN</name>
+      <displayName>Titan</displayName>
+      <comment>Titan is a scalable graph database optimized for storing and querying graphs containing hundreds of
+        billions of vertices and edges distributed across a multi-machine cluster.</comment>
+      <version>1.0.0</version>
+      <components>
+        <component>
+          <name>TITAN_SERVER</name>
+          <displayName>Titan Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <commandScript>
+            <script>scripts/titan_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>yaml</type>
+              <fileName>gremlin-server.yaml</fileName>
+              <dictionaryName>gremlin-server</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+        <component>
+          <name>TITAN_CLIENT</name>
+          <displayName>Titan Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/titan_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configFiles>
+            <configFile>
+               <type>env</type>
+               <fileName>titan-env.sh</fileName>
+               <dictionaryName>titan-env</dictionaryName>
+            </configFile>
+            <configFile>
+                <type>env</type>
+                <fileName>log4j-console.properties</fileName>
+                <dictionaryName>titan-log4j</dictionaryName>
+            </configFile>
+            <configFile>
+                <type>env</type>
+                <fileName>titan-hbase-solr.properties</fileName>
+                <dictionaryName>titan-hbase-solr</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hadoop-gryo.properties</fileName>
+              <dictionaryName>hadoop-gryo</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hadoop-hbase-read.properties</fileName>
+              <dictionaryName>hadoop-hbase-read</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>titan</name>
+            </package>
+            <package>
+              <name>ambari-infra-solr-client-*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>HDFS</service>
+        <service>HBASE</service>
+        <service>SOLR</service>
+        <service>SPARK2</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>gremlin-server</config-type>
+        <config-type>titan-env</config-type>
+        <config-type>titan-hbase-solr</config-type>
+        <config-type>titan-log4j</config-type>
+        <config-type>hadoop-gryo</config-type>
+        <config-type>hadoop-hbase-read</config-type>
+        <config-type>knox-env</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/alerts/alert_check_titan_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/alerts/alert_check_titan_server.py b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/alerts/alert_check_titan_server.py
new file mode 100755
index 0000000..8342c51
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/alerts/alert_check_titan_server.py
@@ -0,0 +1,65 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management.core.resources import Execute
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from resource_management.libraries.functions import format
+
+RESULT_CODE_OK = 'OK'
+RESULT_CODE_CRITICAL = 'CRITICAL'
+RESULT_CODE_UNKNOWN = 'UNKNOWN'
+STACK_ROOT = '{{cluster-env/stack_root}}'
+TITAN_RUN_DIR = 'titan.run.dir'
+TITAN_USER = 'titan.user'
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+  titan_bin_dir = configurations[STACK_ROOT] + "/current/titan-server/bin"
+
+  gremlin_server_script_path = titan_bin_dir + "/gremlin-server-script.sh"
+
+  titan_pid_file = parameters[TITAN_RUN_DIR] + "/titan.pid"
+  titan_user = parameters[TITAN_USER]
+  (code, msg) = get_check_result(gremlin_server_script_path, titan_pid_file, titan_user)
+  return (code, msg)
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (STACK_ROOT, TITAN_RUN_DIR)
+
+def get_check_result(gremlin_server_script_path, titan_pid_file, titan_user):
+  cmd = format("{gremlin_server_script_path} status {titan_pid_file}")
+  try:
+    result = Execute(cmd,
+                     user=titan_user
+                     )
+    return (RESULT_CODE_OK, ["titan server is up and running"])
+  except Exception, ex:
+    return (RESULT_CODE_CRITICAL, [ex])
+
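
For context: the alert framework calls execute() with a configurations dict
keyed by the {{site/property}} tokens returned from get_tokens(), plus a
parameters dict taken from the alert definition. A hypothetical invocation
(values illustrative, not the framework's actual call site):

    configurations = {'{{cluster-env/stack_root}}': '/usr/iop'}
    parameters = {'titan.run.dir': '/var/run/titan', 'titan.user': 'titan'}
    code, labels = execute(configurations, parameters)
    # -> ('OK', ['titan server is up and running']) when the status command
    #    exits 0; Execute raises on a non-zero exit, which yields CRITICAL.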

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/files/gremlin-server-script.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/files/gremlin-server-script.sh b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/files/gremlin-server-script.sh
new file mode 100755
index 0000000..97aa897
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/files/gremlin-server-script.sh
@@ -0,0 +1,86 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Init script for Gremlin Server so it automatically starts/stops with the machine.
+#
+# To install:
+# 1)  Add a symlink to this file in /etc/init.d/ under the name you'd like to see the service
+#     For example, to name the service "gremlin-server": ln -s /usr/local/packages/dynamodb-titan100-storage-backend-1.0.0-hadoop1/bin/gremlin-server-service.sh /etc/init.d/gremlin-server
+# 2a) If you're running RH: chkconfig --add gremlin-server
+# 2b) If you're running Ubuntu: update-rc.d gremlin-server defaults
+#
+# Positional parameters (supplied by the caller; $1 selects start|stop|status):
+PID_FILE=$2
+GREMLIN_SERVER_LOG_FILE=$3
+GREMLIN_SERVER_ERR_FILE=$4
+GREMLIN_SERVER_BIN_DIR=$5
+GREMLIN_SERVER_CONF_DIR=$6
+
+
+usage() {
+  echo "Usage: `basename $0`: start|stop|status"
+  exit 1
+}
+
+status() {
+  echo "get program status"
+  local pid
+  if [[ -f "$PID_FILE" && -s "$PID_FILE" ]]; then
+    pid=$(cat $PID_FILE)
+    if kill -0 $pid > /dev/null 2>&1; then
+      # pid exists and the process is alive
+      echo "program is running"
+      return 0
+    fi
+  else
+    echo "program is not running"
+  fi
+  return 1
+}
+
+start() {
+  if ! status ; then
+      echo "start program"
+      /usr/bin/nohup ${GREMLIN_SERVER_BIN_DIR}/gremlin-server.sh ${GREMLIN_SERVER_CONF_DIR}/gremlin-server.yaml 1>$GREMLIN_SERVER_LOG_FILE 2>${GREMLIN_SERVER_ERR_FILE} &
+      echo $! > $PID_FILE
+      sleep 50
+  fi
+}
+
+stop() {
+  local pid
+  if status ; then
+    echo "stop program"
+    pid=$(cat $PID_FILE)
+    kill -9 $pid
+    rm -f $PID_FILE
+  fi
+}
+
+case "$1" in
+  start)
+    start
+    ;;
+  stop)
+    stop
+    ;;
+  status)
+    status
+    ;;
+  *)
+    usage
+    ;;
+esac
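
The positional interface ($1 action, $2 pid file, $3 log file, $4 err file,
$5 bin dir, $6 conf dir) is presumably driven from scripts/titan_server.py,
which is not part of this digest, so the following Execute call is only a
plausible sketch; every path below is illustrative:

    from resource_management.core.resources import Execute
    from resource_management.libraries.functions import format

    titan_bin_dir = "/usr/iop/current/titan-server/bin"    # illustrative
    titan_conf_dir = "/usr/iop/current/titan-server/conf"  # illustrative
    titan_pid_file = "/var/run/titan/titan.pid"            # illustrative
    titan_log_file = "/var/log/titan/titan.log"            # illustrative
    titan_err_file = "/var/log/titan/titan.err"            # illustrative

    # resource_management's format() resolves the {placeholders} from the
    # caller's scope, so the locals above feed directly into the command.
    Execute(format("{titan_bin_dir}/gremlin-server-script.sh start "
                   "{titan_pid_file} {titan_log_file} {titan_err_file} "
                   "{titan_bin_dir} {titan_conf_dir}"),
            user="titan")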

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/files/tinkergraph-empty.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/files/tinkergraph-empty.properties b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/files/tinkergraph-empty.properties
new file mode 100755
index 0000000..0e68eeeb
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/files/tinkergraph-empty.properties
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+gremlin.graph=org.apache.tinkerpop.gremlin.tinkergraph.structure.TinkerGraph
+gremlin.tinkergraph.vertexIdManager=LONG


[19/50] [abbrv] ambari git commit: Merge branch 'branch-feature-AMBARI-21348' into branch-2.5

Posted by jo...@apache.org.
Merge branch 'branch-feature-AMBARI-21348' into branch-2.5


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/65e57a12
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/65e57a12
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/65e57a12

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 65e57a12b98043ec0a0493ddcdcb3c47d2b75215
Parents: a389f85 133baa5
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Thu Jul 13 12:40:25 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Thu Jul 13 12:40:25 2017 -0400

----------------------------------------------------------------------
 .../resources/stacks/BigInsights/4.0/services/HIVE/metainfo.xml    | 2 +-
 .../resources/stacks/BigInsights/4.2.5/services/HIVE/metainfo.xml  | 2 +-
 .../resources/stacks/BigInsights/4.2.5/services/SQOOP/metainfo.xml | 2 +-
 .../resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml | 2 +-
 .../resources/stacks/BigInsights/4.2/services/HIVE/metainfo.xml    | 2 +-
 .../resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml   | 2 +-
 6 files changed, 6 insertions(+), 6 deletions(-)
----------------------------------------------------------------------



[35/50] [abbrv] ambari git commit: Revert: BUG-78694. LDAP sync requires user to be root

Posted by jo...@apache.org.
Revert: BUG-78694. LDAP sync requires user to be root


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/805dbe42
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/805dbe42
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/805dbe42

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 805dbe42a0c809faacf8b86c769199c300eac1b9
Parents: aa729a5
Author: Eugene Chekanskiy <ec...@hortonworks.com>
Authored: Sun Jul 16 20:37:52 2017 +0300
Committer: Eugene Chekanskiy <ec...@hortonworks.com>
Committed: Sun Jul 16 20:37:52 2017 +0300

----------------------------------------------------------------------
 .../src/main/python/ambari_server/setupSecurity.py     |  4 ++++
 ambari-server/src/test/python/TestAmbariServer.py      | 13 ++++++++++++-
 2 files changed, 16 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/805dbe42/ambari-server/src/main/python/ambari_server/setupSecurity.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/setupSecurity.py b/ambari-server/src/main/python/ambari_server/setupSecurity.py
index f175d7c..ea3b9e5 100644
--- a/ambari-server/src/main/python/ambari_server/setupSecurity.py
+++ b/ambari-server/src/main/python/ambari_server/setupSecurity.py
@@ -275,6 +275,10 @@ class LdapSyncOptions:
 #
 def sync_ldap(options):
   logger.info("Sync users and groups with configured LDAP.")
+  if not is_root():
+    err = 'Ambari-server sync-ldap should be run with ' \
+          'root-level privileges'
+    raise FatalException(4, err)
 
   properties = get_ambari_properties()
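
is_root() itself is not shown in this digest; a minimal stand-in for the
restored guard, assuming it reduces to an effective-uid check on Linux:

    import os

    def is_root():
        # stand-in only; Ambari's own helper may differ in detail
        return os.geteuid() == 0

    if not is_root():
        # mirrors the FatalException(4, err) raised above
        raise SystemExit("Ambari-server sync-ldap should be run with root-level privileges")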
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/805dbe42/ambari-server/src/test/python/TestAmbariServer.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/TestAmbariServer.py b/ambari-server/src/test/python/TestAmbariServer.py
index fb0bb70..1ac77ab2 100644
--- a/ambari-server/src/test/python/TestAmbariServer.py
+++ b/ambari-server/src/test/python/TestAmbariServer.py
@@ -7747,12 +7747,13 @@ class TestAmbariServer(TestCase):
   @patch("urllib2.urlopen")
   @patch("urllib2.Request")
   @patch("base64.encodestring")
+  @patch("ambari_server.setupSecurity.is_root")
   @patch("ambari_server.setupSecurity.is_server_runing")
   @patch("ambari_server.setupSecurity.get_ambari_properties")
   @patch("ambari_server.setupSecurity.get_validated_string_input")
   @patch("ambari_server.setupSecurity.logger")
   def test_sync_ldap_forbidden(self, logger_mock, get_validated_string_input_method, get_ambari_properties_method,
-                                is_server_runing_method,
+                                is_server_runing_method, is_root_method,
                                 encodestring_method, request_constructor, urlopen_method):
 
     options = self._create_empty_options_mock()
@@ -7761,6 +7762,16 @@ class TestAmbariServer(TestCase):
     options.ldap_sync_users = None
     options.ldap_sync_groups = None
 
+    is_root_method.return_value = False
+    try:
+      sync_ldap(options)
+      self.fail("Should throw exception if not root")
+    except FatalException as fe:
+      # Expected
+      self.assertTrue("root-level" in fe.reason)
+      pass
+    is_root_method.return_value = True
+
     is_server_runing_method.return_value = (None, None)
     try:
       sync_ldap(options)


[05/50] [abbrv] ambari git commit: Merge branch 'branch-feature-AMBARI-21348' into branch-2.5

Posted by jo...@apache.org.
Merge branch 'branch-feature-AMBARI-21348' into branch-2.5


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/da44c5c1
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/da44c5c1
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/da44c5c1

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: da44c5c1a5476b78887cd7729d0efc5afbf0dae9
Parents: a6ac40b 267cd8b
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Wed Jul 12 14:47:40 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Wed Jul 12 14:47:40 2017 -0400

----------------------------------------------------------------------
 .../ambari/server/controller/internal/UpgradeResourceProvider.java | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------



[32/50] [abbrv] ambari git commit: AMBARI-21482. Blueprints: HSI config 'num_llap_nodes' and 'num_llap_nodes_for_llap_daemons' should be calculated and recommended via Stack Advisor during Blueprint install only if 'num_llap_nodes' config value is not provided in Blueprint

Posted by jo...@apache.org.
AMBARI-21482. Blueprints: HSI config 'num_llap_nodes' and 'num_llap_nodes_for_llap_daemons' should be calculated and recommended via Stack Advisor during Blueprint install only if 'num_llap_nodes' config value is not provided in Blueprint.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/20768014
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/20768014
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/20768014

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 207680147edc50336bae04914976e6221bc6fd95
Parents: 4ddbd62
Author: Swapan Shridhar <ss...@hortonworks.com>
Authored: Fri Jul 14 18:15:52 2017 -0700
Committer: Swapan Shridhar <ss...@hortonworks.com>
Committed: Fri Jul 14 22:29:23 2017 -0700

----------------------------------------------------------------------
 .../main/resources/stacks/HDP/2.5/services/stack_advisor.py    | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/20768014/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
index 97c49f3..9971bfa 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
@@ -1014,8 +1014,10 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
       # Set 'num_llap_nodes_requested' for 1st invocation, as it gets passed as 1 otherwise, read from config.
 
       # Check if its : 1. 1st invocation from UI ('enable_hive_interactive' in changed-configurations)
-      # OR 2. 1st invocation from BP (services['changed-configurations'] should be empty in this case)
-      if (changed_configs_has_enable_hive_int or  0 == len(services['changed-configurations'])) \
+      # OR 2. 1st invocation from BP (services['changed-configurations'] should be empty in this case and 'num_llap_nodes' not defined)
+      if (changed_configs_has_enable_hive_int
+          or (0 == len(services['changed-configurations'])
+              and not services['configurations']['hive-interactive-env']['properties']['num_llap_nodes'])) \
         and services['configurations']['hive-interactive-env']['properties']['enable_hive_interactive']:
         num_llap_nodes_requested = min_nodes_required
       else:
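
Condensed restatement of the amended guard: the minimum-node value is now
recommended only on the first invocation from the UI (enable_hive_interactive
just changed) or on a Blueprint install where num_llap_nodes was left unset
(names as in the diff above):

    hive_int_env = services['configurations']['hive-interactive-env']['properties']
    first_ui_invocation = changed_configs_has_enable_hive_int
    first_bp_invocation = (len(services['changed-configurations']) == 0
                           and not hive_int_env['num_llap_nodes'])
    if (first_ui_invocation or first_bp_invocation) and hive_int_env['enable_hive_interactive']:
        num_llap_nodes_requested = min_nodes_required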


[09/50] [abbrv] ambari git commit: AMBARI-21453 Restart Hive after Install Packages fails with err on Condition with name 'should_install_mysl_connector' (dili)

Posted by jo...@apache.org.
AMBARI-21453 Restart Hive after Install Packages fails with err on Condition with name 'should_install_mysl_connector' (dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c5f2efa0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c5f2efa0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c5f2efa0

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: c5f2efa0b870590aeb28baaefed2fac8d88bf38d
Parents: 267cd8b
Author: Di Li <di...@apache.org>
Authored: Wed Jul 12 16:38:47 2017 -0400
Committer: Di Li <di...@apache.org>
Committed: Wed Jul 12 16:38:47 2017 -0400

----------------------------------------------------------------------
 .../resources/stacks/BigInsights/4.0/services/HIVE/metainfo.xml    | 2 +-
 .../resources/stacks/BigInsights/4.2.5/services/HIVE/metainfo.xml  | 2 +-
 .../resources/stacks/BigInsights/4.2.5/services/SQOOP/metainfo.xml | 2 +-
 .../resources/stacks/BigInsights/4.2/services/HIVE/metainfo.xml    | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c5f2efa0/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/metainfo.xml
index 44e63e7..dc1a5b4 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/metainfo.xml
@@ -267,7 +267,7 @@
             </package-->
             <package>
               <name>mysql-connector-java</name>
-              <condition>should_install_mysl_connector</condition>
+              <condition>should_install_mysql_connector</condition>
             </package>
           </packages>
         </osSpecific>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c5f2efa0/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/metainfo.xml
index 8f703a5..ec75b4b 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/metainfo.xml
@@ -168,7 +168,7 @@
             <package>
               <name>mysql-connector-java</name>
               <skipUpgrade>true</skipUpgrade>
-              <condition>should_install_mysl_connector</condition>
+              <condition>should_install_mysql_connector</condition>
             </package>
           </packages>
         </osSpecific>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c5f2efa0/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/SQOOP/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/SQOOP/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/SQOOP/metainfo.xml
index 05856fd..87347eb 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/SQOOP/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/SQOOP/metainfo.xml
@@ -37,7 +37,7 @@
             <package>
               <name>mysql-connector-java</name>
               <skipUpgrade>true</skipUpgrade>
-              <condition>should_install_mysl_connector</condition>
+              <condition>should_install_mysql_connector</condition>
             </package>
           </packages>
         </osSpecific>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c5f2efa0/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/metainfo.xml
index d89a7d6..5eb51b7 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/metainfo.xml
@@ -271,7 +271,7 @@
             <package>
               <name>mysql-connector-java</name>
               <skipUpgrade>true</skipUpgrade>
-              <condition>should_install_mysl_connector</condition>
+              <condition>should_install_mysql_connector</condition>
             </package>
           </packages>
         </osSpecific>


[17/50] [abbrv] ambari git commit: AMBARI-21462. Readd TITAN, R4ML, SYSTEMML, JNBG to BigInsights - addendum: schema-validity

Posted by jo...@apache.org.
AMBARI-21462. Readd TITAN, R4ML, SYSTEMML, JNBG to BigInsights - addendum: schema-validity


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a389f85b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a389f85b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a389f85b

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: a389f85b6e2a0cb23ff7fcf629fd55ab4e203560
Parents: a7b6d5a
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Thu Jul 13 18:10:09 2017 +0200
Committer: Attila Doroszlai <ad...@hortonworks.com>
Committed: Thu Jul 13 18:10:31 2017 +0200

----------------------------------------------------------------------
 .../common-services/JNBG/0.2.0/configuration/jnbg-env.xml          | 1 +
 .../BigInsights/4.2/services/TITAN/configuration/titan-env.xml     | 2 ++
 .../4.2/services/TITAN/configuration/titan-hbase-solr.xml          | 1 +
 .../BigInsights/4.2/services/TITAN/configuration/titan-log4j.xml   | 1 +
 4 files changed, 5 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a389f85b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/configuration/jnbg-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/configuration/jnbg-env.xml b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/configuration/jnbg-env.xml
index f9da01e..ed49b26 100755
--- a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/configuration/jnbg-env.xml
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/configuration/jnbg-env.xml
@@ -75,6 +75,7 @@
     <value>/apps/jnbg/spark-warehouse</value>
     <display-name>spark.sql.warehouse.dir</display-name>
     <description>Warehouse for Notebook applications</description>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>jkg_port</name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/a389f85b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-env.xml
index 86e09f1..dda05e4 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-env.xml
@@ -27,6 +27,7 @@
     <description>User to run Titan as</description>
     <property-type>USER</property-type>
     <value>titan</value>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <property>
@@ -41,6 +42,7 @@ export HADOOP_CONF_DIR={{hadoop_config_dir}}
 export HBASE_CONF_DIR={{hbase_config_dir}}
 CLASSPATH=$HADOOP_CONF_DIR:$HBASE_CONF_DIR:$CLASSPATH
     </value>
+    <on-ambari-upgrade add="false"/>
   </property>
 
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/a389f85b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-hbase-solr.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-hbase-solr.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-hbase-solr.xml
index 0ca6807..2a7b366 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-hbase-solr.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-hbase-solr.xml
@@ -61,6 +61,7 @@ index.search.solr.configset=titan
     <value-attributes>
       <show-property-name>false</show-property-name>
     </value-attributes>
+    <on-ambari-upgrade add="false"/>
   </property>
 
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/a389f85b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-log4j.xml
index 3363d81..a5522f3 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-log4j.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-log4j.xml
@@ -60,6 +60,7 @@
     <value-attributes>
       <show-property-name>false</show-property-name>
     </value-attributes>
+    <on-ambari-upgrade add="false"/>
   </property>
 
 </configuration>


[34/50] [abbrv] ambari git commit: AMBARI-21483. Add UID/GID related enhancements (echekanskiy)

Posted by jo...@apache.org.
AMBARI-21483. Add UID/GID related enhancements (echekanskiy)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/aa729a5b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/aa729a5b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/aa729a5b

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: aa729a5bb1f48df2eeddb7234373b0b90b850bc3
Parents: d5392fd
Author: Eugene Chekanskiy <ec...@hortonworks.com>
Authored: Sun Jul 16 20:14:22 2017 +0300
Committer: Eugene Chekanskiy <ec...@hortonworks.com>
Committed: Sun Jul 16 20:14:22 2017 +0300

----------------------------------------------------------------------
 .../ambari/server/state/PropertyInfo.java       |   2 +
 .../hooks/before-ANY/files/changeToSecureUid.sh |  13 +-
 .../before-ANY/scripts/shared_initialization.py |  45 ++-
 .../2.0.6/hooks/before-ANY/test_before_any.py   | 294 +++++++++++--------
 .../app/controllers/wizard/step7_controller.js  |  67 +++++
 .../configs/stack_config_properties_mapper.js   |  14 +-
 ambari-web/app/styles/application.less          |  15 +
 ...ontrols_service_config_usergroup_with_id.hbs |  27 ++
 ambari-web/app/utils/config.js                  |   3 +
 .../configs/service_configs_by_category_view.js |   6 +
 ambari-web/app/views/common/controls_view.js    |  39 +++
 11 files changed, 392 insertions(+), 133 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/aa729a5b/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
index 2ad92fd..bc89fc4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
@@ -270,7 +270,9 @@ public class PropertyInfo {
   public enum PropertyType {
     PASSWORD,
     USER,
+    UID,
     GROUP,
+    GID,
     TEXT,
     ADDITIONAL_USER_PROPERTY,
     NOT_MANAGED_HDFS_PATH,

http://git-wip-us.apache.org/repos/asf/ambari/blob/aa729a5b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/files/changeToSecureUid.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/files/changeToSecureUid.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/files/changeToSecureUid.sh
index 08542c4..4663f10 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/files/changeToSecureUid.sh
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/files/changeToSecureUid.sh
@@ -21,6 +21,7 @@
 
 username=$1
 directories=$2
+newUid=$3
 
 function find_available_uid() {
  for ((i=1001; i<=2000; i++))
@@ -34,7 +35,16 @@ function find_available_uid() {
  done
 }
 
-find_available_uid
+if [ -z "$2" ]; then
+  # query mode: no directories given, so just report the uid to use
+  test $(id -u ${username} 2>/dev/null)
+  if [ $? -ne 1 ]; then
+    newUid=`id -u ${username}`
+  else
+    find_available_uid
+  fi
+  echo $newUid
+  exit 0
+fi
 
 if [ $newUid -eq 0 ]
 then
@@ -43,7 +53,6 @@ then
 fi
 
 set -e
-
 dir_array=($(echo $directories | sed 's/,/\n/g'))
 old_uid=$(id -u $username)
 sudo_prefix="/var/lib/ambari-agent/ambari-sudo.sh -H -E"
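
With the third parameter added, the script now has two calling conventions,
matching the get_uid()/set_uid() callers later in this commit. A sketch (the
tmp path, user, directories and uid are all illustrative):

    from resource_management.core import shell

    # query mode: username only -> the script echoes the uid to use and exits 0
    code, new_uid = shell.call(("/var/lib/ambari-agent/tmp/changeUid.sh", "hive"),
                               sudo=True)

    # change mode: username, dirs, uid -> the script re-chowns the given
    # directories and switches the account to that uid
    shell.checked_call(
        "/var/lib/ambari-agent/tmp/changeUid.sh hive /home/hive,/tmp/hive 1001")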

http://git-wip-us.apache.org/repos/asf/ambari/blob/aa729a5b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
index 4d0de7f..886bc45 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
@@ -24,6 +24,7 @@ import tempfile
 from copy import copy
 from resource_management.libraries.functions.version import compare_versions
 from resource_management import *
+from resource_management.core import shell
 
 def setup_users():
   """
@@ -43,11 +44,17 @@ def setup_users():
       )
 
     for user in params.user_list:
-      User(user,
-          gid = params.user_to_gid_dict[user],
-          groups = params.user_to_groups_dict[user],
-          fetch_nonlocal_groups = params.fetch_nonlocal_groups
-      )
+      if params.override_uid == "true":
+        User(user,
+             uid = get_uid(user),
+             gid = params.user_to_gid_dict[user],
+             groups = params.user_to_groups_dict[user],
+             )
+      else:
+        User(user,
+             gid = params.user_to_gid_dict[user],
+             groups = params.user_to_groups_dict[user],
+             )
 
     if params.override_uid == "true":
       set_uid(params.smoke_user, params.smoke_user_dirs)
@@ -65,6 +72,7 @@ def setup_users():
                create_parents = True,
                cd_access="a",
     )
+
     if params.override_uid == "true":
       set_uid(params.hbase_user, params.hbase_user_dirs)
     else:
@@ -125,7 +133,7 @@ def create_users_and_groups(user_and_groups):
     Group(copy(groups_list),
     )
   return groups_list
-    
+
 def set_uid(user, user_dirs):
   """
   user_dirs - comma separated directories
@@ -136,9 +144,30 @@ def set_uid(user, user_dirs):
        content=StaticFile("changeToSecureUid.sh"),
        mode=0555)
   ignore_groupsusers_create_str = str(params.ignore_groupsusers_create).lower()
-  Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs}"),
+  uid = get_uid(user)
+  Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs} {uid}"),
           not_if = format("(test $(id -u {user}) -gt 1000) || ({ignore_groupsusers_create_str})"))
-    
+
+def get_uid(user):
+  import params
+  user_str = str(user) + "_uid"
+  service_env = [ serviceEnv for serviceEnv in params.config['configurations'] if user_str in params.config['configurations'][serviceEnv]]
+
+  if service_env and params.config['configurations'][service_env[0]][user_str]:
+    service_env_str = str(service_env[0])
+    uid = params.config['configurations'][service_env_str][user_str]
+    if len(service_env) > 1:
+      Logger.warning("Multiple values found for %s, using %s" % (user_str, uid))
+    return uid
+  else:
+    if user == params.smoke_user:
+      return 0
+    File(format("{tmp_dir}/changeUid.sh"),
+         content=StaticFile("changeToSecureUid.sh"),
+         mode=0555)
+    code, newUid = shell.call((format("{tmp_dir}/changeUid.sh"), format("{user}")), sudo=True)
+    return newUid
+
 def setup_hadoop_env():
   import params
   stackversion = params.stack_version_unformatted
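
The config scan in get_uid() above, condensed to its core with illustrative
data: it looks for a "<user>_uid" key in any config type and falls back to
the changeUid.sh query mode when none is set:

    configurations = {"hadoop-env": {"hdfs_uid": "1013"}, "hive-env": {}}
    user_str = "hdfs" + "_uid"
    hits = [t for t in configurations if user_str in configurations[t]]
    uid = configurations[hits[0]][user_str] if hits else None
    print(uid)  # -> 1013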

http://git-wip-us.apache.org/repos/asf/ambari/blob/aa729a5b/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py b/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py
index 75c6543..1d2351f 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py
@@ -21,6 +21,7 @@ limitations under the License.
 from stacks.utils.RMFTestCase import *
 from mock.mock import MagicMock, call, patch
 from resource_management import Hook
+import itertools
 import getpass
 import os
 
@@ -45,147 +46,201 @@ class TestHookBeforeInstall(RMFTestCase):
     self.executeScript("2.0.6/hooks/before-ANY/scripts/hook.py",
                        classname="BeforeAnyHook",
                        command="hook",
-                       config_file="default.json"
-    )
-
-    self.assertResourceCalled('Group', 'hadoop',
-    )
-    self.assertResourceCalled('Group', 'nobody',
-    )
-    self.assertResourceCalled('Group', 'users',
+                       config_file="default.json",
+                       call_mocks=itertools.cycle([(0, "1000")])
     )
+    self.assertResourceCalled('Group', 'hadoop',)
+    self.assertResourceCalled('Group', 'nobody',)
+    self.assertResourceCalled('Group', 'users',)
+    self.assertResourceCalled('File', '/tmp/changeUid.sh',
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
     self.assertResourceCalled('User', 'hive',
-        gid = 'hadoop',
-        groups = [u'hadoop'],
-        fetch_nonlocal_groups = True,
-    )
+                              gid = 'hadoop',
+                              uid = '1000',
+                              groups = [u'hadoop'],
+                              )
+    self.assertResourceCalled('File', '/tmp/changeUid.sh',
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
     self.assertResourceCalled('User', 'oozie',
-        gid = 'hadoop',
-        groups = [u'users'],
-        fetch_nonlocal_groups = True,
-    )
+                              gid = 'hadoop',
+                              uid = '1000',
+                              groups = [u'users'],
+                              )
+    self.assertResourceCalled('File', '/tmp/changeUid.sh',
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
     self.assertResourceCalled('User', 'nobody',
-        gid = 'hadoop',
-        groups = [u'nobody'],
-        fetch_nonlocal_groups = True,
-    )
+                              gid = 'hadoop',
+                              uid = '1000',
+                              groups = [u'nobody'],
+                              )
     self.assertResourceCalled('User', 'ambari-qa',
-        gid = 'hadoop',
-        groups = [u'users'],
-        fetch_nonlocal_groups = True,
-    )
+                              gid = 'hadoop',
+                              uid = 0,
+                              groups = [u'users'],
+                              )
+    self.assertResourceCalled('File', '/tmp/changeUid.sh',
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
     self.assertResourceCalled('User', 'flume',
-        gid = 'hadoop',
-        groups = [u'hadoop'],
-        fetch_nonlocal_groups = True,
-    )
+                              gid = 'hadoop',
+                              uid = '1000',
+                              groups = [u'hadoop'],
+                              )
+    self.assertResourceCalled('File', '/tmp/changeUid.sh',
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
     self.assertResourceCalled('User', 'hdfs',
-        gid = 'hadoop',
-        groups = [u'hadoop'],
-        fetch_nonlocal_groups = True,
-    )
+                              gid = 'hadoop',
+                              uid = '1000',
+                              groups = [u'hadoop'],
+                              )
+    self.assertResourceCalled('File', '/tmp/changeUid.sh',
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
     self.assertResourceCalled('User', 'storm',
-        gid = 'hadoop',
-        groups = [u'hadoop'],
-        fetch_nonlocal_groups = True,
-    )
+                              gid = 'hadoop',
+                              uid = '1000',
+                              groups = [u'hadoop'],
+                              )
+    self.assertResourceCalled('File', '/tmp/changeUid.sh',
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
     self.assertResourceCalled('User', 'mapred',
-        gid = 'hadoop',
-        groups = [u'hadoop'],
-        fetch_nonlocal_groups = True,
-    )
+                              gid = 'hadoop',
+                              uid = '1000',
+                              groups = [u'hadoop'],
+                              )
+    self.assertResourceCalled('File', '/tmp/changeUid.sh',
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
     self.assertResourceCalled('User', 'hbase',
-        gid = 'hadoop',
-        groups = [u'hadoop'],
-        fetch_nonlocal_groups = True,
-    )
+                              gid = 'hadoop',
+                              uid = '1000',
+                              groups = [u'hadoop'],
+                              )
+    self.assertResourceCalled('File', '/tmp/changeUid.sh',
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
     self.assertResourceCalled('User', 'tez',
-        gid = 'hadoop',
-        groups = [u'users'],
-        fetch_nonlocal_groups = True,
-    )
+                              gid = 'hadoop',
+                              uid = '1000',
+                              groups = [u'users'],
+                              )
+    self.assertResourceCalled('File', '/tmp/changeUid.sh',
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
     self.assertResourceCalled('User', 'zookeeper',
-        gid = 'hadoop',
-        groups = [u'hadoop'],
-        fetch_nonlocal_groups = True,
-    )
+                              gid = 'hadoop',
+                              uid = '1000',
+                              groups = [u'hadoop'],
+                              )
+    self.assertResourceCalled('File', '/tmp/changeUid.sh',
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
     self.assertResourceCalled('User', 'falcon',
-        gid = 'hadoop',
-        groups = [u'users'],
-        fetch_nonlocal_groups = True,
-    )
+                              gid = 'hadoop',
+                              uid = '1000',
+                              groups = [u'users'],
+                              )
+    self.assertResourceCalled('File', '/tmp/changeUid.sh',
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
     self.assertResourceCalled('User', 'sqoop',
-        gid = 'hadoop',
-        groups = [u'hadoop'],
-        fetch_nonlocal_groups = True,
-    )
+                              gid = 'hadoop',
+                              uid = '1000',
+                              groups = [u'hadoop'],
+                              )
+    self.assertResourceCalled('File', '/tmp/changeUid.sh',
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
     self.assertResourceCalled('User', 'yarn',
-        gid = 'hadoop',
-        groups = [u'hadoop'],
-        fetch_nonlocal_groups = True,
-    )
+                              gid = 'hadoop',
+                              uid = '1000',
+                              groups = [u'hadoop'],
+                              )
+    self.assertResourceCalled('File', '/tmp/changeUid.sh',
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
     self.assertResourceCalled('User', 'hcat',
-        gid = 'hadoop',
-        groups = [u'hadoop'],
-        fetch_nonlocal_groups = True,
-    )
+                              gid = 'hadoop',
+                              uid = '1000',
+                              groups = [u'hadoop'],
+                              )
     self.assertResourceCalled('File', '/tmp/changeUid.sh',
-        content = StaticFile('changeToSecureUid.sh'),
-        mode = 0555,
-    )
-    self.assertResourceCalled('Execute', '/tmp/changeUid.sh ambari-qa /tmp/hadoop-ambari-qa,/tmp/hsperfdata_ambari-qa,/home/ambari-qa,/tmp/ambari-qa,/tmp/sqoop-ambari-qa',
-        not_if = '(test $(id -u ambari-qa) -gt 1000) || (false)',
-    )
-    self.assertResourceCalled('Directory', self.TMP_PATH,
-        owner = 'hbase',
-        mode = 0775,
-        create_parents = True,
-        cd_access='a'
-    )
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
+    self.assertResourceCalled('Execute', '/tmp/changeUid.sh ambari-qa /tmp/hadoop-ambari-qa,/tmp/hsperfdata_ambari-qa,/home/ambari-qa,/tmp/ambari-qa,/tmp/sqoop-ambari-qa 0',
+                              not_if = '(test $(id -u ambari-qa) -gt 1000) || (false)',
+                              )
+    self.assertResourceCalled('Directory', '/tmp/hbase-hbase',
+                              owner = 'hbase',
+                              create_parents = True,
+                              mode = 0775,
+                              cd_access = 'a',
+                              )
     self.assertResourceCalled('File', '/tmp/changeUid.sh',
-        content = StaticFile('changeToSecureUid.sh'),
-        mode = 0555,
-    )
-    self.assertResourceCalled('Execute', '/tmp/changeUid.sh hbase /home/hbase,/tmp/hbase,/usr/bin/hbase,/var/log/hbase,' + self.TMP_PATH,
-        not_if = '(test $(id -u hbase) -gt 1000) || (false)',
-    )
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
+    self.assertResourceCalled('File', '/tmp/changeUid.sh',
+                              content = StaticFile('changeToSecureUid.sh'),
+                              mode = 0555,
+                              )
+    self.assertResourceCalled('Execute', '/tmp/changeUid.sh hbase /home/hbase,/tmp/hbase,/usr/bin/hbase,/var/log/hbase,/tmp/hbase-hbase 1000',
+                              not_if = '(test $(id -u hbase) -gt 1000) || (false)',
+                              )
     self.assertResourceCalled('User', 'test_user1',
-        fetch_nonlocal_groups = True,
-    )
+                              fetch_nonlocal_groups = True,
+                              )
     self.assertResourceCalled('User', 'test_user2',
-        fetch_nonlocal_groups = True,
-    )
-    self.assertResourceCalled('Group', 'hdfs',
-    )
-    self.assertResourceCalled('Group', 'test_group',
-    )
+                              fetch_nonlocal_groups = True,
+                              )
+    self.assertResourceCalled('Group', 'hdfs',)
+    self.assertResourceCalled('Group', 'test_group',)
     self.assertResourceCalled('User', 'hdfs',
-        groups = [u'hadoop', u'hdfs', u'test_group'],
-        fetch_nonlocal_groups = True,
-    )
+                              fetch_nonlocal_groups = True,
+                              groups = [u'hadoop', u'hdfs', u'test_group'],
+                              )
     self.assertResourceCalled('Directory', '/etc/hadoop',
-        mode = 0755
-    )
+                              mode = 0755,
+                              )
     self.assertResourceCalled('Directory', '/etc/hadoop/conf.empty',
-        owner = 'root',
-        group = 'hadoop',
-        create_parents = True,
-    )
+                              owner = 'root',
+                              create_parents = True,
+                              group = 'hadoop',
+                              )
     self.assertResourceCalled('Link', '/etc/hadoop/conf',
-        not_if = 'ls /etc/hadoop/conf',
-        to = '/etc/hadoop/conf.empty',
-    )
+                              not_if = 'ls /etc/hadoop/conf',
+                              to = '/etc/hadoop/conf.empty',
+                              )
     self.assertResourceCalled('File', '/etc/hadoop/conf/hadoop-env.sh',
-        content = InlineTemplate(self.getConfig()['configurations']['hadoop-env']['content']),
-        owner = 'hdfs',
-        group = 'hadoop'
+                              content = InlineTemplate(self.getConfig()['configurations']['hadoop-env']['content']),
+                              owner = 'hdfs',
+                              group = 'hadoop'
     )
     self.assertResourceCalled('Directory', '/tmp/hadoop_java_io_tmpdir',
                               owner = 'hdfs',
                               group = 'hadoop',
-                              mode = 01777
-    )
-
+                              mode = 01777,
+                              )
     self.assertResourceCalled('Directory', '/tmp/AMBARI-artifacts/',
                               create_parents = True,
                               )
@@ -198,20 +253,17 @@ class TestHookBeforeInstall(RMFTestCase):
                               )
     self.assertResourceCalled('Directory', '/usr/jdk64',)
     self.assertResourceCalled('Execute', ('chmod', 'a+x', u'/usr/jdk64'),
-                              sudo = True
-                              )
-    self.assertResourceCalled('Execute', 'cd /tmp/jdk_tmp_dir && tar -xf /tmp/jdk-7u67-linux-x64.tar.gz && ambari-sudo.sh cp -rp /tmp/jdk_tmp_dir/* /usr/jdk64'
+                              sudo = True,
                               )
+    self.assertResourceCalled('Execute', 'cd /tmp/jdk_tmp_dir && tar -xf /tmp/jdk-7u67-linux-x64.tar.gz && ambari-sudo.sh cp -rp /tmp/jdk_tmp_dir/* /usr/jdk64',)
     self.assertResourceCalled('Directory', '/tmp/jdk_tmp_dir',
-                              action = ['delete']
+                              action = ['delete'],
                               )
-
     self.assertResourceCalled('File', '/usr/jdk64/jdk1.7.0_45/bin/java',
                               mode = 0755,
-                              cd_access = "a",
+                              cd_access = 'a',
                               )
     self.assertResourceCalled('Execute', ('chmod', '-R', '755', u'/usr/jdk64/jdk1.7.0_45'),
-      sudo = True,
-    )
-
+                              sudo = True,
+                              )
     self.assertNoMoreResources()
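
The new call_mocks=itertools.cycle([(0, "1000")]) argument makes every mocked
shell.call during the hook run return the same (exit-code, stdout) pair,
which is why each User resource above is asserted with uid = '1000':

    import itertools

    mocked = itertools.cycle([(0, "1000")])
    print(next(mocked))  # (0, '1000')
    print(next(mocked))  # (0, '1000') -- repeats forever, one per shell.call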

http://git-wip-us.apache.org/repos/asf/ambari/blob/aa729a5b/ambari-web/app/controllers/wizard/step7_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard/step7_controller.js b/ambari-web/app/controllers/wizard/step7_controller.js
index ec870f0..66bfafa 100644
--- a/ambari-web/app/controllers/wizard/step7_controller.js
+++ b/ambari-web/app/controllers/wizard/step7_controller.js
@@ -525,6 +525,7 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, App.E
     }
     var stepConfigs = this.createStepConfigs();
     var serviceConfigs = this.renderConfigs(stepConfigs, configs);
+    this.addUidAndGidRepresentations(serviceConfigs);
     // if HA is enabled -> Make some reconfigurations
     if (this.get('wizardController.name') === 'addServiceController') {
       this.updateComponentActionConfigs(configs, serviceConfigs);
@@ -785,6 +786,38 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, App.E
   },
 
   /**
+   * Set the uid property for user properties. The uid is later used to help map the user and uid values in adjacent columns
+   * @param {object} miscSvc
+   * @param {string} svcName
+   * @private
+   */
+  _setUID: function (miscSvc, svcName) {
+    var user = miscSvc.configs.findProperty('name', svcName + '_user');
+    if (user) {
+      var uid = miscSvc.configs.findProperty('name', user.value + '_uid');
+      if (uid) {
+        user.set('ugid', uid);
+      }
+    }
+  },
+
+  /**
+   * Set the gid property for group properties. The gid is later used to help map the group and gid values in adjacent columns
+   * @param {object} miscSvc
+   * @param {string} svcName
+   * @private
+   */
+  _setGID: function (miscSvc, svcName) {
+    var group = miscSvc.configs.findProperty('name', svcName + '_group');
+    if (group) {
+      var gid = miscSvc.configs.findProperty('name', group.value + '_gid');
+      if (gid) {
+        group.set('ugid', gid);
+      }
+    }
+  },
+
+  /**
    * render configs, distribute them by service
    * and wrap each in ServiceConfigProperty object
    * @param stepConfigs
@@ -824,6 +857,11 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, App.E
       this.updateHostOverrides(serviceConfigProperty, _config);
       if (this.get('wizardController.name') === 'addServiceController') {
         this._updateIsEditableFlagForConfig(serviceConfigProperty, true);
+        //since the override_uid and ignore_groupsusers_create changes are not saved to the database post install, they should be editable only
+        //during initial cluster installation
+        if (['override_uid', 'ignore_groupsusers_create'].contains(serviceConfigProperty.get('name'))) {
+          serviceConfigProperty.set('isEditable', false);
+        }
       }
       if (!this.get('content.serviceConfigProperties.length') && !serviceConfigProperty.get('hasInitialValue')) {
         App.ConfigInitializer.initialValue(serviceConfigProperty, localDB, dependencies);
@@ -843,6 +881,35 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, App.E
     return stepConfigs;
   },
 
+  addUidAndGidRepresentations: function(serviceConfigs) {
+    //map the uids to the corresponding users
+    var miscSvc = serviceConfigs.findProperty('serviceName', 'MISC');
+    if (miscSvc) {
+      //iterate through the list of users and groups and assign the uid/gid accordingly
+      //user properties are servicename_user
+      //uid properties are value of servicename_user + _uid
+      //group properties are servicename_group
+      //gid properties are value of servicename_group + _gid
+      //we will map the users/uids and groups/gids based on this assumption
+      this.get('selectedServiceNames').forEach(function (serviceName) {
+        this._setUID(miscSvc, serviceName.toLowerCase());
+        this._setGID(miscSvc, serviceName.toLowerCase());
+      }, this);
+
+      //for zookeeper, the user property name does not follow the servicename_user convention used by other services:
+      //it is zk_user rather than zookeeper_user, hence set the uid for zk_user separately
+      this._setUID(miscSvc, 'zk');
+      //for mapreduce2, the user property name is mapred_user rather than mapreduce2_user, hence set the uid for mapred_user separately
+      this._setUID(miscSvc, 'mapred');
+      //for hadoop, the group property name does not follow the servicename_group convention used by other services:
+      //it is user_group, hence set the gid for user_group separately
+      this._setGID(miscSvc, 'user');
+
+      // uid/gid properties are rendered in a column next to their user/group field, hence hide them from showing up as standalone rows
+      miscSvc.configs.filterProperty('displayType', 'uid_gid').setEach('isVisible', false);
+    }
+  },
+
   /**
    * Add host name properties to appropriate categories (for installer and add service)
    *
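
A minimal standalone sketch (Python, illustrative only; the controller above is Ember JavaScript) of the naming convention addUidAndGidRepresentations relies on: a user property is named <servicename>_user, and its uid property is named after the user property's value plus _uid. Property names and values below are made up for the example.

    def find_property(configs, name):
        return next((c for c in configs if c['name'] == name), None)

    def set_uid(configs, svc_name):
        # mirror _setUID: look up <svc>_user, then <user value>_uid, and link them
        user = find_property(configs, svc_name + '_user')
        if user:
            uid = find_property(configs, user['value'] + '_uid')
            if uid:
                user['ugid'] = uid

    configs = [{'name': 'hdfs_user', 'value': 'hdfs'},
               {'name': 'hdfs_uid', 'value': '1006'}]
    set_uid(configs, 'hdfs')
    assert find_property(configs, 'hdfs_user')['ugid']['value'] == '1006'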

http://git-wip-us.apache.org/repos/asf/ambari/blob/aa729a5b/ambari-web/app/mappers/configs/stack_config_properties_mapper.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mappers/configs/stack_config_properties_mapper.js b/ambari-web/app/mappers/configs/stack_config_properties_mapper.js
index 2c8959d..0739dcc 100644
--- a/ambari-web/app/mappers/configs/stack_config_properties_mapper.js
+++ b/ambari-web/app/mappers/configs/stack_config_properties_mapper.js
@@ -197,9 +197,14 @@ App.stackConfigPropertiesMapper = App.QuickDataMapper.create({
    * @param config
    */
   handleSpecialProperties: function(config) {
-    if (!config.StackConfigurations.property_type.contains('ADDITIONAL_USER_PROPERTY')) {
+    var types = config.StackConfigurations.property_type;
+    if (!types.contains('ADDITIONAL_USER_PROPERTY')) {
       config.index = App.StackService.displayOrder.indexOf(config.StackConfigurations.service_name) + 1 || 30;
     }
+    // displayType from the stack is ignored because UID and GID should be shown along with the service's user config
+    if (types.contains('UID') || types.contains('GID')) {
+      config.StackConfigurations.property_value_attributes.type = 'uid_gid';
+    }
     config.StackConfigurations.service_name = 'MISC';
     config.category = 'Users and Groups';
   },
@@ -210,7 +215,12 @@ App.stackConfigPropertiesMapper = App.QuickDataMapper.create({
    * @returns {Boolean}
    */
   isMiscService: function(type) {
-    return type.length && (type.contains('USER') || type.contains('GROUP') || type.contains('ADDITIONAL_USER_PROPERTY'));
+    return type.length &&
+      (type.contains('USER')
+      || type.contains('GROUP')
+      || type.contains('ADDITIONAL_USER_PROPERTY')
+      || type.contains('UID')
+      || type.contains('GID'));
   },
 
   /**
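
A hedged restatement of the mapper rules above as executable Python (the real code is Ember JavaScript): a stack property belongs to the MISC service when its property_type intersects the set below, and UID/GID properties additionally get the 'uid_gid' display type so they render next to their user/group field. The function names are illustrative.

    MISC_TYPES = {'USER', 'GROUP', 'ADDITIONAL_USER_PROPERTY', 'UID', 'GID'}

    def is_misc_property(property_types):
        # mirrors isMiscService
        return bool(MISC_TYPES.intersection(property_types))

    def display_type_for(property_types, stack_display_type):
        # mirrors the override in handleSpecialProperties
        if {'UID', 'GID'}.intersection(property_types):
            return 'uid_gid'
        return stack_display_type

    assert is_misc_property(['UID'])
    assert display_type_for(['GID'], 'int') == 'uid_gid'
    assert display_type_for(['USER'], 'user') == 'user'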

http://git-wip-us.apache.org/repos/asf/ambari/blob/aa729a5b/ambari-web/app/styles/application.less
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/application.less b/ambari-web/app/styles/application.less
index cc02624..12adf88 100644
--- a/ambari-web/app/styles/application.less
+++ b/ambari-web/app/styles/application.less
@@ -1145,6 +1145,21 @@ h1 {
   }
 }
 
+.serviceConfigUGIDLbl {
+  display: inline-block;
+  text-align: left;
+  margin-left: 82px;
+  width: 100px;
+}
+
+.serviceConfigUGID {
+  width: 150px !important;
+}
+
+.serviceConfigNoUGID {
+  width: 500px !important;
+}
+
 #serviceConfig {
   margin-top: 20px;
   .alert{

http://git-wip-us.apache.org/repos/asf/ambari/blob/aa729a5b/ambari-web/app/templates/wizard/controls_service_config_usergroup_with_id.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/wizard/controls_service_config_usergroup_with_id.hbs b/ambari-web/app/templates/wizard/controls_service_config_usergroup_with_id.hbs
new file mode 100644
index 0000000..8ad85a6
--- /dev/null
+++ b/ambari-web/app/templates/wizard/controls_service_config_usergroup_with_id.hbs
@@ -0,0 +1,27 @@
+{{!
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+}}
+
+{{#if view.isUIDGIDVisible}}
+    {{view App.ServiceConfigTextField serviceConfigBinding="view.serviceConfig" class="serviceConfigUGID"}}
+    <label class="serviceConfigUGIDLbl" {{bindAttr for="view.serviceConfig.ugid.name"}}>
+        {{view.serviceConfig.ugid.displayName}}
+    </label>
+    {{view Ember.TextField valueBinding="view.serviceConfig.ugid.value" class="serviceConfigUGID"}}
+{{else}}
+    {{view App.ServiceConfigTextField serviceConfigBinding="view.serviceConfig" class="serviceConfigNoUGID"}}
+{{/if}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/aa729a5b/ambari-web/app/utils/config.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/config.js b/ambari-web/app/utils/config.js
index 86b01ef..0c3d280 100644
--- a/ambari-web/app/utils/config.js
+++ b/ambari-web/app/utils/config.js
@@ -562,6 +562,9 @@ App.config = Em.Object.create({
    */
   getViewClass: function (displayType, dependentConfigPattern, unit) {
     switch (displayType) {
+      case 'user':
+      case 'group':
+        return App.ServiceConfigTextFieldUserGroupWithID;
       case 'checkbox':
       case 'boolean':
         return dependentConfigPattern ? App.ServiceConfigCheckboxWithDependencies : App.ServiceConfigCheckbox;
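
A small Python sketch of the dispatch pattern getViewClass implements: a displayType-to-view lookup with a fallback. The string values below stand in for the Ember view classes and are not real objects; the dependentConfigPattern branch is omitted for brevity.

    VIEW_BY_DISPLAY_TYPE = {
        'user': 'ServiceConfigTextFieldUserGroupWithID',
        'group': 'ServiceConfigTextFieldUserGroupWithID',
        'checkbox': 'ServiceConfigCheckbox',
        'boolean': 'ServiceConfigCheckbox',
    }

    def get_view_class(display_type, default='ServiceConfigTextField'):
        return VIEW_BY_DISPLAY_TYPE.get(display_type, default)

    assert get_view_class('user') == 'ServiceConfigTextFieldUserGroupWithID'
    assert get_view_class('unknown') == 'ServiceConfigTextField'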

http://git-wip-us.apache.org/repos/asf/ambari/blob/aa729a5b/ambari-web/app/views/common/configs/service_configs_by_category_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/configs/service_configs_by_category_view.js b/ambari-web/app/views/common/configs/service_configs_by_category_view.js
index a0fb2aa..7fad44e 100644
--- a/ambari-web/app/views/common/configs/service_configs_by_category_view.js
+++ b/ambari-web/app/views/common/configs/service_configs_by_category_view.js
@@ -46,6 +46,7 @@ App.ServiceConfigsByCategoryView = Em.View.extend(App.UserPref, App.ConfigOverri
    * @type {App.ServiceConfigProperty[]}
    */
   serviceConfigs: null,
+  isUIDGIDVisible: true,
 
   /**
    * This is array of all the properties which apply
@@ -732,6 +733,11 @@ App.ServiceConfigsByCategoryView = Em.View.extend(App.UserPref, App.ConfigOverri
   setRecommendedValue: function (event) {
     var serviceConfigProperty = event.contexts[0];
     serviceConfigProperty.set('value', serviceConfigProperty.get('recommendedValue'));
+
+    //in case of USER/GROUP fields, if they have uid/gid set, then these need to be reset to the recommended value as well
+    if (serviceConfigProperty.get('ugid')) {
+      serviceConfigProperty.set('ugid.value', serviceConfigProperty.get('ugid.recommendedValue'));
+    }
     serviceConfigProperty = null;
   },
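
Sketch (Python, with dictionaries standing in for Ember objects) of the reset rule added above: restoring a user/group property to its recommended value also restores the linked uid/gid property, so the two columns never diverge.

    def set_recommended_value(prop):
        prop['value'] = prop['recommendedValue']
        ugid = prop.get('ugid')
        if ugid:
            ugid['value'] = ugid['recommendedValue']

    user = {'value': 'custom', 'recommendedValue': 'hdfs',
            'ugid': {'value': '9999', 'recommendedValue': '1006'}}
    set_recommended_value(user)
    assert (user['value'], user['ugid']['value']) == ('hdfs', '1006')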
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/aa729a5b/ambari-web/app/views/common/controls_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/controls_view.js b/ambari-web/app/views/common/controls_view.js
index a1e501b..2daf26c 100644
--- a/ambari-web/app/views/common/controls_view.js
+++ b/ambari-web/app/views/common/controls_view.js
@@ -219,6 +219,40 @@ App.ServiceConfigTextField = Ember.TextField.extend(App.ServiceConfigPopoverSupp
 });
 
 /**
+ * Customized input control for user/group configs with corresponding uid/gid specified
+ * @type {Em.View}
+ */
+App.ServiceConfigTextFieldUserGroupWithID = Ember.View.extend(App.ServiceConfigPopoverSupport, {
+  valueBinding: 'serviceConfig.value',
+  placeholderBinding: 'serviceConfig.savedValue',
+  classNames: 'display-inline-block',
+
+  templateName: require('templates/wizard/controls_service_config_usergroup_with_id'),
+
+  isUIDGIDVisible: function () {
+    var overrideUidDisabled = this.get('parentView').serviceConfigs.findProperty('name', 'override_uid').value === 'false';
+    //don't display the ugid field if there is no uid/gid for this property or override_uid is unchecked
+    if (Em.isNone(this.get('serviceConfig.ugid')) || overrideUidDisabled) {
+      return false;
+    }
+
+    var serviceName = this.get('serviceConfig').name.substr(0, this.get('serviceConfig').name.indexOf('_')).toUpperCase();
+    if (serviceName === 'ZK') {
+      serviceName = 'ZOOKEEPER';
+    }
+    if (serviceName === 'MAPRED') {
+      serviceName = 'YARN';
+    }
+    //hide the field when the service is already installed (addServiceController flow) or for the Hadoop user group
+    if (App.Service.find(serviceName).get('isLoaded') || serviceName === 'USER') {
+      return false;
+    }
+
+    return this.get('parentView.isUIDGIDVisible');
+  }.property('parentView.isUIDGIDVisible')
+});
+
+/**
  * Customized input control with Units type specified
  * @type {Em.View}
  */
@@ -415,6 +449,11 @@ App.ServiceConfigCheckbox = Ember.Checkbox.extend(App.ServiceConfigPopoverSuppor
       this.set('serviceConfig.value', this.get(this.get('checked') + 'Value'));
       this.get('serviceConfig').set("editDone", true);
       this.sendRequestRorDependentConfigs(this.get('serviceConfig'));
+
+      //if the checkbox being toggled is 'Have Ambari manage UIDs' in the Misc tab, show/hide the uid/gid column accordingly
+      if (this.get('serviceConfig.name') === 'override_uid') {
+         this.set('parentView.isUIDGIDVisible', this.get('checked'));
+      }
     }
   }.observes('checked'),
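
Illustrative Python restatement of the service-name derivation inside isUIDGIDVisible above: the prefix of the config name (up to the first underscore) is upper-cased, with the two special cases the comments call out.

    def derive_service_name(config_name):
        prefix = config_name.split('_', 1)[0].upper()
        # zk_user belongs to ZOOKEEPER, mapred_user to YARN
        return {'ZK': 'ZOOKEEPER', 'MAPRED': 'YARN'}.get(prefix, prefix)

    assert derive_service_name('zk_user') == 'ZOOKEEPER'
    assert derive_service_name('mapred_user') == 'YARN'
    assert derive_service_name('hdfs_user') == 'HDFS'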
 


[48/50] [abbrv] ambari git commit: AMBARI-21501. Make HSI's 'hive.llap.zk.sm.keytab' and 'hive.service.keytab' group readable.

Posted by jo...@apache.org.
AMBARI-21501. Make HSI's 'hive.llap.zk.sm.keytab' and 'hive.service.keytab' group readable.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9cd7fbe0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9cd7fbe0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9cd7fbe0

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 9cd7fbe0af5fc3dba0a6bd553b55ceb7ae8b70cf
Parents: 0ed09cd
Author: Swapan Shridhar <ss...@hortonworks.com>
Authored: Mon Jul 17 12:15:19 2017 -0700
Committer: Swapan Shridhar <ss...@hortonworks.com>
Committed: Mon Jul 17 15:07:12 2017 -0700

----------------------------------------------------------------------
 .../stacks/HDP/2.6/services/HIVE/kerberos.json  | 151 -------------------
 .../stacks/HDP/2.6/services/YARN/kerberos.json  |   2 +-
 2 files changed, 1 insertion(+), 152 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9cd7fbe0/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/kerberos.json
deleted file mode 100644
index b6e57e1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/kerberos.json
+++ /dev/null
@@ -1,151 +0,0 @@
-{
-  "services": [
-    {
-      "name": "HIVE",
-      "identities": [
-        {
-          "name": "/spnego"
-        },
-        {
-          "name": "/smokeuser"
-        }
-      ],
-      "configurations": [
-        {
-          "hive-site": {
-            "hive.metastore.sasl.enabled": "true",
-            "hive.server2.authentication": "KERBEROS"
-          }
-        },
-        {
-          "ranger-hive-audit": {
-            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
-            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
-            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
-            "xasecure.audit.jaas.Client.option.storeKey": "false",
-            "xasecure.audit.jaas.Client.option.serviceName": "solr",
-            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
-          }
-        }
-      ],
-      "components": [
-        {
-          "name": "HIVE_METASTORE",
-          "identities": [
-            {
-              "name": "/HIVE/HIVE_SERVER/hive_server_hive",
-              "principal": {
-                "configuration": "hive-site/hive.metastore.kerberos.principal"
-              },
-              "keytab": {
-                "configuration": "hive-site/hive.metastore.kerberos.keytab.file"
-              }
-            }
-          ]
-        },
-        {
-          "name": "HIVE_SERVER",
-          "identities": [
-            {
-              "name": "/HDFS/NAMENODE/hdfs"
-            },
-            {
-              "name": "hive_server_hive",
-              "principal": {
-                "value": "hive/_HOST@${realm}",
-                "type": "service",
-                "configuration": "hive-site/hive.server2.authentication.kerberos.principal",
-                "local_username": "${hive-env/hive_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/hive.service.keytab",
-                "owner": {
-                  "name": "${hive-env/hive_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "hive-site/hive.server2.authentication.kerberos.keytab"
-              }
-            },
-            {
-              "name": "atlas_kafka",
-              "reference": "/HIVE/HIVE_SERVER/hive_server_hive",
-              "principal": {
-                "configuration": "hive-atlas-application.properties/atlas.jaas.KafkaClient.option.principal"
-              },
-              "keytab": {
-                "configuration": "hive-atlas-application.properties/atlas.jaas.KafkaClient.option.keyTab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "hive-site/hive.server2.authentication.spnego.principal"
-              },
-              "keytab": {
-                "configuration": "hive-site/hive.server2.authentication.spnego.keytab"
-              }
-            },
-            {
-              "name": "ranger_audit",
-              "reference": "/HIVE/HIVE_SERVER/hive_server_hive",
-              "principal": {
-                "configuration": "ranger-hive-audit/xasecure.audit.jaas.Client.option.principal"
-              },
-              "keytab": {
-                "configuration": "ranger-hive-audit/xasecure.audit.jaas.Client.option.keyTab"
-              }
-            }
-          ]
-        },
-        {
-          "name": "HIVE_SERVER_INTERACTIVE",
-          "identities": [
-            {
-              "name": "/HDFS/NAMENODE/hdfs"
-            },
-            {
-              "name": "/HIVE/HIVE_SERVER/hive_server_hive"
-            },
-            {
-              "name": "/HIVE/HIVE_SERVER/spnego"
-            },
-            {
-              "name": "/YARN/NODEMANAGER/llap_zk_hive"
-            }
-          ]
-        },
-        {
-          "name": "WEBHCAT_SERVER",
-          "identities": [
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "webhcat-site/templeton.kerberos.principal"
-              },
-              "keytab": {
-                "configuration": "webhcat-site/templeton.kerberos.keytab"
-              }
-            }
-          ],
-          "configurations": [
-            {
-              "core-site": {
-                "hadoop.proxyuser.HTTP.hosts": "${clusterHostInfo/webhcat_server_host|append(core-site/hadoop.proxyuser.HTTP.hosts, \\\\,, true)}"
-              }
-            },
-            {
-              "webhcat-site": {
-                "templeton.kerberos.secret": "secret",
-                "templeton.hive.properties": "hive.metastore.local=false,hive.metastore.uris=${clusterHostInfo/hive_metastore_host|each(thrift://%s:9083, \\\\,, \\s*\\,\\s*)},hive.metastore.sasl.enabled=true,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse,hive.exec.mode.local.auto=false,hive.metastore.kerberos.principal=hive/_HOST@${realm}"
-              }
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/9cd7fbe0/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
index 60d50eb..b1501b8 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
@@ -117,7 +117,7 @@
                 },
                 "group": {
                   "name": "${cluster-env/user_group}",
-                  "access": ""
+                  "access": "r"
                 },
                 "configuration": "hive-interactive-site/hive.llap.zk.sm.keytab.file"
               },
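
A hedged sketch of what the one-character change above buys: keytab file permissions are derived from the owner/group "access" flags in the kerberos descriptor (this mapping is an assumption about Ambari's usual behavior, not something stated in the commit). Granting the group "r" makes the keytab group-readable, e.g. mode 0440 instead of 0400.

    def keytab_mode(owner_access, group_access):
        # assumption: each "r"/"w" flag maps onto the corresponding POSIX bit
        mode = 0
        if 'r' in owner_access: mode |= 0o400
        if 'w' in owner_access: mode |= 0o200
        if 'r' in group_access: mode |= 0o040
        if 'w' in group_access: mode |= 0o020
        return mode

    assert keytab_mode('r', '') == 0o400    # before this change
    assert keytab_mode('r', 'r') == 0o440   # after this change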


[08/50] [abbrv] ambari git commit: AMBARI-21420 HBase master crashed during/post EU on an IOP4.2.5/HDP2.6 migrated cluster (dili)

Posted by jo...@apache.org.
AMBARI-21420 HBase master crashed during/post EU on an IOP4.2.5/HDP2.6 migrated cluster (dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1f54c6e2
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1f54c6e2
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1f54c6e2

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 1f54c6e272a5a2ad176619062b31ca18bbdf93ea
Parents: 83761d4
Author: Di Li <di...@apache.org>
Authored: Wed Jul 12 15:59:35 2017 -0400
Committer: Di Li <di...@apache.org>
Committed: Wed Jul 12 15:59:35 2017 -0400

----------------------------------------------------------------------
 .../0.96.0.2.0/package/scripts/hbase_service.py    | 17 +++++++++++++++++
 .../0.96.0.2.0/package/scripts/params_linux.py     |  9 +++++++++
 .../BigInsights/4.2.5/upgrades/config-upgrade.xml  | 11 +++++++++++
 .../upgrades/nonrolling-upgrade-to-hdp-2.6.xml     |  5 +++++
 .../BigInsights/4.2/upgrades/config-upgrade.xml    | 11 +++++++++++
 .../4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml |  7 ++++++-
 6 files changed, 59 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/1f54c6e2/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py
index a1003dc..3b8e494 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py
@@ -19,6 +19,8 @@ limitations under the License.
 """
 
 from resource_management import *
+from resource_management.core.logger import Logger
+import datetime
 
 def hbase_service(
   name,
@@ -32,6 +33,22 @@ def hbase_service(
     pid_expression = as_sudo(["cat", pid_file])
     no_op_test = as_sudo(["test", "-f", pid_file]) + format(" && ps -p `{pid_expression}` >/dev/null 2>&1")
     
+    # back up the WAL directory (moved aside, not deleted) during a stack upgrade so stale logs cannot crash the HBase master
+    if params.to_backup_wal_dir:
+      wal_directory = params.wal_directory
+      timestamp = datetime.datetime.now()
+      timestamp_format = '%Y%m%d%H%M%S'
+      wal_directory_backup = '%s_%s' % (wal_directory, timestamp.strftime(timestamp_format))
+
+      mv_cmd = format("hadoop fs -mv {wal_directory} {wal_directory_backup}")
+      try:
+        Execute(mv_cmd,
+          user = params.hbase_user
+        )
+      except Exception, e:
+        # should still allow HBase start/stop to proceed
+        Logger.error("Failed to back up HBase WAL directory, command: {0} . Exception: {1}".format(mv_cmd, e.message))
+
     if action == 'start':
       daemon_cmd = format("{cmd} start {role}")
       

http://git-wip-us.apache.org/repos/asf/ambari/blob/1f54c6e2/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
index 1ee5248..6617a80 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
@@ -44,6 +44,7 @@ from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions.expect import expect
 from ambari_commons.ambari_metrics_helper import select_metric_collector_hosts_from_hostnames
 from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs, generate_ranger_service_config
+from resource_management.libraries.functions.constants import Direction
 
 # server configurations
 config = Script.get_config()
@@ -440,3 +441,11 @@ if has_atlas:
   atlas_with_managed_hbase = len(zk_hosts_matches) > 0
 else:
   atlas_with_managed_hbase = False
+
+wal_directory = "/apps/hbase/data/MasterProcWALs"
+
+backup_wal_dir = default('/configurations/hbase-env/backup_wal_dir', False)
+
+# Make sure WAL logs are not repeatedly moved aside once the EU is finalized.
+upgrade_direction = default("/commandParams/upgrade_direction", None)
+to_backup_wal_dir = upgrade_direction is not None and upgrade_direction == Direction.UPGRADE and backup_wal_dir
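
Standalone sketch (Python) tying the two hunks above together: during an express upgrade with backup_wal_dir set, the MasterProcWALs directory is moved aside under a timestamped name rather than deleted, so a failed move never blocks HBase start/stop. The path is the wal_directory constant from params_linux.py; the helper name is made up.

    import datetime

    def wal_backup_name(wal_directory, now=None):
        now = now or datetime.datetime.now()
        return '%s_%s' % (wal_directory, now.strftime('%Y%m%d%H%M%S'))

    name = wal_backup_name('/apps/hbase/data/MasterProcWALs',
                           datetime.datetime(2017, 7, 12, 15, 59, 35))
    assert name == '/apps/hbase/data/MasterProcWALs_20170712155935'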

http://git-wip-us.apache.org/repos/asf/ambari/blob/1f54c6e2/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
index 42999b2..b51a744 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
@@ -52,6 +52,17 @@
       </component>
     </service>
     
+    <service name="HBASE">
+      <component name="HBASE_MASTER">
+        <changes>
+          <definition xsi:type="configure" id="biginsights_4_2_hbase_env_config" summary="Update HBase configurations">
+            <type>hbase-env</type>
+            <set key="backup_wal_dir" value="true"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
+
     <service name="HIVE">
       <component name="HIVE_SERVER">
         <changes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1f54c6e2/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
index a96ede9..f3c73a0 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
@@ -182,6 +182,11 @@
         <task xsi:type="configure" id="biginsights_4_2_mapreduce_application_framework_patch" />
       </execute-stage>
       
+      <!-- HBASE -->
+      <execute-stage service="HBASE" component="HBASE_MASTER" title="Apply config changes for HBase">
+        <task xsi:type="configure" id="biginsights_4_2_hbase_env_config" />
+      </execute-stage>
+
       <!-- HIVE -->
       <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server">
         <task xsi:type="configure" id="biginsights_4_2_0_0_hive_env_configure"/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1f54c6e2/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
index f9e3e15..b46f476 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
@@ -52,6 +52,17 @@
       </component>
     </service>
     
+    <service name="HBASE">
+      <component name="HBASE_MASTER">
+        <changes>
+          <definition xsi:type="configure" id="biginsights_4_2_hbase_env_config" summary="Update HBase configurations">
+            <type>hbase-env</type>
+            <set key="backup_wal_dir" value="true"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
+
     <service name="HIVE">
       <component name="HIVE_SERVER">
         <changes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1f54c6e2/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
index a96ede9..4867626 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
@@ -181,7 +181,12 @@
       <execute-stage service="MAPREDUCE2" component="HISTORYSERVER" title="Apply config changes for HistoryServer">
         <task xsi:type="configure" id="biginsights_4_2_mapreduce_application_framework_patch" />
       </execute-stage>
-      
+
+      <!-- HBASE -->
+      <execute-stage service="HBASE" component="HBASE_MASTER" title="Apply config changes for HBase">
+        <task xsi:type="configure" id="biginsights_4_2_hbase_env_config" />
+      </execute-stage>
+
       <!-- HIVE -->
       <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server">
         <task xsi:type="configure" id="biginsights_4_2_0_0_hive_env_configure"/>


[47/50] [abbrv] ambari git commit: AMBARI-21481. Upgrading IOP cluster with Spark2 to Ambari 2.5.2 fails on start because config mapping spark2-javaopts-properties is never selected (alejandro)

Posted by jo...@apache.org.
AMBARI-21481. Upgrading IOP cluster with Spark2 to Ambari 2.5.2 fails on start because config mapping spark2-javaopts-properties is never selected (alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0ed09cd5
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0ed09cd5
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0ed09cd5

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 0ed09cd5342cfc4cac0d6061a7b7b9a3cef127c1
Parents: 13bcea0
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Fri Jul 14 16:15:07 2017 -0700
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Mon Jul 17 14:29:36 2017 -0700

----------------------------------------------------------------------
 .../server/upgrade/UpgradeCatalog252.java       | 99 ++++++++++++++++++++
 .../configuration/spark-javaopts-properties.xml |  3 +
 .../spark2-javaopts-properties.xml              |  5 +-
 .../4.2.5/services/SPARK2/metainfo.xml          |  2 +-
 .../configuration/spark-javaopts-properties.xml |  3 +
 5 files changed, 110 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/0ed09cd5/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
index 3c8686c..ea1b034 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
@@ -18,12 +18,19 @@
 package org.apache.ambari.server.upgrade;
 
 import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
+import org.apache.ambari.server.orm.dao.ClusterDAO;
+import org.apache.ambari.server.orm.entities.ClusterConfigMappingEntity;
+import org.apache.ambari.server.orm.entities.ClusterEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
@@ -34,6 +41,8 @@ import org.apache.commons.lang.StringUtils;
 import com.google.common.collect.Sets;
 import com.google.inject.Inject;
 import com.google.inject.Injector;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The {@link org.apache.ambari.server.upgrade.UpgradeCatalog252} upgrades Ambari from 2.5.1 to 2.5.2.
@@ -54,6 +63,13 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
 
   private static final String CLUSTER_ENV = "cluster-env";
 
+  private static final List<String> configTypesToEnsureSelected = Arrays.asList("spark2-javaopts-properties");
+  
+  /**
+   * Logger.
+   */
+  private static final Logger LOG = LoggerFactory.getLogger(UpgradeCatalog252.class);
+
   /**
    * Constructor.
    *
@@ -102,6 +118,7 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
   @Override
   protected void executeDMLUpdates() throws AmbariException, SQLException {
     resetStackToolsAndFeatures();
+    ensureConfigTypesHaveAtLeastOneVersionSelected();
   }
 
   /**
@@ -197,4 +214,86 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
       updateConfigurationPropertiesForCluster(cluster, CLUSTER_ENV, newStackProperties, true, false);
     }
   }
+
+  /**
+   * When doing a cross-stack upgrade, we found that one config type (spark2-javaopts-properties)
+   * did not have any mappings that were selected, so it caused Ambari Server start to fail on the DB Consistency Checker.
+   * To fix this, iterate over all config types and ensure that at least one is selected.
+   * If none are selected, then pick the one with the greatest timestamp; this should be safe since we are only adding
+   * more data to use rather than removing any.
+   */
+  private void ensureConfigTypesHaveAtLeastOneVersionSelected() {
+    ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
+    List<ClusterEntity> clusters = clusterDAO.findAll();
+
+    if (null == clusters) {
+      return;
+    }
+
+    for (ClusterEntity clusterEntity : clusters) {
+      LOG.info("Ensuring all config types have at least one selected config for cluster {}", clusterEntity.getClusterName());
+
+      boolean atLeastOneChanged = false;
+      Collection<ClusterConfigMappingEntity> configMappingEntities = clusterEntity.getConfigMappingEntities();
+
+      if (configMappingEntities != null) {
+        Set<String> configTypesNotSelected = new HashSet<>();
+        Set<String> configTypesWithAtLeastOneSelected = new HashSet<>();
+
+        for (ClusterConfigMappingEntity clusterConfigMappingEntity : configMappingEntities) {
+          String typeName = clusterConfigMappingEntity.getType();
+
+          if (clusterConfigMappingEntity.isSelected() == 1) {
+            configTypesWithAtLeastOneSelected.add(typeName);
+          } else {
+            configTypesNotSelected.add(typeName);
+          }
+        }
+
+        // Due to the ordering, eliminate any configs with at least one selected.
+        configTypesNotSelected.removeAll(configTypesWithAtLeastOneSelected);
+        if (!configTypesNotSelected.isEmpty()) {
+          LOG.info("The following config types do not have at least one mapping selected: {}", StringUtils.join(configTypesNotSelected, ", "));
+
+          LOG.info("Filtering to only these config types: {}", StringUtils.join(configTypesToEnsureSelected, ", "));
+          // Get the intersection with a subset of configs that are allowed to be selected during the migration.
+          configTypesNotSelected.retainAll(configTypesToEnsureSelected);
+        }
+
+        if (!configTypesNotSelected.isEmpty()) {
+          LOG.info("After filtering, these config types still have no selected mapping and will be fixed: {}", StringUtils.join(configTypesNotSelected, ", "));
+
+          for (String typeName : configTypesNotSelected) {
+            ClusterConfigMappingEntity clusterConfigMappingWithGreatestTimeStamp = null;
+
+            for (ClusterConfigMappingEntity clusterConfigMappingEntity : configMappingEntities) {
+              if (typeName.equals(clusterConfigMappingEntity.getType())) {
+
+                if (null == clusterConfigMappingWithGreatestTimeStamp) {
+                  clusterConfigMappingWithGreatestTimeStamp = clusterConfigMappingEntity;
+                } else {
+                  if (clusterConfigMappingEntity.getCreateTimestamp() >= clusterConfigMappingWithGreatestTimeStamp.getCreateTimestamp()) {
+                    clusterConfigMappingWithGreatestTimeStamp = clusterConfigMappingEntity;
+                  }
+                }
+              }
+            }
+
+            if (null != clusterConfigMappingWithGreatestTimeStamp) {
+              LOG.info("Saving. Config type {} has a mapping with tag {} and greatest timestamp {} that is not selected, so will mark it selected.",
+                  typeName, clusterConfigMappingWithGreatestTimeStamp.getTag(), clusterConfigMappingWithGreatestTimeStamp.getCreateTimestamp());
+              atLeastOneChanged = true;
+              clusterConfigMappingWithGreatestTimeStamp.setSelected(1);
+            }
+          }
+        } else {
+          LOG.info("All config types have at least one mapping that is selected. Nothing to do.");
+        }
+      }
+
+      if (atLeastOneChanged) {
+        clusterDAO.mergeConfigMappings(configMappingEntities);
+      }
+    }
+  }
 }
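
Python sketch of the repair rule ensureConfigTypesHaveAtLeastOneVersionSelected applies (field names are illustrative; the real code works on ClusterConfigMappingEntity objects): when no mapping of a config type is selected, the mapping with the greatest create timestamp is marked selected.

    def ensure_one_selected(mappings):
        if any(m['selected'] == 1 for m in mappings):
            return False
        latest = max(mappings, key=lambda m: m['create_timestamp'])
        latest['selected'] = 1
        return True

    mappings = [{'tag': 'version1', 'create_timestamp': 100, 'selected': 0},
                {'tag': 'version2', 'create_timestamp': 200, 'selected': 0}]
    assert ensure_one_selected(mappings)
    assert mappings[1]['selected'] == 1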

http://git-wip-us.apache.org/repos/asf/ambari/blob/0ed09cd5/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/configuration/spark-javaopts-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/configuration/spark-javaopts-properties.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/configuration/spark-javaopts-properties.xml
index a197e34..c8fe152 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/configuration/spark-javaopts-properties.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/configuration/spark-javaopts-properties.xml
@@ -23,6 +23,9 @@
     <name>content</name>
     <description>Spark-javaopts-properties</description>
     <value> </value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
     <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0ed09cd5/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/SPARK2/configuration/spark2-javaopts-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/SPARK2/configuration/spark2-javaopts-properties.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/SPARK2/configuration/spark2-javaopts-properties.xml
index f8d50fc..e8c3f5a 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/SPARK2/configuration/spark2-javaopts-properties.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/SPARK2/configuration/spark2-javaopts-properties.xml
@@ -23,7 +23,10 @@
     <name>content</name>
     <description>Spark2-javaopts-properties</description>
     <value> </value>
-    <on-ambari-upgrade add="true"/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
   </property>
 </configuration>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/0ed09cd5/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/SPARK2/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/SPARK2/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/SPARK2/metainfo.xml
index bf75f47..1692890 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/SPARK2/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/SPARK2/metainfo.xml
@@ -79,7 +79,7 @@
        
       <configuration-dependencies>
         <config-type>spark2-defaults</config-type>
-	<config-type>spark2-javaopts-properties</config-type>
+        <config-type>spark2-javaopts-properties</config-type>
         <config-type>spark2-thrift-sparkconf</config-type>
       </configuration-dependencies>
       

http://git-wip-us.apache.org/repos/asf/ambari/blob/0ed09cd5/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SPARK/configuration/spark-javaopts-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SPARK/configuration/spark-javaopts-properties.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SPARK/configuration/spark-javaopts-properties.xml
index 77a7282..f7c8c7e 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SPARK/configuration/spark-javaopts-properties.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SPARK/configuration/spark-javaopts-properties.xml
@@ -23,6 +23,9 @@
     <name>content</name>
     <description>Spark-javaopts-properties</description>
     <value> </value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
     <on-ambari-upgrade add="true"/>
   </property>
 </configuration>


[21/50] [abbrv] ambari git commit: AMBARI-21469. Stop Spark fails due to missing stack selector

Posted by jo...@apache.org.
AMBARI-21469. Stop Spark fails due to missing stack selector


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7dbcb754
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7dbcb754
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7dbcb754

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 7dbcb7542e0d6d58fe11b6549e3c2c07574258a6
Parents: 33c279c
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Thu Jul 13 21:40:27 2017 +0200
Committer: Attila Doroszlai <ad...@hortonworks.com>
Committed: Thu Jul 13 21:40:27 2017 +0200

----------------------------------------------------------------------
 .../BigInsights/4.2/services/HBASE/package/scripts/params.py    | 4 ++--
 .../BigInsights/4.2/services/SPARK/package/scripts/params.py    | 5 ++++-
 2 files changed, 6 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/7dbcb754/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HBASE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HBASE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HBASE/package/scripts/params.py
index 9b61674..2c672aa 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HBASE/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HBASE/package/scripts/params.py
@@ -97,10 +97,10 @@ master_heapsize = treat_value_as_mb(master_heapsize_cfg)
 
 hbase_javaopts_properties = config['configurations']['hbase-javaopts-properties']['content']
 
-iop_full_version = get_iop_version()
-
 hbase_javaopts_properties = str(hbase_javaopts_properties)	
 if hbase_javaopts_properties.find('-Diop.version') == -1:
+  current_version = default("/hostLevelParams/current_version", None)
+  iop_full_version = format_stack_version(current_version)
   hbase_javaopts_properties = hbase_javaopts_properties+ ' -Diop.version=' + str(iop_full_version)
 
 regionserver_heapsize = ensure_unit_for_memory(config['configurations']['hbase-env']['hbase_regionserver_heapsize'])

http://git-wip-us.apache.org/repos/asf/ambari/blob/7dbcb754/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SPARK/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SPARK/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SPARK/package/scripts/params.py
index 6d51b57..8ec5af8 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SPARK/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SPARK/package/scripts/params.py
@@ -22,6 +22,7 @@ from resource_management.libraries.functions.default import default
 from resource_management import *
 from resource_management.libraries.functions import conf_select
 from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import format_stack_version
 from spark import *
 import status_params
 
@@ -39,7 +40,9 @@ component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP,
 
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
-iop_full_version = get_iop_version()
+
+current_version = default("/hostLevelParams/current_version", None)
+iop_full_version = format_stack_version(current_version)
 
 # New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
 version = default("/commandParams/version", None)
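
Sketch of the lookup pattern that replaces the removed get_iop_version() helper in both files of this commit: the version comes from /hostLevelParams/current_version in the command JSON and is then normalized via format_stack_version (the exact normalization is up to the resource_management library; the dictionary below only mimics the command structure).

    command = {'hostLevelParams': {'current_version': '4.2.0.0'}}

    def default(path, default_value, config=command):
        # minimal stand-in for resource_management's default(): walk a
        # /-separated path, falling back when any key is missing
        node = config
        for key in path.strip('/').split('/'):
            if not isinstance(node, dict) or key not in node:
                return default_value
            node = node[key]
        return node

    assert default('/hostLevelParams/current_version', None) == '4.2.0.0'
    assert default('/commandParams/version', None) is None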


[44/50] [abbrv] ambari git commit: Merge branch 'branch-2.5' into branch-feature-AMBARI-21348

Posted by jo...@apache.org.
Merge branch 'branch-2.5' into branch-feature-AMBARI-21348


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1e09ad0b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1e09ad0b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1e09ad0b

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 1e09ad0b074f197ef36ea9c9f3a70de443e80e1e
Parents: 061467b 651fe3d
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Mon Jul 17 12:19:45 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Mon Jul 17 12:19:45 2017 -0400

----------------------------------------------------------------------
 .../controllers/groups/GroupsEditCtrl.js        |   3 +
 .../stackVersions/StackVersionsCreateCtrl.js    |   3 +-
 .../libraries/functions/stack_tools.py          |   2 +-
 .../kafka/KafkaTimelineMetricsReporter.java     |   6 +-
 ambari-server/pom.xml                           |   2 +-
 ambari-server/sbin/ambari-server                |   6 +-
 .../ambari/server/checks/CheckDescription.java  |  38 +-
 .../checks/ComponentsExistInRepoCheck.java      | 142 ++++++
 .../AmbariCustomCommandExecutionHelper.java     |  14 +-
 .../AmbariManagementControllerImpl.java         | 122 +++--
 .../internal/HostResourceProvider.java          |   1 +
 .../apache/ambari/server/orm/DBAccessor.java    |  14 +
 .../ambari/server/orm/DBAccessorImpl.java       |  24 +
 .../LdapToPamMigrationHelper.java               |  73 +++
 .../server/security/authorization/Users.java    |   4 +
 .../org/apache/ambari/server/state/Host.java    |   4 +-
 .../ambari/server/state/PropertyInfo.java       |   2 +
 .../ambari/server/state/host/HostImpl.java      |  29 +-
 .../KerberosDescriptorUpdateHelper.java         |   9 +-
 .../server/upgrade/UpgradeCatalog220.java       | 197 +-------
 .../server/upgrade/UpgradeCatalog252.java       |  11 +-
 ambari-server/src/main/python/ambari-server.py  |  10 +-
 .../main/python/ambari_server/setupActions.py   |   1 +
 .../main/python/ambari_server/setupSecurity.py  | 123 ++++-
 .../HDFS/2.1.0.2.0/configuration/hdfs-site.xml  |   6 +
 .../HDFS/2.1.0.2.0/package/scripts/hdfs.py      |   8 +
 .../2.1.0.2.0/package/scripts/hdfs_namenode.py  |  26 +-
 .../2.1.0.2.0/package/scripts/params_linux.py   |  11 +-
 .../2.1.0.2.0/package/scripts/params_windows.py |   7 +
 .../package/templates/include_hosts_list.j2     |  21 +
 .../0.10.0/configuration/ranger-kafka-audit.xml |  58 +++
 .../common-services/KAFKA/0.10.0/kerberos.json  |  79 ++++
 .../common-services/KAFKA/0.10.0/metainfo.xml   |  28 ++
 .../KAFKA/0.8.1/package/scripts/kafka.py        |  12 +
 .../KAFKA/0.8.1/package/scripts/params.py       |   3 +
 .../KAFKA/0.9.0/configuration/kafka-broker.xml  |   2 +-
 .../OOZIE/4.0.0.2.0/package/scripts/oozie.py    |  26 +-
 .../4.0.0.2.0/package/scripts/oozie_server.py   |   4 +-
 .../package/scripts/oozie_server_upgrade.py     |  15 +-
 .../4.0.0.2.0/package/scripts/params_linux.py   |  15 +-
 .../YARN/2.1.0.2.0/configuration/yarn-site.xml  |   6 +
 .../2.1.0.2.0/package/scripts/params_linux.py   |  12 +-
 .../2.1.0.2.0/package/scripts/params_windows.py |  10 +-
 .../package/scripts/resourcemanager.py          |  18 +-
 .../package/templates/include_hosts_list.j2     |  21 +
 .../0.6.0.2.5/configuration/zeppelin-env.xml    |   2 +-
 .../services/HDFS/configuration/hdfs-site.xml   |   6 +
 .../HDFS/package/scripts/hdfs_namenode.py       |  12 +-
 .../0.8/services/HDFS/package/scripts/params.py |  11 +-
 .../package/templates/include_hosts_list.j2     |  21 +
 .../services/YARN/configuration/yarn-site.xml   |   6 +
 .../0.8/services/YARN/package/scripts/params.py |  10 +-
 .../YARN/package/scripts/resourcemanager.py     |   9 +-
 .../package/templates/include_hosts_list.j2     |  21 +
 .../BigInsights/4.0/services/SPARK/metainfo.xml |   2 +-
 .../package/scripts/spark_thrift_server.py      | 125 ++++++
 .../SPARK/package/scripts/thrift_server.py      | 125 ------
 .../4.0/stack-advisor/stack_advisor_25.py       |   5 +-
 .../stacks/BigInsights/4.2.5/metainfo.xml       |   2 +-
 .../HBASE/package/files/draining_servers.rb     | 164 +++++++
 .../HBASE/package/files/hbase-smoke-cleanup.sh  |  23 +
 .../HBASE/package/files/hbaseSmokeVerify.sh     |  34 ++
 .../services/HBASE/package/scripts/__init__.py  |  19 +
 .../services/HBASE/package/scripts/functions.py |  54 +++
 .../services/HBASE/package/scripts/hbase.py     | 234 ++++++++++
 .../HBASE/package/scripts/hbase_client.py       |  82 ++++
 .../HBASE/package/scripts/hbase_decommission.py |  93 ++++
 .../HBASE/package/scripts/hbase_master.py       | 163 +++++++
 .../HBASE/package/scripts/hbase_regionserver.py | 166 +++++++
 .../package/scripts/hbase_restgatewayserver.py  |  83 ++++
 .../HBASE/package/scripts/hbase_service.py      |  93 ++++
 .../HBASE/package/scripts/hbase_upgrade.py      |  41 ++
 .../services/HBASE/package/scripts/params.py    |  29 ++
 .../HBASE/package/scripts/params_linux.py       | 447 +++++++++++++++++++
 .../HBASE/package/scripts/params_windows.py     |  43 ++
 .../package/scripts/phoenix_queryserver.py      |  88 ++++
 .../HBASE/package/scripts/phoenix_service.py    |  55 +++
 .../HBASE/package/scripts/service_check.py      |  95 ++++
 .../HBASE/package/scripts/setup_ranger_hbase.py | 106 +++++
 .../HBASE/package/scripts/status_params.py      |  68 +++
 .../services/HBASE/package/scripts/upgrade.py   |  65 +++
 ...-metrics2-hbase.properties-GANGLIA-MASTER.j2 | 117 +++++
 ...doop-metrics2-hbase.properties-GANGLIA-RS.j2 | 116 +++++
 .../HBASE/package/templates/hbase-smoke.sh.j2   |  44 ++
 .../HBASE/package/templates/hbase.conf.j2       |  35 ++
 .../package/templates/hbase_client_jaas.conf.j2 |  23 +
 .../templates/hbase_grant_permissions.j2        |  40 ++
 .../package/templates/hbase_master_jaas.conf.j2 |  26 ++
 .../templates/hbase_queryserver_jaas.conf.j2    |  26 ++
 .../templates/hbase_regionserver_jaas.conf.j2   |  26 ++
 .../package/templates/hbase_rest_jaas.conf.j2   |  26 ++
 .../HBASE/package/templates/regionservers.j2    |  20 +
 .../services/HBASE/package/scripts/params.py    |   4 +-
 .../BigInsights/4.2/services/KNOX/kerberos.json |   6 -
 .../RANGER/configuration/ranger-admin-site.xml  |  14 +
 .../BigInsights/4.2/services/SPARK/metainfo.xml |   2 +-
 .../services/SPARK/package/scripts/params.py    |   5 +-
 .../package/scripts/spark_thrift_server.py      | 119 +++++
 .../SPARK/package/scripts/thrift_server.py      | 119 -----
 .../services/YARN/configuration/yarn-site.xml   |   6 +
 .../services/YARN/package/scripts/params.py     |   9 +-
 .../YARN/package/scripts/resourcemanager.py     |   9 +-
 .../package/templates/exclude_hosts_list.j2     |  21 +
 .../package/templates/include_hosts_list.j2     |  21 +
 .../hooks/before-ANY/files/changeToSecureUid.sh |  13 +-
 .../before-ANY/scripts/shared_initialization.py |  45 +-
 .../services/YARN/configuration/yarn-site.xml   |   6 +
 .../services/YARN/package/scripts/params.py     |  10 +-
 .../YARN/package/scripts/resourcemanager.py     |   9 +-
 .../package/templates/include_hosts_list.j2     |  21 +
 .../stacks/HDP/2.5/services/KAFKA/metainfo.xml  |   1 +
 .../stacks/HDP/2.5/services/stack_advisor.py    |  11 +-
 .../stacks/HDP/2.5/upgrades/config-upgrade.xml  |   4 +
 .../HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml |   6 +-
 .../stacks/HDP/2.5/upgrades/upgrade-2.6.xml     |   1 +
 .../configuration/application-properties.xml    |  17 +
 .../stacks/HDP/2.6/upgrades/config-upgrade.xml  |   4 +
 .../HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml |   4 +
 .../stacks/HDP/2.6/upgrades/upgrade-2.6.xml     |   1 +
 .../src/main/resources/stacks/stack_advisor.py  |  18 +
 .../checks/ComponentExistsInRepoCheckTest.java  | 329 ++++++++++++++
 .../AmbariManagementControllerTest.java         |   8 +-
 .../KerberosDescriptorUpdateHelperTest.java     |  70 +++
 .../src/test/python/TestAmbariServer.py         |  13 +-
 .../python/stacks/2.0.6/HDFS/test_namenode.py   |   2 +-
 .../stacks/2.0.6/configs/altfs_plus_hdfs.json   |   2 +-
 .../python/stacks/2.0.6/configs/default.json    |   2 +-
 .../2.0.6/configs/default_ams_embedded.json     |   2 +-
 .../2.0.6/configs/default_hive_nn_ha.json       |   2 +-
 .../2.0.6/configs/default_hive_nn_ha_2.json     |   2 +-
 .../2.0.6/configs/default_hive_non_hdfs.json    |   2 +-
 .../2.0.6/configs/default_no_install.json       |   2 +-
 .../2.0.6/configs/default_oozie_mysql.json      |   2 +-
 .../default_update_exclude_file_only.json       |   2 +-
 .../2.0.6/configs/default_with_bucket.json      |   2 +-
 .../python/stacks/2.0.6/configs/flume_22.json   |   2 +-
 .../python/stacks/2.0.6/configs/flume_only.json |   2 +-
 .../stacks/2.0.6/configs/hbase_no_phx.json      |   2 +-
 .../stacks/2.0.6/configs/hbase_with_phx.json    |   2 +-
 .../2.0.6/configs/oozie_existing_sqla.json      |   2 +-
 .../python/stacks/2.0.6/configs/secured.json    |   2 +-
 .../2.0.6/hooks/before-ANY/test_before_any.py   | 294 +++++++-----
 .../test/python/stacks/2.3/configs/ats_1_5.json |   2 +-
 .../stacks/2.5/common/test_stack_advisor.py     | 150 ++++---
 .../python/stacks/2.5/configs/hsi_default.json  |   2 +-
 .../2.5/configs/hsi_default_for_restart.json    |   2 +-
 .../test/python/stacks/2.5/configs/hsi_ha.json  |   2 +-
 .../app/controllers/wizard/step7_controller.js  |  67 +++
 .../configs/stack_config_properties_mapper.js   |  14 +-
 ambari-web/app/styles/application.less          |  15 +
 ...ontrols_service_config_usergroup_with_id.hbs |  27 ++
 ambari-web/app/utils/config.js                  |   3 +
 .../configs/service_configs_by_category_view.js |   6 +
 ambari-web/app/views/common/controls_view.js    |  39 ++
 .../services/YARN/configuration/yarn-site.xml   |   6 +
 .../YARN/package/scripts/params_linux.py        |   9 +-
 .../YARN/package/scripts/params_windows.py      |  10 +-
 .../YARN/package/scripts/resourcemanager.py     |  18 +-
 .../package/templates/include_hosts_list.j2     |  21 +
 159 files changed, 5154 insertions(+), 822 deletions(-)
----------------------------------------------------------------------



[20/50] [abbrv] ambari git commit: AMBARI-21461 Spark thrift server restart fails during migration EU (dili)

Posted by jo...@apache.org.
AMBARI-21461 Spark thrift server restart fails during migration EU (dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/33c279cf
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/33c279cf
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/33c279cf

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 33c279cf272e824d1efdea1aec7bba667b618596
Parents: 65e57a1
Author: Di Li <di...@apache.org>
Authored: Thu Jul 13 13:42:33 2017 -0400
Committer: Di Li <di...@apache.org>
Committed: Thu Jul 13 13:42:33 2017 -0400

----------------------------------------------------------------------
 .../BigInsights/4.0/services/SPARK/metainfo.xml |   2 +-
 .../package/scripts/spark_thrift_server.py      | 125 +++++++++++++++++++
 .../SPARK/package/scripts/thrift_server.py      | 125 -------------------
 .../BigInsights/4.2/services/SPARK/metainfo.xml |   2 +-
 .../package/scripts/spark_thrift_server.py      | 119 ++++++++++++++++++
 .../SPARK/package/scripts/thrift_server.py      | 119 ------------------
 6 files changed, 246 insertions(+), 246 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/33c279cf/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/metainfo.xml
index 02abc62..ef89698 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/metainfo.xml
@@ -83,7 +83,7 @@
             </dependency>
           </dependencies>
           <commandScript>
-            <script>scripts/thrift_server.py</script>
+            <script>scripts/spark_thrift_server.py</script>
             <scriptType>PYTHON</scriptType>
             <timeout>600</timeout>
           </commandScript>

http://git-wip-us.apache.org/repos/asf/ambari/blob/33c279cf/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/spark_thrift_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/spark_thrift_server.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/spark_thrift_server.py
new file mode 100755
index 0000000..39e15d3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/spark_thrift_server.py
@@ -0,0 +1,125 @@
+#!/usr/bin/python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+import socket
+import os
+from resource_management import *
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import conf_select
+from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.core.logger import Logger
+from resource_management.core import shell
+from resource_management.libraries.functions import Direction
+from spark import *
+
+
+class ThriftServer(Script):
+
+  def get_component_name(self):
+    return "spark-thriftserver"
+
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
+      conf_select.select(params.stack_name, "spark", params.version)
+      stack_select.select("spark-thriftserver", params.version)
+
+  def install(self, env):
+    self.install_packages(env)
+    import params
+    env.set_params(params)
+    self.configure(env)
+
+  def stop(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    self.configure(env)
+    daemon_cmd = format('{spark_thrift_server_stop}')
+    if params.security_enabled:
+      Execute(daemon_cmd,
+              user=params.hive_user,
+              environment={'JAVA_HOME': params.java_home}
+              )
+    else:
+      Execute(daemon_cmd,
+              user=params.spark_user,
+              environment={'JAVA_HOME': params.java_home}
+      )
+    if os.path.isfile(params.spark_thrift_server_pid_file):
+      os.remove(params.spark_thrift_server_pid_file)
+
+
+  def start(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    # TODO this looks wrong, maybe just call spark(env)
+    self.configure(env)
+
+    if params.security_enabled:
+      hive_kerberos_keytab = params.config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
+      hive_principal = params.config['configurations']['hive-site']['hive.metastore.kerberos.principal'].replace('_HOST', socket.getfqdn().lower())
+      hive_kinit_cmd = format("{kinit_path_local} -kt {hive_kerberos_keytab} {hive_principal}; ")
+      Execute(hive_kinit_cmd, user=params.hive_user)
+
+    # FIXME! TODO! remove this after soft link bug is fixed:
+    #if not os.path.islink('/usr/iop/current/spark'):
+    #  iop_version = get_iop_version()
+    #  cmd = 'ln -s /usr/iop/' + iop_version + '/spark /usr/iop/current/spark'
+    #  Execute(cmd)
+
+    daemon_cmd = format('{spark_thrift_server_start}')
+    no_op_test = format(
+      'ls {spark_thrift_server_pid_file} >/dev/null 2>&1 && ps -p `cat {spark_thrift_server_pid_file}` >/dev/null 2>&1')
+    if (upgrade_type is not None and params.upgrade_direction == Direction.UPGRADE) or params.security_enabled:
+      Execute(daemon_cmd,
+              user=params.hive_user,
+              environment={'JAVA_HOME': params.java_home},
+              not_if=no_op_test
+              )
+    else:
+      Execute(daemon_cmd,
+              user=params.spark_user,
+              environment={'JAVA_HOME': params.java_home},
+              not_if=no_op_test
+      )
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    pid_file = format("{spark_thrift_server_pid_file}")
+    # Verify that the Spark Thrift Server process recorded in the pid file is alive
+    check_process_status(pid_file)
+
+  # Note: configure() is invoked from install(), start() and stop() above
+  def configure(self, env):
+    import params
+
+    env.set_params(params)
+    spark(env)
+
+if __name__ == "__main__":
+  ThriftServer().execute()
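
The start() above leans on resource_management's not_if guard for idempotent
daemon starts; a minimal standalone sketch of the same idiom follows (the
command, pid file path and JAVA_HOME are hypothetical, not taken from params):

    from resource_management.core.resources.system import Execute

    pid_file = '/var/run/spark/spark-thriftserver.pid'            # hypothetical
    no_op_test = ('ls {0} >/dev/null 2>&1 && '
                  'ps -p `cat {0}` >/dev/null 2>&1').format(pid_file)

    Execute('/usr/iop/current/spark/sbin/start-thriftserver.sh',  # hypothetical
            user='spark',
            environment={'JAVA_HOME': '/usr/jdk64/jdk1.8.0_77'},  # hypothetical
            not_if=no_op_test)

The guard makes restart safe: if a live process already matches the pid file,
the start command becomes a no-op instead of failing.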

http://git-wip-us.apache.org/repos/asf/ambari/blob/33c279cf/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/thrift_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/thrift_server.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/thrift_server.py
deleted file mode 100755
index 39e15d3..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/thrift_server.py
+++ /dev/null
@@ -1,125 +0,0 @@
-#!/usr/bin/python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-import socket
-import os
-from resource_management import *
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import conf_select
-from resource_management.core.exceptions import ComponentIsNotRunning
-from resource_management.core.logger import Logger
-from resource_management.core import shell
-from resource_management.libraries.functions import Direction
-from spark import *
-
-
-class ThriftServer(Script):
-
-  def get_component_name(self):
-    return "spark-thriftserver"
-
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    import params
-
-    env.set_params(params)
-    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
-      conf_select.select(params.stack_name, "spark", params.version)
-      stack_select.select("spark-thriftserver", params.version)
-
-  def install(self, env):
-    self.install_packages(env)
-    import params
-    env.set_params(params)
-    self.configure(env)
-
-  def stop(self, env, upgrade_type=None):
-    import params
-
-    env.set_params(params)
-    self.configure(env)
-    daemon_cmd = format('{spark_thrift_server_stop}')
-    if params.security_enabled:
-      Execute(daemon_cmd,
-              user=params.hive_user,
-              environment={'JAVA_HOME': params.java_home}
-              )
-    else:
-      Execute(daemon_cmd,
-              user=params.spark_user,
-              environment={'JAVA_HOME': params.java_home}
-      )
-    if os.path.isfile(params.spark_thrift_server_pid_file):
-      os.remove(params.spark_thrift_server_pid_file)
-
-
-  def start(self, env, upgrade_type=None):
-    import params
-
-    env.set_params(params)
-    # TODO this looks wrong, maybe just call spark(env)
-    self.configure(env)
-
-    if params.security_enabled:
-      hive_kerberos_keytab = params.config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
-      hive_principal = params.config['configurations']['hive-site']['hive.metastore.kerberos.principal'].replace('_HOST', socket.getfqdn().lower())
-      hive_kinit_cmd = format("{kinit_path_local} -kt {hive_kerberos_keytab} {hive_principal}; ")
-      Execute(hive_kinit_cmd, user=params.hive_user)
-
-    # FIXME! TODO! remove this after soft link bug is fixed:
-    #if not os.path.islink('/usr/iop/current/spark'):
-    #  iop_version = get_iop_version()
-    #  cmd = 'ln -s /usr/iop/' + iop_version + '/spark /usr/iop/current/spark'
-    #  Execute(cmd)
-
-    daemon_cmd = format('{spark_thrift_server_start}')
-    no_op_test = format(
-      'ls {spark_thrift_server_pid_file} >/dev/null 2>&1 && ps -p `cat {spark_thrift_server_pid_file}` >/dev/null 2>&1')
-    if (upgrade_type is not None and params.upgrade_direction == Direction.UPGRADE) or params.security_enabled:
-      Execute(daemon_cmd,
-              user=params.hive_user,
-              environment={'JAVA_HOME': params.java_home},
-              not_if=no_op_test
-              )
-    else:
-      Execute(daemon_cmd,
-              user=params.spark_user,
-              environment={'JAVA_HOME': params.java_home},
-              not_if=no_op_test
-      )
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    pid_file = format("{spark_thrift_server_pid_file}")
-    # Recursively check all existing gmetad pid files
-    check_process_status(pid_file)
-
-  # Note: This function is not called from start()/install()
-  def configure(self, env):
-    import params
-
-    env.set_params(params)
-    spark(env)
-
-if __name__ == "__main__":
-  ThriftServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/33c279cf/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SPARK/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SPARK/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SPARK/metainfo.xml
index 7b6bae1..819ffd4 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SPARK/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SPARK/metainfo.xml
@@ -83,7 +83,7 @@
             </dependency>
           </dependencies>
           <commandScript>
-            <script>scripts/thrift_server.py</script>
+            <script>scripts/spark_thrift_server.py</script>
             <scriptType>PYTHON</scriptType>
             <timeout>600</timeout>
           </commandScript>

http://git-wip-us.apache.org/repos/asf/ambari/blob/33c279cf/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SPARK/package/scripts/spark_thrift_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SPARK/package/scripts/spark_thrift_server.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SPARK/package/scripts/spark_thrift_server.py
new file mode 100755
index 0000000..a0226b5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SPARK/package/scripts/spark_thrift_server.py
@@ -0,0 +1,119 @@
+#!/usr/bin/python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+import socket
+import os
+from resource_management import *
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import conf_select
+from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.core.logger import Logger
+from resource_management.core import shell
+from resource_management.libraries.functions import Direction
+from spark import *
+
+
+class ThriftServer(Script):
+
+  def get_component_name(self):
+    return "spark-thriftserver"
+
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
+      conf_select.select(params.stack_name, "spark", params.version)
+      stack_select.select("spark-thriftserver", params.version)
+
+  def install(self, env):
+    self.install_packages(env)
+    import params
+    env.set_params(params)
+    self.configure(env)
+
+  def stop(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    self.configure(env)
+    daemon_cmd = format('{spark_thrift_server_stop}')
+    Execute(daemon_cmd,
+            user=params.hive_user,
+            environment={'JAVA_HOME': params.java_home}
+    )
+    if os.path.isfile(params.spark_thrift_server_pid_file):
+      os.remove(params.spark_thrift_server_pid_file)
+
+
+  def start(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    # TODO this looks wrong, maybe just call spark(env)
+    self.configure(env)
+
+    if params.security_enabled:
+        hive_kerberos_keytab = params.config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
+        hive_principal = params.config['configurations']['hive-site']['hive.metastore.kerberos.principal'].replace('_HOST', socket.getfqdn().lower())
+        hive_kinit_cmd = format("{kinit_path_local} -kt {hive_kerberos_keytab} {hive_principal}; ")
+        Execute(hive_kinit_cmd, user=params.hive_user)
+
+    # FIXME! TODO! remove this after soft link bug is fixed:
+    #if not os.path.islink('/usr/iop/current/spark'):
+    #  iop_version = get_iop_version()
+    #  cmd = 'ln -s /usr/iop/' + iop_version + '/spark /usr/iop/current/spark'
+    #  Execute(cmd)
+
+    daemon_cmd = format('{spark_thrift_server_start} --conf spark.ui.port={params.spark_thriftserver_ui_port}')
+    no_op_test = format(
+      'ls {spark_thrift_server_pid_file} >/dev/null 2>&1 && ps -p `cat {spark_thrift_server_pid_file}` >/dev/null 2>&1')
+    if upgrade_type is not None and params.upgrade_direction == Direction.DOWNGRADE and not params.security_enabled:
+      Execute(daemon_cmd,
+              user=params.spark_user,
+              environment={'JAVA_HOME': params.java_home},
+              not_if=no_op_test
+              )
+    else:
+      Execute(daemon_cmd,
+              user=params.hive_user,
+              environment={'JAVA_HOME': params.java_home},
+              not_if=no_op_test
+      )
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    pid_file = format("{spark_thrift_server_pid_file}")
+    # Verify that the Spark Thrift Server process recorded in the pid file is alive
+    check_process_status(pid_file)
+
+  # Note: configure() is invoked from install(), start() and stop() above
+  def configure(self, env):
+    import params
+
+    env.set_params(params)
+    spark(env)
+
+if __name__ == "__main__":
+  ThriftServer().execute()
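
Relative to the 4.0 script, the 4.2 start() inverts the user selection: only
an unsecured downgrade runs the daemon as the Spark user. Distilled from the
code above (a sketch of the decision, not a drop-in helper):

    from resource_management.libraries.functions import Direction

    def thrift_server_user(upgrade_type, upgrade_direction, security_enabled,
                           spark_user, hive_user):
        downgrade = (upgrade_type is not None
                     and upgrade_direction == Direction.DOWNGRADE)
        # unsecured downgrade -> spark user; every other case -> hive user
        return spark_user if downgrade and not security_enabled else hive_user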

http://git-wip-us.apache.org/repos/asf/ambari/blob/33c279cf/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SPARK/package/scripts/thrift_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SPARK/package/scripts/thrift_server.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SPARK/package/scripts/thrift_server.py
deleted file mode 100755
index d1a8b67..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SPARK/package/scripts/thrift_server.py
+++ /dev/null
@@ -1,119 +0,0 @@
-#!/usr/bin/python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-import socket
-import os
-from resource_management import *
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import conf_select
-from resource_management.core.exceptions import ComponentIsNotRunning
-from resource_management.core.logger import Logger
-from resource_management.core import shell
-from resource_management.libraries.functions import Direction
-from spark import *
-
-
-class ThriftServer(Script):
-
-  def get_component_name(self):
-    return "spark-thriftserver"
-  
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    import params
-
-    env.set_params(params)
-    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
-      conf_select.select(params.stack_name, "spark", params.version)
-      stack_select.select("spark-thriftserver", params.version)
-
-  def install(self, env):
-    self.install_packages(env)
-    import params
-    env.set_params(params)
-    self.configure(env)
-
-  def stop(self, env, upgrade_type=None):
-    import params
-
-    env.set_params(params)
-    self.configure(env)
-    daemon_cmd = format('{spark_thrift_server_stop}')
-    Execute(daemon_cmd,
-            user=params.hive_user,
-            environment={'JAVA_HOME': params.java_home}
-    )
-    if os.path.isfile(params.spark_thrift_server_pid_file):
-      os.remove(params.spark_thrift_server_pid_file)
-
-
-  def start(self, env, upgrade_type=None):
-    import params
-
-    env.set_params(params)
-    # TODO this looks wrong, maybe just call spark(env)
-    self.configure(env)
-
-    if params.security_enabled:
-        hive_kerberos_keytab = params.config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
-        hive_principal = params.config['configurations']['hive-site']['hive.metastore.kerberos.principal'].replace('_HOST', socket.getfqdn().lower())
-        hive_kinit_cmd = format("{kinit_path_local} -kt {hive_kerberos_keytab} {hive_principal}; ")
-        Execute(hive_kinit_cmd, user=params.hive_user)
-
-    # FIXME! TODO! remove this after soft link bug is fixed:
-    #if not os.path.islink('/usr/iop/current/spark'):
-    #  iop_version = get_iop_version()
-    #  cmd = 'ln -s /usr/iop/' + iop_version + '/spark /usr/iop/current/spark'
-    #  Execute(cmd)
-
-    daemon_cmd = format('{spark_thrift_server_start} --conf spark.ui.port={params.spark_thriftserver_ui_port}')
-    no_op_test = format(
-      'ls {spark_thrift_server_pid_file} >/dev/null 2>&1 && ps -p `cat {spark_thrift_server_pid_file}` >/dev/null 2>&1')
-    if upgrade_type is not None and params.upgrade_direction == Direction.DOWNGRADE and not params.security_enabled:
-      Execute(daemon_cmd,
-              user=params.spark_user,
-              environment={'JAVA_HOME': params.java_home},
-              not_if=no_op_test
-              )
-    else:
-      Execute(daemon_cmd,
-              user=params.hive_user,
-              environment={'JAVA_HOME': params.java_home},
-              not_if=no_op_test
-      )
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    pid_file = format("{spark_thrift_server_pid_file}")
-    # Recursively check all existing gmetad pid files
-    check_process_status(pid_file)
-
-  # Note: This function is not called from start()/install()
-  def configure(self, env):
-    import params
-
-    env.set_params(params)
-    spark(env)
-
-if __name__ == "__main__":
-  ThriftServer().execute()


[29/50] [abbrv] ambari git commit: AMBARI-21477: Remove Falcon proxy entries from Knox kerberos.json (dili)

Posted by jo...@apache.org.
AMBARI-21477: Remove Falcon proxy entries from Knox kerberos.json (dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ec3cf228
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ec3cf228
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ec3cf228

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: ec3cf228542f6d9c41ef63c119dd9284d4d19acf
Parents: 9f0bba6
Author: Di Li <di...@apache.org>
Authored: Fri Jul 14 14:22:18 2017 -0400
Committer: Di Li <di...@apache.org>
Committed: Fri Jul 14 14:22:18 2017 -0400

----------------------------------------------------------------------
 .../stacks/BigInsights/4.2/services/KNOX/kerberos.json         | 6 ------
 1 file changed, 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/ec3cf228/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/KNOX/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/KNOX/kerberos.json b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/KNOX/kerberos.json
index 8ee2acc..6a89af6 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/KNOX/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/KNOX/kerberos.json
@@ -53,12 +53,6 @@
                 "oozie.service.ProxyUserService.proxyuser.${knox-env/knox_user}.groups": "${hadoop-env/proxyuser_group}",
                 "oozie.service.ProxyUserService.proxyuser.${knox-env/knox_user}.hosts": "${clusterHostInfo/knox_gateway_hosts}"
               }
-            },
-            {
-              "falcon-runtime.properties": {
-                "*.falcon.service.ProxyUserService.proxyuser.knox.groups": "${hadoop-env/proxyuser_group}",
-                "*.falcon.service.ProxyUserService.proxyuser.knox.hosts": "${host}"
-              }
             }
           ]
         }
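
The ${config-type/property} tokens in this descriptor are resolved by Ambari
against the cluster's configuration when the Kerberos descriptor is applied;
a rough Python rendering of that substitution (the resolver shape is an
assumption for illustration, not Ambari's implementation):

    import re

    def resolve(template, configs):
        # replace ${type/prop} with its configured value; leave unknown tokens as-is
        return re.sub(r'\$\{([^}]+)\}',
                      lambda m: configs.get(m.group(1), m.group(0)), template)

    configs = {'knox-env/knox_user': 'knox'}  # hypothetical cluster value
    resolve('oozie.service.ProxyUserService.proxyuser.${knox-env/knox_user}.hosts',
            configs)
    # -> 'oozie.service.ProxyUserService.proxyuser.knox.hosts'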


[15/50] [abbrv] ambari git commit: AMBARI-21466. KNOX upgrade fails due to wrong stack root

Posted by jo...@apache.org.
AMBARI-21466. KNOX upgrade fails due to wrong stack root


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a7b6d5a0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a7b6d5a0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a7b6d5a0

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: a7b6d5a0fc72c3d20c50210e887b7294007b0dba
Parents: 0cb9194
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Thu Jul 13 12:54:47 2017 +0200
Committer: Attila Doroszlai <ad...@hortonworks.com>
Committed: Thu Jul 13 12:54:47 2017 +0200

----------------------------------------------------------------------
 .../libraries/functions/stack_tools.py                 | 13 +++++++++++++
 .../upgrades/ChangeStackReferencesAction.java          |  4 +++-
 .../KNOX/0.5.0.2.2/package/scripts/params_linux.py     |  8 ++++++++
 .../KNOX/0.5.0.2.2/package/scripts/upgrade.py          |  2 +-
 .../upgrades/ChangeStackReferencesActionTest.java      |  1 +
 5 files changed, 26 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a7b6d5a0/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
index 420ae11..830598b 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
@@ -120,3 +120,16 @@ def get_stack_root(stack_name, stack_root_json):
     return "/usr/{0}".format(stack_name.lower())
 
   return stack_root[stack_name]
+
+
+def get_stack_name(stack_formatted):
+  """
+  Get the stack name (eg. HDP) from formatted string that may contain stack version (eg. HDP-2.6.1.0-123)
+  """
+  if stack_formatted is None:
+    return None
+
+  if '-' not in stack_formatted:
+    return stack_formatted
+
+  return stack_formatted.split('-')[0]
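
A quick usage sketch for the helper added above (inputs are illustrative):

    from resource_management.libraries.functions.stack_tools import get_stack_name

    get_stack_name('HDP-2.6.1.0-123')  # -> 'HDP'
    get_stack_name('BigInsights')      # -> 'BigInsights' (no version suffix)
    get_stack_name(None)               # -> None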

http://git-wip-us.apache.org/repos/asf/ambari/blob/a7b6d5a0/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ChangeStackReferencesAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ChangeStackReferencesAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ChangeStackReferencesAction.java
index d75d031..03e5caf 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ChangeStackReferencesAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ChangeStackReferencesAction.java
@@ -35,6 +35,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Function;
+import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 
@@ -45,6 +46,7 @@ import com.google.common.collect.Sets;
 public class ChangeStackReferencesAction extends AbstractServerAction {
 
   private static final Logger LOG = LoggerFactory.getLogger(ChangeStackReferencesAction.class);
+  private static final Set<String> SKIP_PROPERTIES = ImmutableSet.of("cluster-env/stack_root");
   private static final Set<Map.Entry<String, String>> REPLACEMENTS = Maps.asMap(
     Sets.newHashSet("/usr/iop", "iop/apps", "iop.version", "IOP_VERSION"),
     new Function<String, String>() {
@@ -83,7 +85,7 @@ public class ChangeStackReferencesAction extends AbstractServerAction {
         for (Map.Entry<String, String> entry : properties.entrySet()) {
           String key = entry.getKey();
           String original = entry.getValue();
-          if (original != null) {
+          if (original != null && !SKIP_PROPERTIES.contains(configType + "/" + key)) {
             String replaced = original;
             for (Map.Entry<String, String> replacement : REPLACEMENTS) {
               replaced = replaced.replace(replacement.getKey(), replacement.getValue());
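
In Python terms, the guard added above behaves roughly as follows (sketch;
the IOP-to-HDP replacement targets are assumed from the commit's context and
are not shown in this hunk):

    SKIP_PROPERTIES = {"cluster-env/stack_root"}
    REPLACEMENTS = {"/usr/iop": "/usr/hdp", "iop/apps": "hdp/apps"}  # assumed

    def rewrite(config_type, key, value):
        if value is None or (config_type + "/" + key) in SKIP_PROPERTIES:
            return value  # leave stack_root untouched during the migration
        for old, new in REPLACEMENTS.items():
            value = value.replace(old, new)
        return value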

http://git-wip-us.apache.org/repos/asf/ambari/blob/a7b6d5a0/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py
index 5a2ef19..9b0bbfc 100644
--- a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py
@@ -28,6 +28,7 @@ from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions.get_port_from_url import get_port_from_url
 from resource_management.libraries.functions.get_stack_version import get_stack_version
+from resource_management.libraries.functions.stack_tools import get_stack_name, get_stack_root
 from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.script.script import Script
 from status_params import *
@@ -67,6 +68,13 @@ stack_supports_core_site_for_ranger_plugin = check_stack_feature(StackFeature.CO
 # DO NOT format it since we need the build number too.
 upgrade_from_version = default("/hostLevelParams/current_version", None)
 
+source_stack = default("/commandParams/source_stack", None)
+source_stack_name = get_stack_name(source_stack)
+if source_stack_name is not None and source_stack_name != stack_name:
+  source_stack_root = get_stack_root(source_stack_name, default('/configurations/cluster-env/stack_root', None))
+else:
+  source_stack_root = stack_root
+
 # server configurations
 # Default value used in HDP 2.3.0.0 and earlier.
 knox_data_dir = '/var/lib/knox/data'

http://git-wip-us.apache.org/repos/asf/ambari/blob/a7b6d5a0/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/upgrade.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/upgrade.py
index 917f340..fa035c7 100644
--- a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/upgrade.py
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/upgrade.py
@@ -91,7 +91,7 @@ def seed_current_data_directory():
     Logger.info("Seeding Knox data from prior version...")
 
     # <stack-root>/2.3.0.0-1234/knox/data/.
-    source_data_dir = os.path.join(params.stack_root, params.upgrade_from_version, "knox", "data", ".")
+    source_data_dir = os.path.join(params.source_stack_root, params.upgrade_from_version, "knox", "data", ".")
 
     # <stack-root>/current/knox-server/data
     target_data_dir = os.path.join(params.stack_root, "current", "knox-server", "data")
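
With hypothetical values, the seeding paths now resolve as (sketch):

    import os

    # assumed: source_stack_root = '/usr/iop', stack_root = '/usr/hdp',
    #          upgrade_from_version = '4.2.0.0-1234'
    source_data_dir = os.path.join('/usr/iop', '4.2.0.0-1234', 'knox', 'data', '.')
    # -> /usr/iop/4.2.0.0-1234/knox/data/.
    target_data_dir = os.path.join('/usr/hdp', 'current', 'knox-server', 'data')
    # -> /usr/hdp/current/knox-server/data

Before this fix, both paths were built from stack_root, so a cross-stack
upgrade looked for the old Knox data under the new stack's root and found
nothing.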

http://git-wip-us.apache.org/repos/asf/ambari/blob/a7b6d5a0/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ChangeStackReferencesActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ChangeStackReferencesActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ChangeStackReferencesActionTest.java
index 592a95f..1104c96 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ChangeStackReferencesActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ChangeStackReferencesActionTest.java
@@ -69,6 +69,7 @@ public class ChangeStackReferencesActionTest {
     originalProperties.put("mapreduce_tar_source", "/usr/iop/current/hadoop-client/mapreduce.tar.gz");
     originalProperties.put("pig_tar_destination_folder", "hdfs:///iop/apps/{{ stack_version }}/pig/");
     originalProperties.put("pig_tar_source", "/usr/iop/current/pig-client/pig.tar.gz");
+    originalProperties.put("stack_root", "/usr/iop");
     expect(clusterEnv.getProperties()).andReturn(originalProperties).anyTimes();
 
     // this is the crux of the test


[25/50] [abbrv] ambari git commit: AMBARI-21426. Apply ZEPPELIN-2698 related changes in Ambari (prabhjyotsingh via Venkata Sairam)

Posted by jo...@apache.org.
AMBARI-21426. Apply ZEPPELIN-2698 related changes in Ambari (prabhjyotsingh via Venkata Sairam)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/bc06736e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/bc06736e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/bc06736e

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: bc06736ef6bcc170a9ebba22f98e834b9c148d6b
Parents: afea7bb
Author: Venkata Sairam <ve...@gmail.com>
Authored: Fri Jul 14 12:35:26 2017 +0530
Committer: Venkata Sairam <ve...@gmail.com>
Committed: Fri Jul 14 12:41:18 2017 +0530

----------------------------------------------------------------------
 .../ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml              | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/bc06736e/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml
index 4032b2c..80ac2bb 100644
--- a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml
@@ -95,7 +95,7 @@ export ZEPPELIN_INTP_CLASSPATH_OVERRIDES="{{external_dependency_conf}}"
 ## Kerberos ticket refresh setting
 ##
 export KINIT_FAIL_THRESHOLD=5
-export LAUNCH_KERBEROS_REFRESH_INTERVAL=1d
+export KERBEROS_REFRESH_INTERVAL=1d
 
 ## Use provided spark installation ##
 ## defining SPARK_HOME makes Zeppelin run spark interpreter process using spark-submit
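
Per ZEPPELIN-2698, the variable Zeppelin now consumes is
KERBEROS_REFRESH_INTERVAL, so the template must export the new name. A generic
sketch of reading it with the same default (assumed semantics, not Zeppelin's
actual launcher code):

    import os

    refresh_interval = os.environ.get('KERBEROS_REFRESH_INTERVAL', '1d')
    kinit_fail_threshold = int(os.environ.get('KINIT_FAIL_THRESHOLD', '5'))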


[49/50] [abbrv] ambari git commit: AMBARI-21502. Cross-stack migration from BigInsights to HDP, EU needs to set hive-site custom.hive.warehouse.mode to 0770 (alejandro)

Posted by jo...@apache.org.
AMBARI-21502. Cross-stack migration from BigInsights to HDP, EU needs to set hive-site custom.hive.warehouse.mode to 0770 (alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d8a5bad1
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d8a5bad1
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d8a5bad1

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: d8a5bad1b4b67367818646e7b65a2419021bb420
Parents: 9cd7fbe
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Mon Jul 17 12:35:27 2017 -0700
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Mon Jul 17 16:42:56 2017 -0700

----------------------------------------------------------------------
 .../stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml    | 9 +++++++--
 .../4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml        | 7 +++++--
 .../stacks/BigInsights/4.2/upgrades/config-upgrade.xml      | 9 +++++++--
 .../4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml          | 7 +++++--
 4 files changed, 24 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/d8a5bad1/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
index 8c009a7..e476d57 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
@@ -134,7 +134,7 @@
     <service name="HIVE">
       <component name="HIVE_SERVER">
         <changes>
-          <definition xsi:type="configure" id="biginsights_4_2_0_0_hive_server_configure_authentication" summary="Configuring hive authentication">
+          <definition xsi:type="configure" id="biginsights_4_2_hive_server_configure_authentication" summary="Configuring hive authentication">
             <type>hive-site</type>
             <transfer operation="delete" delete-key="hive.metastore.event.listeners" if-key="hive.metastore.event.listeners" if-type="hive-site" if-value="com.ibm.biginsights.bigsql.sync.BIEventListener"/>
             <transfer operation="delete" delete-key="hive.server2.authentication.ldap.url" if-key="hive.server2.authentication" if-type="hive-site" if-value="NONE"/>
@@ -167,10 +167,15 @@
             <transfer operation="delete" delete-key="hive.server2.authentication.kerberos.principal" if-key="hive.server2.authentication" if-type="hive-site" if-value="custom"/>
           </definition>
           
-          <definition xsi:type="configure" id="biginsights_4_2_0_0_hive_env_configure" summary="Configuring hive env for MariaDB/RedHat7 support">
+          <definition xsi:type="configure" id="biginsights_4_2_hive_env_configure" summary="Configuring hive-env for MariaDB/RedHat7 support">
             <type>hive-env</type>
             <set key="mariadb_redhat_support" value="true"/>
           </definition>
+
+          <definition xsi:type="configure" id="biginsights_4_2_hive_permissions" summary="Configuring hive-site permissions">
+            <type>hive-site</type>
+            <set key="custom.hive.warehouse.mode" value="0770" if-type="hive-site" if-key="custom.hive.warehouse.mode" if-key-state="absent"/>
+          </definition>
         </changes>
       </component>
       

http://git-wip-us.apache.org/repos/asf/ambari/blob/d8a5bad1/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
index 7c1a9ce..cbd0550 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
@@ -255,10 +255,13 @@
 
       <!-- HIVE -->
       <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server">
-        <task xsi:type="configure" id="biginsights_4_2_0_0_hive_env_configure"/>
+        <task xsi:type="configure" id="biginsights_4_2_hive_env_configure"/>
+      </execute-stage>
+      <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply permission config changes for Hive Server">
+        <task xsi:type="configure" id="biginsights_4_2_hive_permissions"/>
       </execute-stage>
       <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server">
-        <task xsi:type="configure" id="biginsights_4_2_0_0_hive_server_configure_authentication"/>
+        <task xsi:type="configure" id="biginsights_4_2_hive_server_configure_authentication"/>
       </execute-stage>      
       <execute-stage service="HIVE" component="WEBHCAT_SERVER" title="Apply config changes for Hive WebHCat server">
         <task xsi:type="configure" id="biginsights_4_2_webhcat_server_update_environment_configurations" />

http://git-wip-us.apache.org/repos/asf/ambari/blob/d8a5bad1/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
index 310e504..dada6e2 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
@@ -158,7 +158,7 @@
     <service name="HIVE">
       <component name="HIVE_SERVER">
         <changes>
-          <definition xsi:type="configure" id="biginsights_4_2_0_0_hive_server_configure_authentication" summary="Configuring hive authentication">
+          <definition xsi:type="configure" id="biginsights_4_2_hive_server_configure_authentication" summary="Configuring hive authentication">
             <type>hive-site</type>
             <transfer operation="delete" delete-key="hive.metastore.event.listeners" if-key="hive.metastore.event.listeners" if-type="hive-site" if-value="com.ibm.biginsights.bigsql.sync.BIEventListener"/>
             <transfer operation="delete" delete-key="hive.server2.authentication.ldap.url" if-key="hive.server2.authentication" if-type="hive-site" if-value="NONE"/>
@@ -191,10 +191,15 @@
             <transfer operation="delete" delete-key="hive.server2.authentication.kerberos.principal" if-key="hive.server2.authentication" if-type="hive-site" if-value="custom"/>
           </definition>
           
-          <definition xsi:type="configure" id="biginsights_4_2_0_0_hive_env_configure" summary="Configuring hive env for MariaDB/RedHat7 support">
+          <definition xsi:type="configure" id="biginsights_4_2_hive_env_configure" summary="Configuring hive-env for MariaDB/RedHat7 support">
             <type>hive-env</type>
             <set key="mariadb_redhat_support" value="true"/>
           </definition>
+
+          <definition xsi:type="configure" id="biginsights_4_2_hive_permissions" summary="Configuring hive-site permissions">
+            <type>hive-site</type>
+            <set key="custom.hive.warehouse.mode" value="0770" if-type="hive-site" if-key="custom.hive.warehouse.mode" if-key-state="absent"/>
+          </definition>
         </changes>
       </component>
       

http://git-wip-us.apache.org/repos/asf/ambari/blob/d8a5bad1/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
index 5b8f8d9..3ea20ed 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
@@ -212,10 +212,13 @@
 
       <!-- HIVE -->
       <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server">
-        <task xsi:type="configure" id="biginsights_4_2_0_0_hive_env_configure"/>
+        <task xsi:type="configure" id="biginsights_4_2_hive_env_configure"/>
+      </execute-stage>
+      <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply permission config changes for Hive Server">
+        <task xsi:type="configure" id="biginsights_4_2_hive_permissions"/>
       </execute-stage>
       <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server">
-        <task xsi:type="configure" id="biginsights_4_2_0_0_hive_server_configure_authentication"/>
+        <task xsi:type="configure" id="biginsights_4_2_hive_server_configure_authentication"/>
       </execute-stage>      
       <execute-stage service="HIVE" component="WEBHCAT_SERVER" title="Apply config changes for Hive WebHCat server">
         <task xsi:type="configure" id="biginsights_4_2_webhcat_server_update_environment_configurations" />
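
The new biginsights_4_2_hive_permissions definition relies on a conditional
set: the if-key-state="absent" attribute means the default is only written
when the property is not already defined. Roughly, in Python (a sketch of the
semantics, not Ambari's implementation):

    def conditional_set(hive_site, key='custom.hive.warehouse.mode', value='0770'):
        # seed the default only when the property is absent from hive-site
        if key not in hive_site:
            hive_site[key] = value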


[07/50] [abbrv] ambari git commit: AMBARI-21445. Fixes the following bugs: (1). Make Hive Kerberos keytab files group non-readable (2). HiveServer2 Authentication via LDAP to work correctly (3). Remove leading white spaces for the hive-env and hive-interactive-env template.

Posted by jo...@apache.org.
AMBARI-21445. Fixes the following bugs: (1). Make Hive Kerberos keytab files group non-readable (2). HiveServer2 Authentication via LDAP to work correctly (3). Remove leading white spaces for the hive-env and hive-interactive-env template.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/83761d42
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/83761d42
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/83761d42

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 83761d425f437ef1b74a5669c1aa3cad1c074a26
Parents: c2b2210
Author: Swapan Shridhar <ss...@hortonworks.com>
Authored: Tue Jul 11 15:37:08 2017 -0700
Committer: Swapan Shridhar <ss...@hortonworks.com>
Committed: Wed Jul 12 12:02:34 2017 -0700

----------------------------------------------------------------------
 .../0.12.0.2.0/package/scripts/params_linux.py  |   4 +
 .../0.12.0.2.0/package/scripts/service_check.py |   3 +-
 .../services/HIVE/configuration/hive-env.xml    |  78 +++++-----
 .../HIVE/configuration/hive-interactive-env.xml |  62 ++++----
 .../stacks/HDP/2.6/services/HIVE/kerberos.json  | 151 +++++++++++++++++++
 .../stacks/HDP/2.6/services/YARN/kerberos.json  |   2 +-
 6 files changed, 228 insertions(+), 72 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/83761d42/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
index 4b595a8..b2bc34a 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
@@ -846,3 +846,7 @@ ranger_hive_metastore_lookup = default('/configurations/ranger-hive-plugin-prope
 
 if security_enabled:
   hive_metastore_principal_with_host = hive_metastore_principal.replace('_HOST', hostname.lower())
+
+# For ldap - hive_check
+hive_ldap_user= config['configurations']['hive-env'].get('alert_ldap_username','')
+hive_ldap_passwd=config['configurations']['hive-env'].get('alert_ldap_password','')
\ No newline at end of file
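
The .get(..., '') defaults keep the service check usable when the optional
LDAP alert properties are undefined in hive-env; equivalently (sketch,
assuming the command-json layout used throughout these scripts):

    hive_env = config['configurations']['hive-env']
    hive_ldap_user = hive_env.get('alert_ldap_username', '')    # '' when unset
    hive_ldap_passwd = hive_env.get('alert_ldap_password', '')  # '' when unset

These two values are then threaded into the HiveServer2 connectivity check in
service_check.py below.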

http://git-wip-us.apache.org/repos/asf/ambari/blob/83761d42/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py
index a521d6d..db253d3 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py
@@ -122,7 +122,8 @@ class HiveServiceCheckDefault(HiveServiceCheck):
                                params.hive_server_principal, kinit_cmd, params.smokeuser,
                                transport_mode=params.hive_transport_mode, http_endpoint=params.hive_http_endpoint,
                                ssl=params.hive_ssl, ssl_keystore=ssl_keystore,
-                               ssl_password=ssl_password)
+                               ssl_password=ssl_password, ldap_username=params.hive_ldap_user,
+                               ldap_password=params.hive_ldap_passwd)
         Logger.info("Successfully connected to {0} on port {1}".format(address, server_port))
         workable_server_available = True
       except:

http://git-wip-us.apache.org/repos/asf/ambari/blob/83761d42/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-env.xml
index a6cf1bc..929c10d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-env.xml
@@ -60,56 +60,56 @@
     <display-name>hive-env template</display-name>
     <description>This is the jinja template for hive-env.sh file</description>
     <value>
-      export HADOOP_USER_CLASSPATH_FIRST=true  #this prevents old metrics libs from mapreduce lib from bringing in old jar deps overriding HIVE_LIB
-      if [ "$SERVICE" = "cli" ]; then
-      if [ -z "$DEBUG" ]; then
-      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseNUMA -XX:+UseParallelGC -XX:-UseGCOverheadLimit"
-      else
-      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
-      fi
-      fi
+export HADOOP_USER_CLASSPATH_FIRST=true  #this prevents old metrics libs from mapreduce lib from bringing in old jar deps overriding HIVE_LIB
+if [ "$SERVICE" = "cli" ]; then
+  if [ -z "$DEBUG" ]; then
+    export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseNUMA -XX:+UseParallelGC -XX:-UseGCOverheadLimit"
+  else
+    export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+  fi
+fi
 
-      # The heap size of the jvm stared by hive shell script can be controlled via:
+# The heap size of the jvm started by the hive shell script can be controlled via:
 
-      if [ "$SERVICE" = "metastore" ]; then
-      export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore
-      else
-      export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client
-      fi
+if [ "$SERVICE" = "metastore" ]; then
+  export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore
+else
+  export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client
+fi
 
-      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
-      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS{{heap_dump_opts}}"
+export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
+export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS{{heap_dump_opts}}"
 
-      # Larger heap size may be required when running queries over large number of files or partitions.
-      # By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
-      # appropriate for hive server (hwi etc).
+# Larger heap size may be required when running queries over large number of files or partitions.
+# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
+# appropriate for hive server (hwi etc).
 
 
-      # Set HADOOP_HOME to point to a specific hadoop install directory
-      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
 
-      export HIVE_HOME=${HIVE_HOME:-{{hive_home_dir}}}
+export HIVE_HOME=${HIVE_HOME:-{{hive_home_dir}}}
 
-      # Hive Configuration Directory can be controlled by:
-      export HIVE_CONF_DIR=${HIVE_CONF_DIR:-{{hive_config_dir}}}
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR=${HIVE_CONF_DIR:-{{hive_config_dir}}}
 
-      # Folder containing extra libraries required for hive compilation/execution can be controlled by:
-      if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
-        if [ -f "${HIVE_AUX_JARS_PATH}" ]; then
-          export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
-        elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then
-          export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar
-        fi
-      elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then
-        export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar
-      fi
+# Folder containing extra libraries required for hive compilation/execution can be controlled by:
+if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
+  if [ -f "${HIVE_AUX_JARS_PATH}" ]; then
+    export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
+  elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then
+    export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar
+  fi
+elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then
+  export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar
+fi
 
-      export METASTORE_PORT={{hive_metastore_port}}
+export METASTORE_PORT={{hive_metastore_port}}
 
-      {% if sqla_db_used or lib_dir_available %}
-      export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:{{jdbc_libs_dir}}"
-      export JAVA_LIBRARY_PATH="$JAVA_LIBRARY_PATH:{{jdbc_libs_dir}}"
-      {% endif %}
+{% if sqla_db_used or lib_dir_available %}
+export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:{{jdbc_libs_dir}}"
+export JAVA_LIBRARY_PATH="$JAVA_LIBRARY_PATH:{{jdbc_libs_dir}}"
+{% endif %}
     </value>
     <value-attributes>
       <type>content</type>
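
The re-indentation matters because the <value> body is rendered and written
verbatim into the generated hive-env.sh, so indentation inside the XML leaks
into the script. A generic illustration of the hazard (not Ambari's template
pipeline; the path is hypothetical):

    indented = '      export METASTORE_PORT=9083\n'   # as emitted by the old XML
    flush_left = 'export METASTORE_PORT=9083\n'       # as emitted after this fix
    # both are valid shell, but the flush-left form matches how the if/fi
    # blocks above are meant to read in the resulting script
    open('/tmp/hive-env.sh', 'w').write(flush_left)   # hypothetical target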

http://git-wip-us.apache.org/repos/asf/ambari/blob/83761d42/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-env.xml
index ada4859..86720f4 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-env.xml
@@ -100,47 +100,47 @@
     <display-name>hive-interactive-env template</display-name>
     <description>This is the jinja template for hive-env.sh file</description>
     <value>
-      if [ "$SERVICE" = "cli" ]; then
-      if [ -z "$DEBUG" ]; then
-      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
-      else
-      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
-      fi
-      fi
+if [ "$SERVICE" = "cli" ]; then
+  if [ -z "$DEBUG" ]; then
+    export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
+  else
+    export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+  fi
+fi
 
-      # The heap size of the jvm stared by hive shell script can be controlled via:
+# The heap size of the jvm started by hive shell script can be controlled via:
 
-      if [ "$SERVICE" = "metastore" ]; then
-      export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore
-      else
-      export HADOOP_HEAPSIZE={{hive_interactive_heapsize}} # Setting for HiveServer2 and Client
-      fi
+if [ "$SERVICE" = "metastore" ]; then
+  export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore
+else
+  export HADOOP_HEAPSIZE={{hive_interactive_heapsize}} # Setting for HiveServer2 and Client
+fi
 
-      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
-      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS{{heap_dump_opts}}"
+export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
+export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS{{heap_dump_opts}}"
 
-      # Larger heap size may be required when running queries over large number of files or partitions.
-      # By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
-      # appropriate for hive server (hwi etc).
+# Larger heap size may be required when running queries over large number of files or partitions.
+# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
+# appropriate for hive server (hwi etc).
 
 
-      # Set HADOOP_HOME to point to a specific hadoop install directory
-      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
 
-      # Hive Configuration Directory can be controlled by:
-      export HIVE_CONF_DIR={{hive_server_interactive_conf_dir}}
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR={{hive_server_interactive_conf_dir}}
 
-      # Add additional hcatalog jars
-      if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
-        export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
-      else
-        export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-server2-hive2/lib/hive-hcatalog-core.jar
-      fi
+# Add additional hcatalog jars
+if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
+  export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
+else
+  export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-server2-hive2/lib/hive-hcatalog-core.jar
+fi
 
-      export METASTORE_PORT={{hive_metastore_port}}
+export METASTORE_PORT={{hive_metastore_port}}
 
-      # Spark assembly contains a conflicting copy of HiveConf from hive-1.2
-      export HIVE_SKIP_SPARK_ASSEMBLY=true
+# Spark assembly contains a conflicting copy of HiveConf from hive-1.2
+export HIVE_SKIP_SPARK_ASSEMBLY=true
 
     </value>
     <value-attributes>

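The rendered script picks HADOOP_HEAPSIZE from one of two template values depending on which Hive service sources it, then appends the result to HADOOP_CLIENT_OPTS as an -Xmx flag. A small Python mirror of that branch, with made-up heap sizes (the real values come from hive_metastore_heapsize and hive_interactive_heapsize in the rendered template):

# Illustrative mirror of the $SERVICE branch above; the selection really
# happens in shell at service start, and the sizes here are hypothetical.
def hadoop_heapsize(service, metastore_heapsize=1024, interactive_heapsize=2048):
    # The metastore gets its own heap; HiveServer2 Interactive and
    # clients share the interactive setting.
    if service == "metastore":
        return metastore_heapsize
    return interactive_heapsize

for svc in ("metastore", "hiveserver2"):
    print(svc, "-> -Xmx%dm" % hadoop_heapsize(svc))
# metastore -> -Xmx1024m
# hiveserver2 -> -Xmx2048m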
http://git-wip-us.apache.org/repos/asf/ambari/blob/83761d42/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/kerberos.json
new file mode 100644
index 0000000..b6e57e1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/kerberos.json
@@ -0,0 +1,151 @@
+{
+  "services": [
+    {
+      "name": "HIVE",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "hive-site": {
+            "hive.metastore.sasl.enabled": "true",
+            "hive.server2.authentication": "KERBEROS"
+          }
+        },
+        {
+          "ranger-hive-audit": {
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "HIVE_METASTORE",
+          "identities": [
+            {
+              "name": "/HIVE/HIVE_SERVER/hive_server_hive",
+              "principal": {
+                "configuration": "hive-site/hive.metastore.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "hive-site/hive.metastore.kerberos.keytab.file"
+              }
+            }
+          ]
+        },
+        {
+          "name": "HIVE_SERVER",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "hive_server_hive",
+              "principal": {
+                "value": "hive/_HOST@${realm}",
+                "type": "service",
+                "configuration": "hive-site/hive.server2.authentication.kerberos.principal",
+                "local_username": "${hive-env/hive_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hive.service.keytab",
+                "owner": {
+                  "name": "${hive-env/hive_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hive-site/hive.server2.authentication.kerberos.keytab"
+              }
+            },
+            {
+              "name": "atlas_kafka",
+              "reference": "/HIVE/HIVE_SERVER/hive_server_hive",
+              "principal": {
+                "configuration": "hive-atlas-application.properties/atlas.jaas.KafkaClient.option.principal"
+              },
+              "keytab": {
+                "configuration": "hive-atlas-application.properties/atlas.jaas.KafkaClient.option.keyTab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hive-site/hive.server2.authentication.spnego.principal"
+              },
+              "keytab": {
+                "configuration": "hive-site/hive.server2.authentication.spnego.keytab"
+              }
+            },
+            {
+              "name": "ranger_audit",
+              "reference": "/HIVE/HIVE_SERVER/hive_server_hive",
+              "principal": {
+                "configuration": "ranger-hive-audit/xasecure.audit.jaas.Client.option.principal"
+              },
+              "keytab": {
+                "configuration": "ranger-hive-audit/xasecure.audit.jaas.Client.option.keyTab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "HIVE_SERVER_INTERACTIVE",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "/HIVE/HIVE_SERVER/hive_server_hive"
+            },
+            {
+              "name": "/HIVE/HIVE_SERVER/spnego"
+            },
+            {
+              "name": "/YARN/NODEMANAGER/llap_zk_hive"
+            }
+          ]
+        },
+        {
+          "name": "WEBHCAT_SERVER",
+          "identities": [
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "webhcat-site/templeton.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "webhcat-site/templeton.kerberos.keytab"
+              }
+            }
+          ],
+          "configurations": [
+            {
+              "core-site": {
+                "hadoop.proxyuser.HTTP.hosts": "${clusterHostInfo/webhcat_server_host|append(core-site/hadoop.proxyuser.HTTP.hosts, \\\\,, true)}"
+              }
+            },
+            {
+              "webhcat-site": {
+                "templeton.kerberos.secret": "secret",
+                "templeton.hive.properties": "hive.metastore.local=false,hive.metastore.uris=${clusterHostInfo/hive_metastore_host|each(thrift://%s:9083, \\\\,, \\s*\\,\\s*)},hive.metastore.sasl.enabled=true,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse,hive.exec.mode.local.auto=false,hive.metastore.kerberos.principal=hive/_HOST@${realm}"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}

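The descriptor above follows the usual Ambari kerberos.json layout: services contain components, each component lists identities, and each identity's principal/keytab block names the configuration property that receives the resolved value (identities whose name starts with "/" reference shared definitions instead of declaring their own). A quick way to see which properties a descriptor populates, as a hedged sketch that assumes the file is saved locally as kerberos.json:

# Walk services -> components -> identities and print the configuration
# properties each principal/keytab pair fills in. The path is an assumption.
import json

with open("kerberos.json") as fh:
    descriptor = json.load(fh)

for service in descriptor["services"]:
    for component in service.get("components", []):
        for identity in component.get("identities", []):
            print(component["name"], identity["name"],
                  identity.get("principal", {}).get("configuration"),
                  identity.get("keytab", {}).get("configuration"))

Reference identities such as "/HDFS/NAMENODE/hdfs" carry no principal or keytab of their own, so the sketch simply prints None for those fields.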
http://git-wip-us.apache.org/repos/asf/ambari/blob/83761d42/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
index b1501b8..60d50eb 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
@@ -117,7 +117,7 @@
                 },
                 "group": {
                   "name": "${cluster-env/user_group}",
-                  "access": "r"
+                  "access": ""
                 },
                 "configuration": "hive-interactive-site/hive.llap.zk.sm.keytab.file"
               },