Posted to commits@ambari.apache.org by jo...@apache.org on 2017/07/25 14:35:05 UTC

[01/19] ambari git commit: AMBARI-21528. Zookeeper server has incorrect memory setting, missing m in Xmx value (alejandro)

Repository: ambari
Updated Branches:
  refs/heads/branch-feature-AMBARI-21450 54c4b4957 -> 54c57662b


AMBARI-21528. Zookeeper server has incorrect memory setting, missing m in Xmx value (alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d4244f52
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d4244f52
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d4244f52

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: d4244f5206feca1bb6001eea6d550494f69e8762
Parents: 212ee1c
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Wed Jul 19 16:01:42 2017 -0700
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Thu Jul 20 14:16:42 2017 -0700

----------------------------------------------------------------------
 .../ZOOKEEPER/3.4.5/package/scripts/params_linux.py             | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/d4244f52/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/package/scripts/params_linux.py
index 0780d2e..b8e8f78 100644
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/package/scripts/params_linux.py
@@ -68,7 +68,10 @@ zk_log_dir = config['configurations']['zookeeper-env']['zk_log_dir']
 zk_data_dir = config['configurations']['zoo.cfg']['dataDir']
 zk_pid_dir = status_params.zk_pid_dir
 zk_pid_file = status_params.zk_pid_file
-zk_server_heapsize_value = default('configurations/zookeeper-env/zk_server_heapsize', "1024m")
+zk_server_heapsize_value = str(default('configurations/zookeeper-env/zk_server_heapsize', "1024"))
+zk_server_heapsize_value = zk_server_heapsize_value.strip()
+if len(zk_server_heapsize_value) > 0 and zk_server_heapsize_value[-1].isdigit():
+  zk_server_heapsize_value = zk_server_heapsize_value + "m"
 zk_server_heapsize = format("-Xmx{zk_server_heapsize_value}")
 
 client_port = default('/configurations/zoo.cfg/clientPort', None)
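
For reference, a minimal standalone sketch of the normalization this patch introduces: append "m" only when the configured value is a bare number, leaving values that already carry a unit untouched. The helper name is hypothetical; the real script reads the value through Ambari's default() and format() utilities as shown in the diff.

# Sketch only: hypothetical helper mirroring the heapsize handling above.
def normalize_heapsize(raw, fallback="1024"):
    value = str(raw if raw is not None else fallback).strip()
    # Append "m" when the value ends in a digit, so "1024" -> "1024m",
    # while "2048m" or "1g" pass through unchanged.
    if value and value[-1].isdigit():
        value += "m"
    return value

assert normalize_heapsize("1024") == "1024m"
assert normalize_heapsize("2048m") == "2048m"
assert normalize_heapsize(None) == "1024m"
print("-Xmx" + normalize_heapsize("1024"))  # -Xmx1024m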

[07/19] ambari git commit: Cross-stack upgrade: fix Oozie restart failing with an ext-2.2.zip missing error, add get_stack_name to __all__ in stack_tools.py, and disable BigInsights in the UI (Alejandro Fernandez via smohanty)

Posted by jo...@apache.org.
Cross-stack upgrade: fix Oozie restart failing with an ext-2.2.zip missing error, add get_stack_name to __all__ in stack_tools.py, and disable BigInsights in the UI (Alejandro Fernandez via smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0ad9d587
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0ad9d587
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0ad9d587

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 0ad9d587d0d7cf88c53ae08c8ce9d7351be4a505
Parents: 421f3c6
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Fri Jul 21 12:07:34 2017 -0700
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Fri Jul 21 12:07:34 2017 -0700

----------------------------------------------------------------------
 .../OOZIE/4.0.0.2.0/package/scripts/oozie.py    | 40 +++++++++++++-------
 .../package/scripts/oozie_server_upgrade.py     | 35 ++++++++++-------
 .../stacks/2.0.6/OOZIE/test_oozie_server.py     | 32 ++++++++++++----
 3 files changed, 74 insertions(+), 33 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/0ad9d587/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
index 142e962..aa5bc30 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
@@ -215,18 +215,25 @@ def oozie_ownership():
     group = params.user_group
   )
 
-def get_oozie_ext_zip_source_path(upgrade_type, params):
+def get_oozie_ext_zip_source_paths(upgrade_type, params):
   """
-  Get the Oozie ext zip file path from the source stack.
+  Get an ordered list of Oozie ext zip file paths from the source stack.
   :param upgrade_type:  Upgrade type will be None if not in the middle of a stack upgrade.
   :param params: Expected to contain fields for ext_js_path, upgrade_direction, source_stack_name, and ext_js_file
-  :return: Source path to use for Oozie extension zip file
+  :return: Source paths to use for Oozie extension zip file
   """
   # Default to /usr/share/$TARGETSTACK-oozie/ext-2.2.zip
+  paths = []
   source_ext_js_path = params.ext_js_path
+  # Preferred location used by HDP and BigInsights 4.2.5
   if upgrade_type is not None and params.upgrade_direction == Direction.UPGRADE:
     source_ext_js_path = "/usr/share/" + params.source_stack_name.upper() + "-oozie/" + params.ext_js_file
-  return source_ext_js_path
+  paths.append(source_ext_js_path)
+
+  # Alternate location used by BigInsights 4.2.0 when migrating to another stack.
+  paths.append("/var/lib/oozie/" + params.ext_js_file)
+
+  return paths
 
 def oozie_server_specific(upgrade_type):
   import params
@@ -262,16 +269,23 @@ def oozie_server_specific(upgrade_type):
   )
 
   configure_cmds = []
-  # Default to /usr/share/$TARGETSTACK-oozie/ext-2.2.zip
-  source_ext_zip_path = get_oozie_ext_zip_source_path(upgrade_type, params)
-
-  configure_cmds.append(('cp', source_ext_zip_path, params.oozie_libext_dir))
-  configure_cmds.append(('chown', format('{oozie_user}:{user_group}'), format('{oozie_libext_dir}/{ext_js_file}')))
+  # Default to /usr/share/$TARGETSTACK-oozie/ext-2.2.zip as the first path
+  source_ext_zip_paths = get_oozie_ext_zip_source_paths(upgrade_type, params)
+  
+  # Copy the first oozie ext-2.2.zip file that is found.
+  # This uses a list to handle the cases when migrating from some versions of BigInsights to HDP.
+  if source_ext_zip_paths is not None:
+    for source_ext_zip_path in source_ext_zip_paths:
+      if os.path.isfile(source_ext_zip_path):
+        configure_cmds.append(('cp', source_ext_zip_path, params.oozie_libext_dir))
+        configure_cmds.append(('chown', format('{oozie_user}:{user_group}'), format('{oozie_libext_dir}/{ext_js_file}')))
+
+        Execute(configure_cmds,
+                not_if=no_op_test,
+                sudo=True,
+                )
+        break
   
-  Execute( configure_cmds,
-    not_if  = no_op_test,
-    sudo = True,
-  )
   
   Directory(params.oozie_webapps_conf_dir,
             owner = params.oozie_user,

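For reference, a minimal sketch of the selection logic this patch adds: build an ordered candidate list and copy the first ext-2.2.zip that actually exists. The default values below are illustrative stand-ins for the params fields (ext_js_path, source_stack_name, ext_js_file) used in the diff.

import os

# Sketch only: ordered candidates, preferred stack location first when upgrading.
def candidate_ext_zip_paths(upgrading, ext_js_path="/usr/share/HDP-oozie/ext-2.2.zip",
                            source_stack_name="BigInsights", ext_js_file="ext-2.2.zip"):
    paths = [ext_js_path]
    if upgrading:
        # Preferred location used by HDP and BigInsights 4.2.5
        paths[0] = "/usr/share/" + source_stack_name.upper() + "-oozie/" + ext_js_file
    # Alternate location used by BigInsights 4.2.0 when migrating to another stack
    paths.append("/var/lib/oozie/" + ext_js_file)
    return paths

def first_existing(paths):
    for path in paths:
        if os.path.isfile(path):
            return path
    return None  # oozie.py simply skips the copy; the upgrade path raises Fail instead
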
http://git-wip-us.apache.org/repos/asf/ambari/blob/0ad9d587/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server_upgrade.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server_upgrade.py
index 719fb32..eb57c22 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server_upgrade.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server_upgrade.py
@@ -86,22 +86,31 @@ class OozieUpgrade(Script):
         raise Fail("There are no files at {0} matching {1}".format(
           hadoop_client_new_lib_dir, hadoop_lzo_pattern))
 
-    # Copy ext ZIP to libext dir
-    # Default to /usr/share/$TARGETSTACK-oozie/ext-2.2.zip
-    source_ext_zip_path = oozie.get_oozie_ext_zip_source_path(upgrade_type, params)
-
     # something like <stack-root>/current/oozie-server/libext/ext-2.2.zip
     oozie_ext_zip_target_path = os.path.join(params.oozie_libext_dir, params.ext_js_file)
 
-    if not os.path.isfile(source_ext_zip_path):
-      raise Fail("Unable to copy {0} because it does not exist".format(source_ext_zip_path))
-
-    Logger.info("Copying {0} to {1}".format(source_ext_zip_path, params.oozie_libext_dir))
-    Execute(("cp", source_ext_zip_path, params.oozie_libext_dir), sudo=True)
-    Execute(("chown", format("{oozie_user}:{user_group}"), oozie_ext_zip_target_path), sudo=True)
-    File(oozie_ext_zip_target_path,
-         mode=0644
-    )
+    # Copy ext ZIP to libext dir
+    # Default to /usr/share/$TARGETSTACK-oozie/ext-2.2.zip as the first path
+    source_ext_zip_paths = oozie.get_oozie_ext_zip_source_paths(upgrade_type, params)
+
+    found_at_least_one_oozie_ext_file = False
+
+    # Copy the first oozie ext-2.2.zip file that is found.
+    # This uses a list to handle the cases when migrating from some versions of BigInsights to HDP.
+    if source_ext_zip_paths is not None:
+      for source_ext_zip_path in source_ext_zip_paths:
+        if os.path.isfile(source_ext_zip_path):
+          found_at_least_one_oozie_ext_file = True
+          Logger.info("Copying {0} to {1}".format(source_ext_zip_path, params.oozie_libext_dir))
+          Execute(("cp", source_ext_zip_path, params.oozie_libext_dir), sudo=True)
+          Execute(("chown", format("{oozie_user}:{user_group}"), oozie_ext_zip_target_path), sudo=True)
+          File(oozie_ext_zip_target_path,
+               mode=0644
+               )
+          break
+
+    if not found_at_least_one_oozie_ext_file:
+      raise Fail("Unable to find any Oozie source extension files from the following paths {0}".format(source_ext_zip_paths))
 
     # Redownload jdbc driver to a new current location
     oozie.download_database_library_if_needed()

http://git-wip-us.apache.org/repos/asf/ambari/blob/0ad9d587/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
index 426c36a..5ef6ad9 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
@@ -44,7 +44,9 @@ class TestOozieServer(RMFTestCase):
 
   @patch.object(shell, "call")
   @patch('os.path.exists', new=MagicMock(side_effect = [False, True, False, True]))
-  def test_configure_default(self, call_mocks):
+  @patch("os.path.isfile")
+  def test_configure_default(self, isfile_mock, call_mocks):
+    isfile_mock.return_value = True
     call_mocks = MagicMock(return_value=(0, "New Oozie WAR file with added"))
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server.py",
                        classname = "OozieServer",
@@ -59,7 +61,10 @@ class TestOozieServer(RMFTestCase):
 
   @patch.object(shell, "call")
   @patch('os.path.exists', new=MagicMock(side_effect = [False, True, False, False, True]))
-  def test_configure_default_mysql(self, call_mocks):
+  @patch("os.path.isfile")
+  def test_configure_default_mysql(self, isfile_mock, iscall_mocks):
+    # Mock call when checking if need to copy oozie ext-2.2.zip file
+    isfile_mock.side_effect = [True, False]
     call_mocks = MagicMock(return_value=(0, "New Oozie WAR file with added"))
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server.py",
                        classname = "OozieServer",
@@ -281,7 +286,10 @@ class TestOozieServer(RMFTestCase):
 
   @patch.object(shell, "call")
   @patch('os.path.exists', new=MagicMock(side_effect = [False, True, False, False, True]))
-  def test_configure_existing_sqla(self, call_mocks):
+  @patch("os.path.isfile")
+  def test_configure_existing_sqla(self, isfile_mock, call_mocks):
+    # Mock call when checking if need to copy oozie ext-2.2.zip file
+    isfile_mock.side_effect = [True, False]
     call_mocks = MagicMock(return_value=(0, "New Oozie WAR file with added"))
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server.py",
                        classname = "OozieServer",
@@ -602,7 +610,10 @@ class TestOozieServer(RMFTestCase):
 
   @patch.object(shell, "call")
   @patch('os.path.exists', new=MagicMock(side_effect = [False, True, False, True, True, True]))
-  def test_configure_secured(self, call_mocks):
+  @patch("os.path.isfile")
+  def test_configure_secured(self, isfile_mock, call_mocks):
+    # Mock call when checking if need to copy oozie ext-2.2.zip file
+    isfile_mock.side_effect = [True, False]
     call_mocks = MagicMock(return_value=(0, "New Oozie WAR file with added"))
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server.py",
                        classname = "OozieServer",
@@ -616,8 +627,11 @@ class TestOozieServer(RMFTestCase):
     self.assertNoMoreResources()
 
   @patch.object(shell, "call")
+  @patch("os.path.isfile")
   @patch('os.path.exists', new=MagicMock(side_effect = [False, True, False, True, True, True]))
-  def test_configure_secured_ha(self, call_mocks):
+  def test_configure_secured_ha(self, isfile_mock, call_mocks):
+    # Mock call when checking if need to copy oozie ext-2.2.zip file
+    isfile_mock.side_effects = [True, False]
     call_mocks = MagicMock(return_value=(0, "New Oozie WAR file with added"))
 
     config_file = "stacks/2.0.6/configs/secured.json"
@@ -650,7 +664,8 @@ class TestOozieServer(RMFTestCase):
   @patch("os.path.isfile")
   @patch('os.path.exists', new=MagicMock(side_effect = [False, True, False, True, True, True]))
   def test_start_secured(self, isfile_mock, call_mocks):
-    isfile_mock.return_value = True
+    # Mock call when checking if need to copy oozie ext-2.2.zip file
+    isfile_mock.side_effects = [True, False]
     call_mocks = MagicMock(return_value=(0, "New Oozie WAR file with added"))
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server.py",
                          classname = "OozieServer",
@@ -1133,7 +1148,10 @@ class TestOozieServer(RMFTestCase):
 
   @patch.object(shell, "call")
   @patch('os.path.exists', new=MagicMock(side_effect = [False, True, False, True, True]))
-  def test_configure_default_hdp22(self, call_mocks):
+  @patch("os.path.isfile")
+  def test_configure_default_hdp22(self, isfile_mock, call_mocks):
+    # Mock call when checking if need to copy oozie ext-2.2.zip file
+    isfile_mock.side_effect = [True, False]
     call_mocks = MagicMock(return_value=(0, "New Oozie WAR file with added"))
     config_file = "stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:

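For reference, the new @patch("os.path.isfile") decorators feed the copy loop a scripted sequence of answers via the mock's side_effect attribute (each call to the mock pops the next value; note that assigning to a misspelled attribute such as side_effects is silently accepted by MagicMock and has no effect on call behavior). A minimal standalone illustration of the pattern, using the standard-library unittest.mock and a hypothetical first_existing() stand-in for the copy loop:

import os
from unittest.mock import patch

# Hypothetical stand-in for the oozie.py loop: return the first existing path.
def first_existing(paths):
    for path in paths:
        if os.path.isfile(path):
            return path
    return None

@patch("os.path.isfile")
def run_check(isfile_mock):
    # First candidate "exists", second does not.
    isfile_mock.side_effect = [True, False]
    result = first_existing(["/usr/share/BIGINSIGHTS-oozie/ext-2.2.zip",
                             "/var/lib/oozie/ext-2.2.zip"])
    assert result == "/usr/share/BIGINSIGHTS-oozie/ext-2.2.zip"

run_check()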

[11/19] ambari git commit: AMBARI-21157. Logging cleanup around reading config properties file

Posted by jo...@apache.org.
AMBARI-21157. Logging cleanup around reading config properties file


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e57032d2
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e57032d2
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e57032d2

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: e57032d26633b0a451b123d3109672b638dc40a9
Parents: 61df697
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Sat Jul 22 18:09:54 2017 -0700
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Sat Jul 22 18:09:54 2017 -0700

----------------------------------------------------------------------
 .../org/apache/ambari/server/configuration/Configuration.java  | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e57032d2/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
index fa5ed71..7567755 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
@@ -3946,7 +3946,11 @@ public class Configuration {
       if (result != null) {
         password = new String(result);
       } else {
-        LOG.error("Cannot read password for alias = " + aliasStr);
+        if (CredentialProvider.isAliasString(aliasStr)) {
+          LOG.error("Cannot read password for alias = " + aliasStr);
+        } else {
+          LOG.warn("Raw password provided, not an alias. It cannot be read from credential store.");
+        }
       }
     }
     return password;


[10/19] ambari git commit: AMBARI-21555 Hive restart fails to restart MySQL after Ambari upgrade against IOP 4.2.5 (dili)

Posted by jo...@apache.org.
AMBARI-21555 Hive restart fails to restart MySQL after Ambari upgrade against IOP 4.2.5 (dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/61df6972
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/61df6972
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/61df6972

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 61df6972d3105baf2dd7718fe0d26c711d23beed
Parents: f0b0314
Author: Di Li <di...@apache.org>
Authored: Fri Jul 21 22:35:58 2017 -0400
Committer: Di Li <di...@apache.org>
Committed: Fri Jul 21 22:35:58 2017 -0400

----------------------------------------------------------------------
 .../server/upgrade/UpgradeCatalog252.java       | 31 ++++++++++++++++++++
 1 file changed, 31 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/61df6972/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
index ea1b034..ca7ab3f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
@@ -27,6 +27,7 @@ import java.util.Map;
 import java.util.Set;
 
 import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
 import org.apache.ambari.server.orm.dao.ClusterDAO;
 import org.apache.ambari.server.orm.entities.ClusterConfigMappingEntity;
@@ -36,6 +37,7 @@ import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.PropertyInfo;
+import org.apache.ambari.server.state.StackId;
 import org.apache.commons.lang.StringUtils;
 
 import com.google.common.collect.Sets;
@@ -63,6 +65,9 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
 
   private static final String CLUSTER_ENV = "cluster-env";
 
+  private static final String HIVE_ENV = "hive-env";
+  private static final String MARIADB_REDHAT_SUPPORT = "mariadb_redhat_support";
+
   private static final List<String> configTypesToEnsureSelected = Arrays.asList("spark2-javaopts-properties");
   
   /**
@@ -119,6 +124,7 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
   protected void executeDMLUpdates() throws AmbariException, SQLException {
     resetStackToolsAndFeatures();
     ensureConfigTypesHaveAtLeastOneVersionSelected();
+    updateMariaDBRedHatSupportHive();
   }
 
   /**
@@ -296,4 +302,29 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
       }
     }
   }
+
+  /**
+   * Insert mariadb_redhat_support to hive-env if the current stack is BigInsights 4.2.5
+   * @throws AmbariException
+   * */
+  private void updateMariaDBRedHatSupportHive() throws AmbariException {
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+    Clusters clusters = ambariManagementController.getClusters();
+    if (clusters != null) {
+      Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
+      if (clusterMap != null && !clusterMap.isEmpty()) {
+        for (final Cluster cluster : clusterMap.values()) {
+          Set<String> installedServices = cluster.getServices().keySet();
+          if (installedServices.contains("HIVE")) {
+            StackId currentStack = cluster.getCurrentStackVersion();
+            if (currentStack.getStackName().equals("BigInsights") && currentStack.getStackVersion().equals("4.2.5")) {
+              Map<String, String> newProperties = new HashMap<>();
+              newProperties.put(MARIADB_REDHAT_SUPPORT, "true");
+              updateConfigurationPropertiesForCluster(cluster, HIVE_ENV, newProperties, true, false);
+            }
+          }
+        }
+      }
+    }
+  }
 }


[17/19] ambari git commit: AMBARI-21564 Select version page load failed on cluster installation. (atkach)

Posted by jo...@apache.org.
AMBARI-21564 Select version page load failed on cluster installation. (atkach)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/37b5f232
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/37b5f232
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/37b5f232

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 37b5f2323375242846364b3979a563a1a20f2d5d
Parents: 439da8b
Author: Andrii Tkach <at...@apache.org>
Authored: Mon Jul 24 18:35:48 2017 +0300
Committer: Andrii Tkach <at...@apache.org>
Committed: Tue Jul 25 12:34:04 2017 +0300

----------------------------------------------------------------------
 ambari-web/app/views/wizard/step1_view.js       |  4 +-
 ambari-web/test/views/wizard/step1_view_test.js | 48 +++++++++++++++++++-
 2 files changed, 49 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/37b5f232/ambari-web/app/views/wizard/step1_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/wizard/step1_view.js b/ambari-web/app/views/wizard/step1_view.js
index bfabc02..3e74f23 100644
--- a/ambari-web/app/views/wizard/step1_view.js
+++ b/ambari-web/app/views/wizard/step1_view.js
@@ -197,10 +197,10 @@ App.WizardStep1View = Em.View.extend({
    * @type {bool}
    */
   isNoOsFilled: function () {
-    if (this.get('controller.selectedStack.useRedhatSatellite')) {
+    var operatingSystems = this.get('controller.selectedStack.operatingSystems');
+    if (this.get('controller.selectedStack.useRedhatSatellite') || Em.isNone(operatingSystems)) {
       return false;
     }
-    var operatingSystems = this.get('controller.selectedStack.operatingSystems');
     var selectedOS = operatingSystems.filterProperty('isSelected', true);
     return selectedOS.everyProperty('isNotFilled', true);
   }.property('controller.selectedStack.operatingSystems.@each.isSelected', 'controller.selectedStack.operatingSystems.@each.isNotFilled', 'controller.selectedStack.useRedhatSatellite'),

http://git-wip-us.apache.org/repos/asf/ambari/blob/37b5f232/ambari-web/test/views/wizard/step1_view_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/wizard/step1_view_test.js b/ambari-web/test/views/wizard/step1_view_test.js
index 1eceab1..84957a9 100644
--- a/ambari-web/test/views/wizard/step1_view_test.js
+++ b/ambari-web/test/views/wizard/step1_view_test.js
@@ -22,7 +22,9 @@ require('views/wizard/step1_view');
 var view;
 
 function getView() {
-  return App.WizardStep1View.create();
+  return App.WizardStep1View.create({
+    controller: Em.Object.create()
+  });
 }
 
 describe('App.WizardStep1View', function () {
@@ -55,4 +57,48 @@ describe('App.WizardStep1View', function () {
       expect(repository.get('validation')).to.be.empty;
     });
   });
+
+  describe('#isNoOsFilled', function() {
+
+    it('should be false when useRedhatSatellite is true', function() {
+      view.set('controller.selectedStack', Em.Object.create({
+        useRedhatSatellite: true
+      }));
+      expect(view.get('isNoOsFilled')).to.be.false;
+    });
+
+    it('should be false when operatingSystems is null', function() {
+      view.set('controller.selectedStack', Em.Object.create({
+        useRedhatSatellite: false,
+        operatingSystems: null
+      }));
+      expect(view.get('isNoOsFilled')).to.be.false;
+    });
+
+    it('should be false when operatingSystem is filled', function() {
+      view.set('controller.selectedStack', Em.Object.create({
+        useRedhatSatellite: false,
+        operatingSystems: [
+          Em.Object.create({
+            isSelected: true,
+            isNotFilled: false
+          })
+        ]
+      }));
+      expect(view.get('isNoOsFilled')).to.be.false;
+    });
+
+    it('should be true when operatingSystem is not filled', function() {
+      view.set('controller.selectedStack', Em.Object.create({
+        useRedhatSatellite: false,
+        operatingSystems: [
+          Em.Object.create({
+            isSelected: true,
+            isNotFilled: true
+          })
+        ]
+      }));
+      expect(view.get('isNoOsFilled')).to.be.true;
+    });
+  });
 });
\ No newline at end of file


[12/19] ambari git commit: AMBARI-21539. Resource Manager fails to restart properly during an IOP to HDP upgrade (amagyar)

Posted by jo...@apache.org.
AMBARI-21539. Resource Manager fails to restart properly during an IOP to HDP upgrade (amagyar)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/dae2c389
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/dae2c389
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/dae2c389

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: dae2c3890314903a47dbba075f63945a2acfea1b
Parents: e57032d
Author: Attila Magyar <am...@hortonworks.com>
Authored: Sun Jul 23 19:31:01 2017 +0200
Committer: Attila Magyar <am...@hortonworks.com>
Committed: Sun Jul 23 19:31:01 2017 +0200

----------------------------------------------------------------------
 .../BigInsights/4.2.5/upgrades/config-upgrade.xml    |  8 ++++++++
 .../4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml | 15 ++++++++++++++-
 .../BigInsights/4.2/upgrades/config-upgrade.xml      |  8 ++++++++
 .../4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml   | 14 ++++++++++++++
 4 files changed, 44 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/dae2c389/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
index 13c0518..87a2aef 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
@@ -33,6 +33,10 @@
             <transfer operation="delete" delete-key="dfs.namenode.https-address" if-type="hdfs-site" if-key="dfs.nameservices" if-key-state="present"/>
             <transfer operation="delete" delete-key="dfs.namenode.rpc-address" if-type="core-site" if-key="fs.defaultFS" if-key-state="present"/> <!-- Make sure to use the existing fs.defaultFS for the non-HA case -->
           </definition>
+          <definition xsi:type="configure" id="hadoop_env_zkfc_security_opts" summary="Adding HDFS ZKFC Security ACLs">
+            <type>hadoop-env</type>
+            <insert key="content" value="{% if hadoop_zkfc_opts is defined %} export HADOOP_ZKFC_OPTS=&quot;{{hadoop_zkfc_opts}} $HADOOP_ZKFC_OPTS&quot; {% endif %}" insert-type="append" newline-before="true" newline-after="true" />
+          </definition>
         </changes>
       </component>
     </service>
@@ -44,6 +48,10 @@
             <type>yarn-site</type>
             <replace key="yarn.nodemanager.aux-services" find=",spark_shuffle" replace-with=""/>
           </definition>
+          <definition xsi:type="configure" id="yarn_env_security_opts" summary="Adding YARN Security ACLs">
+            <type>yarn-env</type>
+            <insert key="content" value="{% if rm_security_opts is defined %} YARN_OPTS=&quot;{{rm_security_opts}} $YARN_OPTS&quot; {% endif %}" insert-type="append" newline-before="true" newline-after="true" />
+          </definition>
         </changes>
       </component>
     </service>

http://git-wip-us.apache.org/repos/asf/ambari/blob/dae2c389/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
index 1932364..a7ddd5c 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
@@ -123,7 +123,6 @@
           <function>prepare_express_upgrade</function>
         </task>
       </execute-stage>
-
       <execute-stage service="RANGER" component="RANGER_ADMIN" title="Backup Ranger Database">
         <task xsi:type="manual">
           <message>Before continuing, please backup the Ranger Admin database on the following host(s): {{hosts.all}}.</message>
@@ -206,12 +205,26 @@
         <task xsi:type="configure" id="biginsights_4_2_namenode_adjustments" />
       </execute-stage>
 
+      <!-- HDFS -->
+      <execute-stage service="HDFS" component="NAMENODE" title="Adding HDFS ZKFC Security ACLs">
+        <task xsi:type="configure" id="hadoop_env_zkfc_security_opts">
+          <summary>Adding HDFS ZKFC Security ACLs</summary>
+        </task>
+      </execute-stage>
+
       <!-- YARN -->
       <execute-stage service="YARN" component="RESOURCEMANAGER" title="Update Yarn configurations">
         <task xsi:type="configure" id="biginsights_4_2_yarn_config_update" />
       </execute-stage>
 
       <!-- YARN -->
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Adding YARN Security ACLs">
+        <task xsi:type="configure" id="yarn_env_security_opts">
+          <summary>Adding YARN Security ACLs</summary>
+        </task>
+      </execute-stage>
+
+      <!-- YARN -->
       <execute-stage service="MAPREDUCE2" component="HISTORYSERVER" title="Apply config changes for HistoryServer">
         <task xsi:type="configure" id="biginsights_4_2_mapreduce_application_framework_patch" />
       </execute-stage>

http://git-wip-us.apache.org/repos/asf/ambari/blob/dae2c389/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
index 297aebb..6d00a90 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
@@ -125,6 +125,10 @@
             <transfer operation="delete" delete-key="dfs.namenode.https-address" if-type="hdfs-site" if-key="dfs.nameservices" if-key-state="present"/>
             <transfer operation="delete" delete-key="dfs.namenode.rpc-address" if-type="core-site" if-key="fs.defaultFS" if-key-state="present"/> <!-- Make sure to use the existing fs.defaultFS for the non-HA case -->
           </definition>
+          <definition xsi:type="configure" id="hadoop_env_zkfc_security_opts" summary="Adding HDFS ZKFC Security ACLs">
+            <type>hadoop-env</type>
+            <insert key="content" value="{% if hadoop_zkfc_opts is defined %} export HADOOP_ZKFC_OPTS=&quot;{{hadoop_zkfc_opts}} $HADOOP_ZKFC_OPTS&quot; {% endif %}" insert-type="append" newline-before="true" newline-after="true" />
+          </definition>
         </changes>
       </component>
     </service>
@@ -136,6 +140,10 @@
             <type>yarn-site</type>
             <replace key="yarn.nodemanager.aux-services" find=",spark_shuffle" replace-with=""/>
           </definition>
+          <definition xsi:type="configure" id="yarn_env_security_opts" summary="Adding YARN Security ACLs">
+            <type>yarn-env</type>
+            <insert key="content" value="{% if rm_security_opts is defined %} YARN_OPTS=&quot;{{rm_security_opts}} $YARN_OPTS&quot; {% endif %}" insert-type="append" newline-before="true" newline-after="true" />
+          </definition>
         </changes>
       </component>
     </service>

http://git-wip-us.apache.org/repos/asf/ambari/blob/dae2c389/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
index b6d39ea..cedc90f 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
@@ -200,12 +200,26 @@
         <task xsi:type="configure" id="biginsights_4_2_namenode_adjustments" />
       </execute-stage>
 
+      <!-- HDFS -->
+      <execute-stage service="HDFS" component="NAMENODE" title="Adding HDFS ZKFC Security ACLs">
+        <task xsi:type="configure" id="hadoop_env_zkfc_security_opts">
+          <summary>Adding HDFS ZKFC Security ACLs</summary>
+        </task>
+      </execute-stage>
+
       <!-- YARN -->
       <execute-stage service="YARN" component="RESOURCEMANAGER" title="Update Yarn configurations">
         <task xsi:type="configure" id="biginsights_4_2_yarn_config_update" />
       </execute-stage>
 
       <!-- YARN -->
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Adding YARN Security ACLs">
+        <task xsi:type="configure" id="yarn_env_security_opts">
+          <summary>Adding YARN Security ACLs</summary>
+        </task>
+      </execute-stage>
+
+      <!-- YARN -->
       <execute-stage service="MAPREDUCE2" component="HISTORYSERVER" title="Apply config changes for HistoryServer">
         <task xsi:type="configure" id="biginsights_4_2_mapreduce_application_framework_patch" />
       </execute-stage>


[08/19] ambari git commit: AMBARI-21553 Oozie server failed to restart post IOP 4.2 migration (dili)

Posted by jo...@apache.org.
AMBARI-21553 Oozie server failed to restart post IOP 4.2 migration (dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2441f68a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2441f68a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2441f68a

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 2441f68ab6ea54c7ea30e0ae1a01ff9866d04106
Parents: 0ad9d58
Author: Di Li <di...@apache.org>
Authored: Fri Jul 21 16:08:31 2017 -0400
Committer: Di Li <di...@apache.org>
Committed: Fri Jul 21 16:08:31 2017 -0400

----------------------------------------------------------------------
 .../stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml          | 2 +-
 .../resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml  | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/2441f68a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
index c8549b3..13c0518 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
@@ -215,7 +215,7 @@
             <type>oozie-env</type>
             <replace key="content" find="export CATALINA_BASE=${CATALINA_BASE:-{{oozie_server_dir}}}" replace-with="export CATALINA_BASE={{oozie_server_dir}}" />            
           </definition>
-          <definition xsi:type="configure" id="biginsights_4_2_oozie_server_update_environment_tomcat" summary="Updating oozie env">
+          <definition xsi:type="configure" id="biginsights_4_2_oozie_server_update_environment_tomcat" summary="Update oozie env">
             <type>oozie-env</type>
             <replace key="content" find="/usr/lib/bigtop-tomcat7-7.0.75" replace-with="/usr/lib/bigtop-tomcat" />
           </definition>

http://git-wip-us.apache.org/repos/asf/ambari/blob/2441f68a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
index bf6dd6b..297aebb 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
@@ -238,8 +238,9 @@
           <definition xsi:type="configure" id="biginsights_4_2_oozie_server_update_environment_configurations" summary="Update oozie env">
             <type>oozie-env</type>
             <replace key="content" find="export CATALINA_BASE=${CATALINA_BASE:-{{oozie_server_dir}}}" replace-with="export CATALINA_BASE={{oozie_server_dir}}" />            
+            <replace key="content" find="export CATALINA_OPTS=&quot;$CATALINA_OPTS -Xms{{oozie_initial_heapsize}}" replace-with="export CATALINA_OPTS=&quot;$CATALINA_OPTS"/>
           </definition>
-          <definition xsi:type="configure" id="biginsights_4_2_oozie_server_update_environment_tomcat" summary="Updating oozie env">
+          <definition xsi:type="configure" id="biginsights_4_2_oozie_server_update_environment_tomcat" summary="Update oozie env">
             <type>oozie-env</type>
             <replace key="content" find="/usr/lib/bigtop-tomcat7-7.0.75" replace-with="/usr/lib/bigtop-tomcat" />
           </definition>

[19/19] ambari git commit: Merge branch 'branch-2.5' into branch-feature-AMBARI-21450

Posted by jo...@apache.org.
Merge branch 'branch-2.5' into branch-feature-AMBARI-21450


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/54c57662
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/54c57662
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/54c57662

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 54c57662b70aac566f50f6ae36bd625e9fc480a3
Parents: 54c4b49 cfedbdf
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Tue Jul 25 10:27:39 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Tue Jul 25 10:27:39 2017 -0400

----------------------------------------------------------------------
 .../resource_management/core/files/killtree.sh  |   2 +-
 .../libraries/providers/hdfs_resource.py        |  48 ++-
 .../server/api/services/AmbariMetaInfo.java     |  34 +-
 .../server/configuration/Configuration.java     |   6 +-
 .../AmbariCustomCommandExecutionHelper.java     |   5 +
 .../AmbariManagementControllerImpl.java         |  43 ++-
 .../ambari/server/controller/AmbariServer.java  |  10 +-
 .../state/alert/AlertDefinitionFactory.java     |  31 +-
 .../ambari/server/state/alert/ScriptSource.java |   4 +
 .../upgrade/AbstractFinalUpgradeCatalog.java    |  58 ++++
 .../server/upgrade/FinalUpgradeCatalog.java     |  45 +--
 .../server/upgrade/SchemaUpgradeHelper.java     |   1 +
 .../server/upgrade/UpdateAlertScriptPaths.java  |  48 +++
 .../server/upgrade/UpgradeCatalog252.java       |  31 ++
 .../main/python/ambari_server/setupSecurity.py  |   4 -
 .../OOZIE/4.0.0.2.0/package/scripts/oozie.py    |  40 ++-
 .../package/scripts/oozie_server_upgrade.py     |  35 +-
 .../3.4.5/package/scripts/params_linux.py       |   5 +-
 .../before-ANY/scripts/shared_initialization.py |   2 +-
 .../4.0/services/AMBARI_METRICS/alerts.json     |  25 --
 .../before-ANY/scripts/shared_initialization.py |   2 +-
 .../4.2.5/upgrades/config-upgrade.xml           |  10 +-
 .../upgrades/nonrolling-upgrade-to-hdp-2.6.xml  |  17 +-
 .../4.2/services/AMBARI_METRICS/alerts.json     |  25 --
 .../configuration/ams-grafana-env.xml           |  93 ++++++
 .../configuration/ams-grafana-ini.xml           | 320 +++++++++++++++++++
 .../configuration/ams-hbase-site.xml            |  90 +++++-
 .../AMBARI_METRICS/configuration/ams-site.xml   | 215 +++++++++++--
 .../configuration/ams-ssl-client.xml            |  51 +++
 .../configuration/ams-ssl-server.xml            |  80 +++++
 .../BigInsights/4.2/upgrades/config-upgrade.xml |  11 +-
 .../upgrades/nonrolling-upgrade-to-hdp-2.6.xml  |  14 +
 .../stacks/HDP/2.6/services/stack_advisor.py    |  33 +-
 .../server/api/services/AmbariMetaInfoTest.java |   6 +-
 .../src/test/python/TestAmbariServer.py         |  13 +-
 .../stacks/2.0.6/OOZIE/test_oozie_server.py     |  32 +-
 .../app/mappers/components_state_mapper.js      |   5 +
 .../app/mappers/service_metrics_mapper.js       |   3 +
 ambari-web/app/messages.js                      |   3 +
 ambari-web/app/models/service/hbase.js          |   3 +
 .../templates/main/service/services/hbase.hbs   |  20 ++
 .../app/views/main/service/services/hbase.js    |   8 +
 ambari-web/app/views/wizard/step1_view.js       |   4 +-
 ambari-web/test/views/wizard/step1_view_test.js |  48 ++-
 44 files changed, 1323 insertions(+), 260 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/54c57662/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/54c57662/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/54c57662/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/54c57662/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
index a1d2b87,77f683c..35e5b10
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
@@@ -178,8 -178,22 +178,9 @@@ public class SchemaUpgradeHelper 
        // Add binding to each newly created catalog
        Multibinder<UpgradeCatalog> catalogBinder =
          Multibinder.newSetBinder(binder(), UpgradeCatalog.class);
 -      catalogBinder.addBinding().to(UpgradeCatalog200.class);
 -      catalogBinder.addBinding().to(UpgradeCatalog210.class);
 -      catalogBinder.addBinding().to(UpgradeCatalog211.class);
 -      catalogBinder.addBinding().to(UpgradeCatalog212.class);
 -      catalogBinder.addBinding().to(UpgradeCatalog2121.class);
 -      catalogBinder.addBinding().to(UpgradeCatalog220.class);
 -      catalogBinder.addBinding().to(UpgradeCatalog221.class);
 -      catalogBinder.addBinding().to(UpgradeCatalog222.class);
 -      catalogBinder.addBinding().to(UpgradeCatalog230.class);
 -      catalogBinder.addBinding().to(UpgradeCatalog240.class);
 -      catalogBinder.addBinding().to(UpgradeCatalog2402.class);
 -      catalogBinder.addBinding().to(UpgradeCatalog242.class);
 -      catalogBinder.addBinding().to(UpgradeCatalog250.class);
        catalogBinder.addBinding().to(UpgradeCatalog251.class);
        catalogBinder.addBinding().to(UpgradeCatalog252.class);
+       catalogBinder.addBinding().to(UpdateAlertScriptPaths.class);
        catalogBinder.addBinding().to(FinalUpgradeCatalog.class);
  
        EventBusSynchronizer.synchronizeAmbariEventPublisher(binder());

http://git-wip-us.apache.org/repos/asf/ambari/blob/54c57662/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
index 0fcf779,ca7ab3f..086d335
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
@@@ -27,18 -27,18 +27,20 @@@ import java.util.Map
  import java.util.Set;
  
  import org.apache.ambari.server.AmbariException;
+ import org.apache.ambari.server.controller.AmbariManagementController;
  import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
  import org.apache.ambari.server.orm.dao.ClusterDAO;
 -import org.apache.ambari.server.orm.entities.ClusterConfigMappingEntity;
 +import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
  import org.apache.ambari.server.orm.entities.ClusterEntity;
  import org.apache.ambari.server.state.Cluster;
  import org.apache.ambari.server.state.Clusters;
  import org.apache.ambari.server.state.Config;
  import org.apache.ambari.server.state.ConfigHelper;
  import org.apache.ambari.server.state.PropertyInfo;
+ import org.apache.ambari.server.state.StackId;
  import org.apache.commons.lang.StringUtils;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
  
  import com.google.common.collect.Sets;
  import com.google.inject.Inject;
@@@ -63,8 -65,11 +65,11 @@@ public class UpgradeCatalog252 extends 
  
    private static final String CLUSTER_ENV = "cluster-env";
  
+   private static final String HIVE_ENV = "hive-env";
+   private static final String MARIADB_REDHAT_SUPPORT = "mariadb_redhat_support";
+ 
    private static final List<String> configTypesToEnsureSelected = Arrays.asList("spark2-javaopts-properties");
 -  
 +
    /**
     * Logger.
     */

http://git-wip-us.apache.org/repos/asf/ambari/blob/54c57662/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
----------------------------------------------------------------------
diff --cc ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
index 5ee3c8a,bfb3c31..229b864
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
@@@ -1691,14 -1867,11 +1691,14 @@@ public class AmbariMetaInfoTest 
      Clusters clusters = injector.getInstance(Clusters.class);
      Cluster cluster = clusters.getClusterById(clusterId);
      cluster.setDesiredStackVersion(
 -        new StackId(STACK_NAME_HDP, "2.0.6"));
 +        new StackId(STACK_NAME_HDP, stackVersion));
 +
 +    RepositoryVersionEntity repositoryVersion = ormHelper.getOrCreateRepositoryVersion(
 +        cluster.getCurrentStackVersion(), repoVersion);
  
 -    cluster.addService("HDFS");
 +    cluster.addService("HDFS", repositoryVersion);
  
-     metaInfo.reconcileAlertDefinitions(clusters);
+     metaInfo.reconcileAlertDefinitions(clusters, false);
  
      AlertDefinitionDAO dao = injector.getInstance(AlertDefinitionDAO.class);
      List<AlertDefinitionEntity> definitions = dao.findAll(clusterId);
@@@ -1722,10 -1895,10 +1722,10 @@@
        dao.merge(definition);
      }
  
-     metaInfo.reconcileAlertDefinitions(clusters);
+     metaInfo.reconcileAlertDefinitions(clusters, false);
  
      definitions = dao.findAll();
 -    assertEquals(12, definitions.size());
 +    assertEquals(13, definitions.size());
  
      for (AlertDefinitionEntity definition : definitions) {
        assertEquals(28, definition.getScheduleInterval().intValue());
@@@ -1753,10 -1926,10 +1753,10 @@@
  
      // verify the new definition is found (6 HDFS + 1 new one)
      definitions = dao.findAllEnabled(cluster.getClusterId());
 -    assertEquals(12, definitions.size());
 +    assertEquals(13, definitions.size());
  
      // reconcile, which should disable our bad definition
-     metaInfo.reconcileAlertDefinitions(clusters);
+     metaInfo.reconcileAlertDefinitions(clusters, false);
  
      // find all enabled for the cluster should find 6
      definitions = dao.findAllEnabled(cluster.getClusterId());

http://git-wip-us.apache.org/repos/asf/ambari/blob/54c57662/ambari-web/app/messages.js
----------------------------------------------------------------------


[04/19] ambari git commit: AMBARI-21053 Reverting Beacon stack advisor recommendation (mugdha)

Posted by jo...@apache.org.
AMBARI-21053 Reverting Beacon stack advisor recommendation (mugdha)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/caec39f1
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/caec39f1
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/caec39f1

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: caec39f10d8be65b79f8eafacef4abf82dc33766
Parents: a7cc380
Author: Mugdha Varadkar <mu...@apache.org>
Authored: Fri Jul 21 12:38:57 2017 +0530
Committer: Mugdha Varadkar <mu...@apache.org>
Committed: Fri Jul 21 12:46:45 2017 +0530

----------------------------------------------------------------------
 .../stacks/HDP/2.6/services/stack_advisor.py    | 33 +-------------------
 1 file changed, 1 insertion(+), 32 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/caec39f1/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
index c1c1f13..cc5fa92 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
@@ -41,42 +41,11 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
         "HIVE": self.recommendHIVEConfigurations,
         "HBASE": self.recommendHBASEConfigurations,
         "YARN": self.recommendYARNConfigurations,
-        "KAFKA": self.recommendKAFKAConfigurations,
-        "BEACON": self.recommendBEACONConfigurations
+        "KAFKA": self.recommendKAFKAConfigurations
       }
       parentRecommendConfDict.update(childRecommendConfDict)
       return parentRecommendConfDict
 
-  def recommendBEACONConfigurations(self, configurations, clusterData, services, hosts):
-    beaconEnvProperties = self.getSiteProperties(services['configurations'], 'beacon-env')
-    putbeaconEnvProperty = self.putProperty(configurations, "beacon-env", services)
-
-    # database URL and driver class recommendations
-    if beaconEnvProperties and self.checkSiteProperties(beaconEnvProperties, 'beacon_store_driver') and self.checkSiteProperties(beaconEnvProperties, 'beacon_database'):
-      putbeaconEnvProperty('beacon_store_driver', self.getDBDriver(beaconEnvProperties['beacon_database']))
-    if beaconEnvProperties and self.checkSiteProperties(beaconEnvProperties, 'beacon_store_db_name', 'beacon_store_url') and self.checkSiteProperties(beaconEnvProperties, 'beacon_database'):
-      beaconServerHost = self.getHostWithComponent('BEACON', 'BEACON_SERVER', services, hosts)
-      beaconDBConnectionURL = beaconEnvProperties['beacon_store_url']
-      protocol = self.getProtocol(beaconEnvProperties['beacon_database'])
-      oldSchemaName = getOldValue(self, services, "beacon-env", "beacon_store_db_name")
-      oldDBType = getOldValue(self, services, "beacon-env", "beacon_database")
-      # under these if constructions we are checking if beacon server hostname available,
-      # if it's default db connection url with "localhost" or if schema name was changed or if db type was changed (only for db type change from default mysql to existing mysql)
-      # or if protocol according to current db type differs with protocol in db connection url(other db types changes)
-      if beaconServerHost is not None:
-        if (beaconDBConnectionURL and "//localhost" in beaconDBConnectionURL) or oldSchemaName or oldDBType or (protocol and beaconDBConnectionURL and not beaconDBConnectionURL.startswith(protocol)):
-          dbConnection = self.getDBConnectionStringBeacon(beaconEnvProperties['beacon_database']).format(beaconServerHost['Hosts']['host_name'], beaconEnvProperties['beacon_store_db_name'])
-          putbeaconEnvProperty('beacon_store_url', dbConnection)
-
-  def getDBConnectionStringBeacon(self, databaseType):
-    driverDict = {
-      'NEW DERBY DATABASE': 'jdbc:derby:${{beacon.data.dir}}/${{beacon.store.db.name}}-db;create=true',
-      'EXISTING MYSQL DATABASE': 'jdbc:mysql://{0}/{1}',
-      'EXISTING MYSQL / MARIADB DATABASE': 'jdbc:mysql://{0}/{1}',
-      'EXISTING ORACLE DATABASE': 'jdbc:oracle:thin:@//{0}:1521/{1}'
-    }
-    return driverDict.get(databaseType.upper())
-
   def recommendAtlasConfigurations(self, configurations, clusterData, services, hosts):
     super(HDP26StackAdvisor, self).recommendAtlasConfigurations(configurations, clusterData, services, hosts)
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]

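For reference, the dispatch pattern that remains after this revert is a dict of service name to recommender method: the child stack advisor copies the parent's mapping and overlays its own entries, so dropping the "BEACON" key leaves that service to whatever the parent advisor provides (if anything). A minimal illustrative sketch with hypothetical class and handler names:

# Sketch only: parent mapping overlaid by the child's handlers.
class ParentAdvisor(object):
    def getServiceConfigurationRecommenderDict(self):
        return {"YARN": self.recommendYARNConfigurations}

    def recommendYARNConfigurations(self, configurations, clusterData, services, hosts):
        configurations.setdefault("yarn-site", {})["handled_by"] = "parent"

class ChildAdvisor(ParentAdvisor):
    def getServiceConfigurationRecommenderDict(self):
        recommenders = super(ChildAdvisor, self).getServiceConfigurationRecommenderDict()
        recommenders.update({"YARN": self.recommendYARNConfigurations})  # override parent's handler
        return recommenders

    def recommendYARNConfigurations(self, configurations, clusterData, services, hosts):
        configurations.setdefault("yarn-site", {})["handled_by"] = "child"

configs = {}
ChildAdvisor().getServiceConfigurationRecommenderDict()["YARN"](configs, None, None, None)
print(configs)  # {'yarn-site': {'handled_by': 'child'}}
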
[18/19] ambari git commit: AMBARI-21559. Alert definition paths should be updated on upgrade

Posted by jo...@apache.org.
AMBARI-21559. Alert definition paths should be updated on upgrade


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/cfedbdf7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/cfedbdf7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/cfedbdf7

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: cfedbdf7812cab362ae33b4cdc9b5215837acb70
Parents: 37b5f23
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Mon Jul 24 13:06:29 2017 +0200
Committer: Attila Doroszlai <ad...@hortonworks.com>
Committed: Tue Jul 25 16:20:57 2017 +0200

----------------------------------------------------------------------
 .../server/api/services/AmbariMetaInfo.java     | 34 ++++++++++--
 .../ambari/server/controller/AmbariServer.java  | 10 ++--
 .../state/alert/AlertDefinitionFactory.java     | 31 ++++++++---
 .../ambari/server/state/alert/ScriptSource.java |  4 ++
 .../upgrade/AbstractFinalUpgradeCatalog.java    | 58 ++++++++++++++++++++
 .../server/upgrade/FinalUpgradeCatalog.java     | 45 ++++-----------
 .../server/upgrade/SchemaUpgradeHelper.java     |  3 +-
 .../server/upgrade/UpdateAlertScriptPaths.java  | 48 ++++++++++++++++
 .../server/api/services/AmbariMetaInfoTest.java |  6 +-
 9 files changed, 182 insertions(+), 57 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/cfedbdf7/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
index 2d13cba..91e3ed0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
@@ -35,6 +35,7 @@ import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Scanner;
 import java.util.Set;
 
@@ -73,6 +74,9 @@ import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.StackInfo;
 import org.apache.ambari.server.state.alert.AlertDefinition;
 import org.apache.ambari.server.state.alert.AlertDefinitionFactory;
+import org.apache.ambari.server.state.alert.ScriptSource;
+import org.apache.ambari.server.state.alert.Source;
+import org.apache.ambari.server.state.alert.SourceType;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
 import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptor;
@@ -1086,9 +1090,10 @@ public class AmbariMetaInfo {
    * service.
    *
    * @param clusters all clusters
-   * @throws AmbariException
+   * @param updateScriptPaths whether existing script-based alerts should be updated
+   *        with possibly new paths from the stack definition
    */
-  public void reconcileAlertDefinitions(Clusters clusters)
+  public void reconcileAlertDefinitions(Clusters clusters, boolean updateScriptPaths)
       throws AmbariException {
 
     Map<String, Cluster> clusterMap = clusters.getClusters();
@@ -1167,6 +1172,26 @@ public class AmbariMetaInfo {
           LOG.debug(
               "The alert named {} has been modified from the stack definition and will not be merged",
               stackDefinition.getName());
+
+          if (updateScriptPaths) {
+            Source databaseSource = databaseDefinition.getSource();
+            Source stackSource = stackDefinition.getSource();
+            if (databaseSource.getType() == SourceType.SCRIPT && stackSource.getType() == SourceType.SCRIPT) {
+              ScriptSource databaseScript = (ScriptSource) databaseSource;
+              ScriptSource stackScript = (ScriptSource) stackSource;
+              String oldPath = databaseScript.getPath();
+              String newPath = stackScript.getPath();
+              if (!Objects.equals(oldPath, newPath)) {
+                databaseScript.setPath(newPath);
+                entity = alertDefinitionFactory.mergeSource(databaseScript, entity);
+                persist.add(entity);
+
+                LOG.info("Updating script path for the alert named {} from '{}' to '{}'",
+                  stackDefinition.getName(), oldPath, newPath
+                );
+              }
+            }
+          }
         }
       }
 
@@ -1196,10 +1221,7 @@ public class AmbariMetaInfo {
 
       // persist any new or updated definition
       for (AlertDefinitionEntity entity : persist) {
-        if (LOG.isDebugEnabled()) {
-          LOG.info("Merging Alert Definition {} into the database",
-              entity.getDefinitionName());
-        }
+        LOG.debug("Merging Alert Definition {} into the database", entity.getDefinitionName());
         alertDefinitionDao.createOrUpdate(entity);
       }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/cfedbdf7/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
index e8c986b..1ebcac2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
@@ -19,9 +19,6 @@
 package org.apache.ambari.server.controller;
 
 
-import javax.crypto.BadPaddingException;
-import javax.servlet.DispatcherType;
-
 import java.io.File;
 import java.io.IOException;
 import java.net.Authenticator;
@@ -33,6 +30,9 @@ import java.util.Enumeration;
 import java.util.Map;
 import java.util.logging.LogManager;
 
+import javax.crypto.BadPaddingException;
+import javax.servlet.DispatcherType;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.StateRecoveryManager;
 import org.apache.ambari.server.StaticallyInject;
@@ -99,10 +99,10 @@ import org.apache.ambari.server.security.CertificateManager;
 import org.apache.ambari.server.security.SecurityFilter;
 import org.apache.ambari.server.security.authorization.AmbariLdapAuthenticationProvider;
 import org.apache.ambari.server.security.authorization.AmbariLocalUserProvider;
+import org.apache.ambari.server.security.authorization.AmbariPamAuthenticationProvider;
 import org.apache.ambari.server.security.authorization.AmbariUserAuthorizationFilter;
 import org.apache.ambari.server.security.authorization.PermissionHelper;
 import org.apache.ambari.server.security.authorization.Users;
-import org.apache.ambari.server.security.authorization.AmbariPamAuthenticationProvider;
 import org.apache.ambari.server.security.authorization.internal.AmbariInternalAuthenticationProvider;
 import org.apache.ambari.server.security.ldap.AmbariLdapDataPopulator;
 import org.apache.ambari.server.security.unsecured.rest.CertificateDownload;
@@ -523,7 +523,7 @@ public class AmbariServer {
       LOG.info(clusterDump.toString());
 
       LOG.info("********* Reconciling Alert Definitions **********");
-      ambariMetaInfo.reconcileAlertDefinitions(clusters);
+      ambariMetaInfo.reconcileAlertDefinitions(clusters, false);
 
       LOG.info("********* Initializing ActionManager **********");
       ActionManager manager = injector.getInstance(ActionManager.class);

http://git-wip-us.apache.org/repos/asf/ambari/blob/cfedbdf7/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinitionFactory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinitionFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinitionFactory.java
index acbb881..769920f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinitionFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinitionFactory.java
@@ -249,7 +249,6 @@ public class AlertDefinitionFactory {
     entity.setDefinitionName(definition.getName());
     entity.setEnabled(definition.isEnabled());
     entity.setHostIgnored(definition.isHostIgnored());
-    entity.setHash(UUID.randomUUID().toString());
     entity.setLabel(definition.getLabel());
     entity.setDescription(definition.getDescription());
     entity.setScheduleInterval(definition.getInterval());
@@ -263,24 +262,42 @@ public class AlertDefinitionFactory {
 
     entity.setScope(scope);
 
-    Source source = definition.getSource();
+    return mergeSource(definition.getSource(), entity);
+  }
+
+  /**
+   * Updates source and source type of <code>entity</code> from <code>source</code>.
+   * Also updates UUID, which must be done for any change to the entity for it
+   * to take effect on the agents.
+   *
+   * @return the updated entity to be persisted, or null if alert source cannot be serialized to JSON
+   */
+  public AlertDefinitionEntity mergeSource(Source source, AlertDefinitionEntity entity) {
     entity.setSourceType(source.getType());
 
     try {
       String sourceJson = m_gson.toJson(source);
       entity.setSource(sourceJson);
-    } catch (Exception exception) {
-      LOG.error(
-          "Unable to serialize the alert definition source during coercion",
-          exception);
-
+    } catch (Exception e) {
+      LOG.error("Unable to serialize the alert definition source during merge", e);
       return null;
     }
 
+    assignNewUUID(entity);
+
     return entity;
   }
 
   /**
+   * Updates <code>entity</code> with a new UUID.
+   */
+  private static void assignNewUUID(AlertDefinitionEntity entity) {
+    if (entity != null) {
+      entity.setHash(UUID.randomUUID().toString());
+    }
+  }
+
+  /**
    * Gets an instance of {@link Gson} that can correctly serialize and
    * deserialize an {@link AlertDefinition}.
    *

http://git-wip-us.apache.org/repos/asf/ambari/blob/cfedbdf7/ambari-server/src/main/java/org/apache/ambari/server/state/alert/ScriptSource.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/alert/ScriptSource.java b/ambari-server/src/main/java/org/apache/ambari/server/state/alert/ScriptSource.java
index d1b7070..eed6f7c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/alert/ScriptSource.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/alert/ScriptSource.java
@@ -37,6 +37,10 @@ public class ScriptSource extends ParameterizedSource {
     return m_path;
   }
 
+  public void setPath(String path) {
+    m_path = path;
+  }
+
   /**
    * {@inheritDoc}
    */

http://git-wip-us.apache.org/repos/asf/ambari/blob/cfedbdf7/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractFinalUpgradeCatalog.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractFinalUpgradeCatalog.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractFinalUpgradeCatalog.java
new file mode 100644
index 0000000..9abf548
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractFinalUpgradeCatalog.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.upgrade;
+
+import java.sql.SQLException;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.utils.VersionUtils;
+
+import com.google.inject.Injector;
+
+/**
+ * Upgrade catalog which is executed after all version-dependent catalogs.
+ */
+public abstract class AbstractFinalUpgradeCatalog extends AbstractUpgradeCatalog {
+  AbstractFinalUpgradeCatalog(Injector injector) {
+    super(injector);
+  }
+
+  @Override
+  protected void executeDDLUpdates() throws AmbariException, SQLException {
+    //no-op
+  }
+
+  @Override
+  protected void executePreDMLUpdates() throws AmbariException, SQLException {
+    //no-op
+  }
+
+  @Override
+  public String getTargetVersion() {
+    return getFinalVersion();
+  }
+
+  @Override
+  public boolean isFinal() {
+    return true;
+  }
+
+  private String getFinalVersion() {
+    return VersionUtils.getVersionSubstring(configuration.getServerVersion());
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/cfedbdf7/ambari-server/src/main/java/org/apache/ambari/server/upgrade/FinalUpgradeCatalog.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/FinalUpgradeCatalog.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/FinalUpgradeCatalog.java
index 1ac0118..dad0ecf 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/FinalUpgradeCatalog.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/FinalUpgradeCatalog.java
@@ -18,30 +18,30 @@
 
 package org.apache.ambari.server.upgrade;
 
-import com.google.inject.Inject;
-import com.google.inject.Injector;
+import java.sql.SQLException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.ConfigHelper;
+import org.apache.ambari.server.state.PropertyInfo;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.StackInfo;
-import org.apache.ambari.server.state.PropertyInfo;
-import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.utils.VersionUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.sql.SQLException;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.List;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
 
 /**
  * Final upgrade catalog which simply updates database version (in case if no db changes between releases)
  */
-public class FinalUpgradeCatalog extends AbstractUpgradeCatalog {
+public class FinalUpgradeCatalog extends AbstractFinalUpgradeCatalog {
 
   /**
    * Logger.
@@ -54,16 +54,6 @@ public class FinalUpgradeCatalog extends AbstractUpgradeCatalog {
   }
 
   @Override
-  protected void executeDDLUpdates() throws AmbariException, SQLException {
-    //noop
-  }
-
-  @Override
-  protected void executePreDMLUpdates() throws AmbariException, SQLException {
-    //noop
-  }
-
-  @Override
   protected void executeDMLUpdates() throws AmbariException, SQLException {
     updateClusterEnv();
   }
@@ -77,8 +67,6 @@ public class FinalUpgradeCatalog extends AbstractUpgradeCatalog {
    *
    * Note: Config properties stack_features and stack_tools should always be updated to latest values as defined
    * in the stack on an Ambari upgrade.
-   *
-   * @throws Exception
    */
   protected void updateClusterEnv() throws AmbariException {
 
@@ -104,17 +92,4 @@ public class FinalUpgradeCatalog extends AbstractUpgradeCatalog {
     }
   }
 
-  @Override
-  public String getTargetVersion() {
-    return getFinalVersion();
-  }
-
-  @Override
-  public boolean isFinal() {
-    return true;
-  }
-
-  private String getFinalVersion() {
-    return VersionUtils.getVersionSubstring(configuration.getServerVersion());
-  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/cfedbdf7/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
index af2a7d1..77f683c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
@@ -39,13 +39,13 @@ import org.apache.ambari.server.utils.EventBusSynchronizer;
 import org.apache.ambari.server.utils.VersionUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import org.springframework.jdbc.support.JdbcUtils;
 
 import com.google.inject.Guice;
 import com.google.inject.Inject;
 import com.google.inject.Injector;
 import com.google.inject.multibindings.Multibinder;
 import com.google.inject.persist.PersistService;
-import org.springframework.jdbc.support.JdbcUtils;
 
 public class SchemaUpgradeHelper {
   private static final Logger LOG = LoggerFactory.getLogger
@@ -193,6 +193,7 @@ public class SchemaUpgradeHelper {
       catalogBinder.addBinding().to(UpgradeCatalog250.class);
       catalogBinder.addBinding().to(UpgradeCatalog251.class);
       catalogBinder.addBinding().to(UpgradeCatalog252.class);
+      catalogBinder.addBinding().to(UpdateAlertScriptPaths.class);
       catalogBinder.addBinding().to(FinalUpgradeCatalog.class);
 
       EventBusSynchronizer.synchronizeAmbariEventPublisher(binder());

http://git-wip-us.apache.org/repos/asf/ambari/blob/cfedbdf7/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpdateAlertScriptPaths.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpdateAlertScriptPaths.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpdateAlertScriptPaths.java
new file mode 100644
index 0000000..087a5af
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpdateAlertScriptPaths.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.upgrade;
+
+import java.sql.SQLException;
+
+import javax.inject.Inject;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.controller.AmbariManagementController;
+import org.apache.ambari.server.state.Clusters;
+
+import com.google.inject.Injector;
+
+/**
+ * Updates script-based alert definitions with paths from the stack.
+ */
+public class UpdateAlertScriptPaths extends AbstractFinalUpgradeCatalog {
+
+  @Inject
+  public UpdateAlertScriptPaths(Injector injector) {
+    super(injector);
+  }
+
+  @Override
+  protected void executeDMLUpdates() throws AmbariException, SQLException {
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+    AmbariMetaInfo ambariMetaInfo = injector.getInstance(AmbariMetaInfo.class);
+    Clusters clusters = ambariManagementController.getClusters();
+    ambariMetaInfo.reconcileAlertDefinitions(clusters, true);
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/cfedbdf7/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
index bc1c19a..bfb3c31 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
@@ -1871,7 +1871,7 @@ public class AmbariMetaInfoTest {
 
     cluster.addService("HDFS");
 
-    metaInfo.reconcileAlertDefinitions(clusters);
+    metaInfo.reconcileAlertDefinitions(clusters, false);
 
     AlertDefinitionDAO dao = injector.getInstance(AlertDefinitionDAO.class);
     List<AlertDefinitionEntity> definitions = dao.findAll(clusterId);
@@ -1895,7 +1895,7 @@ public class AmbariMetaInfoTest {
       dao.merge(definition);
     }
 
-    metaInfo.reconcileAlertDefinitions(clusters);
+    metaInfo.reconcileAlertDefinitions(clusters, false);
 
     definitions = dao.findAll();
     assertEquals(12, definitions.size());
@@ -1929,7 +1929,7 @@ public class AmbariMetaInfoTest {
     assertEquals(12, definitions.size());
 
     // reconcile, which should disable our bad definition
-    metaInfo.reconcileAlertDefinitions(clusters);
+    metaInfo.reconcileAlertDefinitions(clusters, false);
 
     // find all enabled for the cluster should find 6
     definitions = dao.findAllEnabled(cluster.getClusterId());
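
For context on what the new updateScriptPaths flag rewrites, the sketch below prints a hypothetical script-based alert source of the kind the reconciliation compares; the "type" and "path" keys follow the SCRIPT source handled in the diffs above, while the concrete stack path and the empty parameter list are invented for illustration.

    import json

    # Hypothetical SCRIPT alert source; the path value is made up.
    source = {
        "type": "SCRIPT",
        "path": "SOME_STACK/1.0/services/MY_SERVICE/package/alerts/alert_example.py",
        "parameters": []
    }
    print(json.dumps(source, indent=2))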


[03/19] ambari git commit: AMBARI-21542. AMS fails to start after IOP 4.2 to HDP 2.6.2 upgrade. (swagle)

Posted by jo...@apache.org.
AMBARI-21542. AMS fails to start after IOP 4.2 to HDP 2.6.2 upgrade. (swagle)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a7cc3801
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a7cc3801
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a7cc3801

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: a7cc3801a317bf95d6037250ea223e67d54502d2
Parents: ab1d378
Author: Di Li <di...@apache.org>
Authored: Thu Jul 20 20:46:09 2017 -0400
Committer: Siddharth Wagle <sw...@hortonworks.com>
Committed: Thu Jul 20 18:48:55 2017 -0700

----------------------------------------------------------------------
 .../configuration/ams-grafana-env.xml           |  93 ++++++
 .../configuration/ams-grafana-ini.xml           | 320 +++++++++++++++++++
 .../configuration/ams-hbase-site.xml            |  90 +++++-
 .../AMBARI_METRICS/configuration/ams-site.xml   | 215 +++++++++++--
 .../configuration/ams-ssl-client.xml            |  51 +++
 .../configuration/ams-ssl-server.xml            |  80 +++++
 6 files changed, 798 insertions(+), 51 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a7cc3801/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-grafana-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-grafana-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-grafana-env.xml
new file mode 100644
index 0000000..eaafc6b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-grafana-env.xml
@@ -0,0 +1,93 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+<configuration>
+  <property>
+    <name>metrics_grafana_log_dir</name>
+    <value>/var/log/ambari-metrics-grafana</value>
+    <display-name>Metrics Grafana log dir</display-name>
+    <description>Metrics Grafana log directory.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>metrics_grafana_pid_dir</name>
+    <value>/var/run/ambari-metrics-grafana</value>
+    <display-name>Metrics Grafana pid dir</display-name>
+    <description>Metrics Grafana pid directory.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>metrics_grafana_data_dir</name>
+    <value>/var/lib/ambari-metrics-grafana</value>
+    <display-name>Metrics Grafana data dir</display-name>
+    <description>Metrics Grafana data directory.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>metrics_grafana_username</name>
+    <value>admin</value>
+    <display-name>Grafana Admin Username</display-name>
+    <description>
+      Metrics Grafana Username. This value cannot be modified by Ambari
+      except on initial install. Please make sure the username change in
+      Grafana is reflected in Ambari.
+    </description>
+    <value-attributes>
+      <type>db_user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property require-input="true">
+    <name>metrics_grafana_password</name>
+    <value/>
+    <property-type>PASSWORD</property-type>
+    <display-name>Grafana Admin Password</display-name>
+    <description>
+      Metrics Grafana password. This value cannot be modified by Ambari
+      except on initial install. Please make sure the password change in
+      Grafana is reflected back in Ambari.
+    </description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>ams-grafana-env template</display-name>
+    <value>
+# Set environment variables here.
+
+# AMS UI Server Home Dir
+export AMS_GRAFANA_HOME_DIR={{ams_grafana_home_dir}}
+
+# AMS UI Server Data Dir
+export AMS_GRAFANA_DATA_DIR={{ams_grafana_data_dir}}
+
+# AMS UI Server Log Dir
+export AMS_GRAFANA_LOG_DIR={{ams_grafana_log_dir}}
+
+# AMS UI Server PID Dir
+export AMS_GRAFANA_PID_DIR={{ams_grafana_pid_dir}}
+    </value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
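
The {{...}} placeholders in the ams-grafana-env content template above are filled in from the corresponding configuration values when the agent writes the environment file. Below is a minimal stand-in for that substitution step, assuming a simple {{name}} syntax and using the default data dir from this file; the real rendering is done by Ambari's agent-side templating, not by this snippet.

    import re

    def render(template, params):
        # Replace each {{name}} placeholder with the matching parameter value.
        return re.sub(r"\{\{(\w+)\}\}", lambda m: str(params[m.group(1)]), template)

    content = "export AMS_GRAFANA_DATA_DIR={{ams_grafana_data_dir}}"
    print(render(content, {"ams_grafana_data_dir": "/var/lib/ambari-metrics-grafana"}))
    # -> export AMS_GRAFANA_DATA_DIR=/var/lib/ambari-metrics-grafana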

http://git-wip-us.apache.org/repos/asf/ambari/blob/a7cc3801/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-grafana-ini.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-grafana-ini.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-grafana-ini.xml
new file mode 100644
index 0000000..3c87ab1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-grafana-ini.xml
@@ -0,0 +1,320 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+<configuration>
+  <property>
+    <name>port</name>
+    <value>3000</value>
+    <description>The http port to use</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>protocol</name>
+    <value>http</value>
+    <description>Protocol (http or https)</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>cert_file</name>
+    <value>/etc/ambari-metrics-grafana/conf/ams-grafana.crt</value>
+    <description>Path to grafana certificate (.crt) file.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>cert_key</name>
+    <value>/etc/ambari-metrics-grafana/conf/ams-grafana.key</value>
+    <description>Path to grafana certificate key (.key) file.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ca_cert</name>
+    <value></value>
+    <description>Path to CA root certificate or bundle to be used to validate the Grafana certificate against.
+      For self signed certificates, this value can be the same as the value for 'cert_file'.
+      (If a path is not specified, the certificate validation is skipped)</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>ams-grafana-ini template</display-name>
+    <value>
+##################### Grafana Configuration Example #####################
+#
+# Everything has defaults so you only need to uncomment things you want to
+# change
+
+# possible values : production, development
+; app_mode = production
+
+#################################### Paths ####################################
+[paths]
+# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
+#
+;data = /var/lib/grafana
+data = {{ams_grafana_data_dir}}
+#
+# Directory where grafana can store logs
+#
+;logs = /var/log/grafana
+logs = {{ams_grafana_log_dir}}
+
+
+#################################### Server ####################################
+[server]
+# Protocol (http or https)
+;protocol = http
+protocol = {{ams_grafana_protocol}}
+# The ip address to bind to, empty will bind to all interfaces
+;http_addr =
+
+# The http port  to use
+;http_port = 3000
+http_port = {{ams_grafana_port}}
+
+# The public facing domain name used to access grafana from a browser
+;domain = localhost
+
+# Redirect to correct domain if host header does not match domain
+# Prevents DNS rebinding attacks
+;enforce_domain = false
+
+# The full public facing url
+;root_url = %(protocol)s://%(domain)s:%(http_port)s/
+
+# Log web requests
+;router_logging = false
+
+# the path relative working path
+;static_root_path = public
+static_root_path = /usr/lib/ambari-metrics-grafana/public
+
+# enable gzip
+;enable_gzip = false
+
+# https certs &amp; key file
+;cert_file =
+;cert_key =
+cert_file = {{ams_grafana_cert_file}}
+cert_key = {{ams_grafana_cert_key}}
+
+#################################### Database ####################################
+[database]
+# Either "mysql", "postgres" or "sqlite3", it's your choice
+;type = sqlite3
+;host = 127.0.0.1:3306
+;name = grafana
+;user = root
+;password =
+
+# For "postgres" only, either "disable", "require" or "verify-full"
+;ssl_mode = disable
+
+# For "sqlite3" only, path relative to data_path setting
+;path = grafana.db
+
+#################################### Session ####################################
+[session]
+# Either "memory", "file", "redis", "mysql", "postgres", default is "file"
+;provider = file
+
+# Provider config options
+# memory: not have any config yet
+# file: session dir path, is relative to grafana data_path
+# redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=grafana`
+# mysql: go-sql-driver/mysql dsn config string, e.g. `user:password@tcp(127.0.0.1:3306)/database_name`
+# postgres: user=a password=b host=localhost port=5432 dbname=c sslmode=disable
+;provider_config = sessions
+
+# Session cookie name
+;cookie_name = grafana_sess
+
+# If you use session in https only, default is false
+;cookie_secure = false
+
+# Session life time, default is 86400
+;session_life_time = 86400
+
+#################################### Analytics ####################################
+[analytics]
+# Server reporting, sends usage counters to stats.grafana.org every 24 hours.
+# No ip addresses are being tracked, only simple counters to track
+# running instances, dashboard and error counts. It is very helpful to us.
+# Change this option to false to disable reporting.
+;reporting_enabled = true
+
+# Google Analytics universal tracking code, only enabled if you specify an id here
+;google_analytics_ua_id =
+
+#################################### Security ####################################
+[security]
+# default admin user, created on startup
+admin_user = {{ams_grafana_admin_user}}
+
+# default admin password, can be changed before first start of grafana,  or in profile settings
+;admin_password =
+
+# used for signing
+;secret_key = SW2YcwTIb9zpOOhoPsMm
+
+# Auto-login remember days
+;login_remember_days = 7
+;cookie_username = grafana_user
+;cookie_remember_name = grafana_remember
+
+# disable gravatar profile images
+;disable_gravatar = false
+
+# data source proxy whitelist (ip_or_domain:port separated by spaces)
+;data_source_proxy_whitelist =
+
+#################################### Users ####################################
+[users]
+# disable user signup / registration
+;allow_sign_up = true
+
+# Allow non admin users to create organizations
+;allow_org_create = true
+
+# Set to true to automatically assign new users to the default organization (id 1)
+;auto_assign_org = true
+
+# Default role new users will be automatically assigned (if disabled above is set to true)
+;auto_assign_org_role = Viewer
+
+# Background text for the user field on the login page
+;login_hint = email or username
+
+#################################### Anonymous Auth ##########################
+[auth.anonymous]
+# enable anonymous access
+enabled = true
+
+# specify organization name that should be used for unauthenticated users
+org_name = Main Org.
+
+# specify role for unauthenticated users
+;org_role = Admin
+
+#################################### Github Auth ##########################
+[auth.github]
+;enabled = false
+;allow_sign_up = false
+;client_id = some_id
+;client_secret = some_secret
+;scopes = user:email,read:org
+;auth_url = https://github.com/login/oauth/authorize
+;token_url = https://github.com/login/oauth/access_token
+;api_url = https://api.github.com/user
+;team_ids =
+;allowed_organizations =
+
+#################################### Google Auth ##########################
+[auth.google]
+;enabled = false
+;allow_sign_up = false
+;client_id = some_client_id
+;client_secret = some_client_secret
+;scopes = https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email
+;auth_url = https://accounts.google.com/o/oauth2/auth
+;token_url = https://accounts.google.com/o/oauth2/token
+;api_url = https://www.googleapis.com/oauth2/v1/userinfo
+;allowed_domains =
+
+#################################### Auth Proxy ##########################
+[auth.proxy]
+;enabled = false
+;header_name = X-WEBAUTH-USER
+;header_property = username
+;auto_sign_up = true
+
+#################################### Basic Auth ##########################
+[auth.basic]
+;enabled = true
+
+#################################### Auth LDAP ##########################
+[auth.ldap]
+;enabled = false
+;config_file = /etc/grafana/ldap.toml
+
+#################################### SMTP / Emailing ##########################
+[smtp]
+;enabled = false
+;host = localhost:25
+;user =
+;password =
+;cert_file =
+;key_file =
+;skip_verify = false
+;from_address = admin@grafana.localhost
+
+[emails]
+;welcome_email_on_sign_up = false
+
+#################################### Logging ##########################
+[log]
+# Either "console", "file", default is "console"
+# Use comma to separate multiple modes, e.g. "console, file"
+;mode = console, file
+
+# Buffer length of channel, keep it as it is if you don't know what it is.
+;buffer_len = 10000
+
+# Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "Trace"
+;level = Info
+
+# For "console" mode only
+[log.console]
+;level =
+
+# For "file" mode only
+[log.file]
+;level =
+# This enables automated log rotate(switch of following options), default is true
+;log_rotate = true
+
+# Max line number of single file, default is 1000000
+;max_lines = 1000000
+
+# Max size shift of single file, default is 28 means 1 &lt;&lt; 28, 256MB
+;max_lines_shift = 28
+
+# Segment log daily, default is true
+;daily_rotate = true
+
+# Expired days of log file(delete after max days), default is 7
+;max_days = 7
+
+#################################### AMPQ Event Publisher ##########################
+[event_publisher]
+;enabled = false
+;rabbitmq_url = amqp://localhost/
+;exchange = grafana_events
+
+;#################################### Dashboard JSON files ##########################
+[dashboards.json]
+;enabled = false
+;path = /var/lib/grafana/dashboards
+path = /usr/lib/ambari-metrics-grafana/public/dashboards
+    </value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
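
The ca_cert property earlier in this file notes that certificate validation is skipped when no CA bundle path is given, and that self-signed setups may point it at cert_file. A hedged sketch of how a client could apply that rule follows; it is illustrative only, and the requests-style "verify" convention is an assumption rather than the actual AMS or Grafana client code.

    def grafana_verify_argument(ca_cert):
        # A set path means "validate against this CA bundle";
        # an empty value means certificate validation is skipped.
        return ca_cert if ca_cert else False

    print(grafana_verify_argument(""))
    # -> False (validation skipped)
    print(grafana_verify_argument("/etc/ambari-metrics-grafana/conf/ams-grafana.crt"))
    # -> path used as the CA bundle (self-signed case)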

http://git-wip-us.apache.org/repos/asf/ambari/blob/a7cc3801/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-hbase-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-hbase-site.xml
index 0432ffa..bf62b8e 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-hbase-site.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-hbase-site.xml
@@ -20,7 +20,7 @@
  * limitations under the License.
  */
 -->
-<configuration supports_do_not_extend="true">
+<configuration>
   <property>
     <name>hbase.rootdir</name>
     <value>file:///var/lib/ambari-metrics-collector/hbase</value>
@@ -29,6 +29,16 @@
       HBase to either local filesystem path if using Ambari Metrics in embedded mode or
       to a HDFS dir, example: hdfs://namenode.example.org:8020/amshbase.
     </description>
+    <depends-on>
+      <property>
+        <type>core-site</type>
+        <name>fs.defaultFS</name>
+      </property>
+      <property>
+        <type>ams-site</type>
+        <name>timeline.metrics.service.operation.mode</name>
+      </property>
+    </depends-on>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
@@ -40,6 +50,9 @@
       than '/tmp' (The '/tmp' directory is often cleared on
       machine restart).
     </description>
+    <value-attributes>
+      <type>directory</type>
+    </value-attributes>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
@@ -57,6 +70,12 @@
       standalone mode and true for distributed mode. If false, startup will run
       all HBase and ZooKeeper daemons together in the one JVM.
     </description>
+    <depends-on>
+      <property>
+        <type>ams-site</type>
+        <name>timeline.metrics.service.operation.mode</name>
+      </property>
+    </depends-on>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
@@ -149,7 +168,7 @@
   </property>
   <property>
     <name>hbase.normalizer.enabled</name>
-    <value>true</value>
+    <value>false</value>
     <description>If set to true, Master will try to keep region size
     within each table approximately the same.</description>
     <on-ambari-upgrade add="true"/>
@@ -212,7 +231,7 @@
     <name>hbase.hregion.max.filesize</name>
     <value>4294967296</value>
     <description>
-      Maximum HFile size. If the sum of the sizes of a region’s HFiles has grown
+      Maximum HFile size. If the sum of the sizes of a region&#x2019;s HFiles has grown
       to exceed this value, the region is split in two. Default is 10Gb.
     </description>
     <on-ambari-upgrade add="true"/>
@@ -239,22 +258,42 @@
   </property>
   <property>
     <name>phoenix.query.timeoutMs</name>
-    <value>1200000</value>
+    <value>300000</value>
     <description>
       Number of milliseconds after which a query will timeout on the client.
-      Default is 10 min.
+      Default is 5 min.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>phoenix.query.keepAliveMs</name>
+    <value>300000</value>
+    <description>
+      Number of milliseconds after which a query will keep the connection to HBase alive.
+      Default is 5 min.
     </description>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.client.scanner.timeout.period</name>
-    <value>900000</value>
+    <value>300000</value>
     <description>
       Client scanner lease period in milliseconds.
     </description>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
+    <name>hbase.rpc.timeout</name>
+    <value>300000</value>
+    <description>
+      This is for the RPC layer to define how long HBase client applications
+      take for a remote call to time out. It uses pings to check connections
+      but will eventually throw a TimeoutException.
+    </description>
+    <display-name>HBase RPC Timeout</display-name>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
     <name>hbase.regionserver.thread.compaction.large</name>
     <value>2</value>
     <description>
@@ -272,7 +311,17 @@
   </property>
   <property>
     <name>hbase.zookeeper.property.clientPort</name>
-    <value>61181</value>
+    <value>{{zookeeper_clientPort}}</value>
+    <depends-on>
+      <property>
+        <type>zoo.cfg</type>
+        <name>clientPort</name>
+      </property>
+      <property>
+        <type>ams-hbase-site</type>
+        <name>hbase.cluster.distributed</name>
+      </property>
+    </depends-on>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
@@ -307,11 +356,6 @@
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
-    <name>phoenix.query.spoolThresholdBytes</name>
-    <value>12582912</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
     <name>hbase.snapshot.enabled</name>
     <value>false</value>
     <description>Enable/Disable HBase snapshots.</description>
@@ -324,6 +368,16 @@
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
+    <name>hbase.zookeeper.property.tickTime</name>
+    <value>6000</value>
+    <description>
+      The length of a single tick, which is the basic time unit used by
+      ZooKeeper, as measured in milliseconds. This property setting only
+      affects the ZK server started by AMS in embedded mode. Unit = ms.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
     <name>zookeeper.session.timeout</name>
     <value>120000</value>
     <description>ZooKeeper session timeout in milliseconds.</description>
@@ -392,8 +446,8 @@
       Maximum living time (in milliseconds) of server caches. A cache entry
       expires after this amount of time has passed since last access. Consider
       adjusting this parameter when a server-side IOException(
-      “Could not find hash cache for joinId”) happens. Getting warnings like
-      “Earlier hash cache(s) might have expired on servers” might also be a
+      &#x201C;Could not find hash cache for joinId&#x201D;) happens. Getting warnings like
+      &#x201C;Earlier hash cache(s) might have expired on servers&#x201D; might also be a
       sign that this number should be increased.
     </description>
     <on-ambari-upgrade add="true"/>
@@ -415,7 +469,6 @@
       Hadoop servers should be configured to allow short circuit read
       for the hbase user for this to take effect
     </description>
-    <on-ambari-upgrade add="true"/>
     <depends-on>
       <property>
         <type>ams-hbase-site</type>
@@ -426,6 +479,11 @@
         <name>hbase.rootdir</name>
       </property>
     </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>zookeeper.znode.parent</name>
+    <value>/ams-hbase-unsecure</value>
+    <on-ambari-upgrade add="true"/>
   </property>
-
 </configuration>
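
The hbase.zookeeper.property.clientPort change above replaces the fixed 61181 with a {{zookeeper_clientPort}} placeholder whose value depends on zoo.cfg's clientPort and on hbase.cluster.distributed. A hedged sketch of how such a value could be resolved is shown below; the decision rule is inferred from the depends-on entries, not taken from the stack scripts, so treat it as an assumption.

    # Assumption: distributed AMS HBase reuses the cluster ZooKeeper port from
    # zoo.cfg, while embedded mode keeps the old private default of 61181.
    def resolve_zookeeper_client_port(hbase_cluster_distributed, cluster_client_port, embedded_port=61181):
        return cluster_client_port if hbase_cluster_distributed else embedded_port

    print(resolve_zookeeper_client_port(True, 2181))   # -> 2181
    print(resolve_zookeeper_client_port(False, 2181))  # -> 61181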

http://git-wip-us.apache.org/repos/asf/ambari/blob/a7cc3801/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-site.xml
index 0a25a9f..b9f534e 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-site.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-site.xml
@@ -20,7 +20,7 @@
  * limitations under the License.
  */
 -->
-<configuration supports_do_not_extend="true">
+<configuration>
   <property>
     <name>timeline.metrics.service.operation.mode</name>
     <value>embedded</value>
@@ -56,10 +56,10 @@
       Directory to store aggregator checkpoints. Change to a permanent
      location so that checkpoints are not lost.
     </description>
-    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>directory</type>
     </value-attributes>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>timeline.metrics.host.aggregator.minute.interval</name>
@@ -69,10 +69,10 @@
       Time in seconds to sleep for the minute resolution host based
       aggregator. Default resolution is 5 minutes.
     </description>
-    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
     </value-attributes>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>timeline.metrics.host.aggregator.hourly.interval</name>
@@ -82,10 +82,10 @@
       Time in seconds to sleep for the hourly resolution host based
       aggregator. Default resolution is 1 hour.
     </description>
-    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
     </value-attributes>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>timeline.metrics.daily.aggregator.minute.interval</name>
@@ -104,10 +104,10 @@
       Time in seconds to sleep for the hourly resolution cluster wide
       aggregator. Default is 1 hour.
     </description>
-    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
     </value-attributes>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>timeline.metrics.cluster.aggregator.daily.interval</name>
@@ -126,10 +126,10 @@
       Time in seconds to sleep for the minute resolution cluster wide
       aggregator. Default resolution is 5 minutes.
     </description>
-    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
     </value-attributes>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>timeline.metrics.cluster.aggregator.second.interval</name>
@@ -139,14 +139,14 @@
       Time in seconds to sleep for the second resolution cluster wide
       aggregator. Default resolution is 2 minutes.
     </description>
-    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
     </value-attributes>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>timeline.metrics.host.aggregator.daily.checkpointCutOffMultiplier</name>
-    <value>1</value>
+    <value>2</value>
     <description>
       Multiplier value * interval = Max allowed checkpoint lag. Effectively
       if aggregator checkpoint is greater than max allowed checkpoint delay,
@@ -163,10 +163,10 @@
       if aggregator checkpoint is greater than max allowed checkpoint delay,
       the checkpoint will be discarded by the aggregator.
     </description>
-    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
     </value-attributes>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>timeline.metrics.host.aggregator.minute.checkpointCutOffMultiplier</name>
@@ -177,10 +177,10 @@
       if aggregator checkpoint is greater than max allowed checkpoint delay,
       the checkpoint will be discarded by the aggregator.
     </description>
-    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
     </value-attributes>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>timeline.metrics.cluster.aggregator.hourly.checkpointCutOffMultiplier</name>
@@ -191,10 +191,10 @@
       if aggregator checkpoint is greater than max allowed checkpoint delay,
       the checkpoint will be discarded by the aggregator.
     </description>
-    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
     </value-attributes>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>timeline.metrics.cluster.aggregator.second.checkpointCutOffMultiplier</name>
@@ -205,10 +205,10 @@
       if aggregator checkpoint is greater than max allowed checkpoint delay,
       the checkpoint will be discarded by the aggregator.
     </description>
-    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
     </value-attributes>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>timeline.metrics.cluster.aggregator.minute.checkpointCutOffMultiplier</name>
@@ -219,14 +219,14 @@
       if aggregator checkpoint is greater than max allowed checkpoint delay,
       the checkpoint will be discarded by the aggregator.
     </description>
-    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
     </value-attributes>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>timeline.metrics.cluster.aggregator.daily.checkpointCutOffMultiplier</name>
-    <value>1</value>
+    <value>2</value>
     <description>
       Multiplier value * interval = Max allowed checkpoint lag. Effectively
       if aggregator checkpoint is greater than max allowed checkpoint delay,
@@ -302,16 +302,16 @@
     <description>
       Lowest resolution of desired data for cluster level second aggregates.
     </description>
-    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
     </value-attributes>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>timeline.metrics.host.aggregator.daily.ttl</name>
     <value>31536000</value>
     <description>
-      Host based daily resolution data purge interval. Default is 1 year.
+      Host based daily resolution data purge interval in seconds. Default is 1 year.
     </description>
     <on-ambari-upgrade add="true"/>
   </property>
@@ -319,7 +319,7 @@
     <name>timeline.metrics.host.aggregator.hourly.ttl</name>
     <value>2592000</value>
     <description>
-      Host based hourly resolution data purge interval. Default is 30 days.
+      Host based hourly resolution data purge interval in seconds. Default is 30 days.
     </description>
     <on-ambari-upgrade add="true"/>
   </property>
@@ -327,23 +327,23 @@
     <name>timeline.metrics.host.aggregator.minute.ttl</name>
     <value>604800</value>
     <description>
-      Host based minute resolution data purge interval. Default is 7 days.
+      Host based minute resolution data purge interval in seconds. Default is 7 days.
     </description>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>timeline.metrics.cluster.aggregator.second.ttl</name>
-    <value>2592000</value>
+    <value>259200</value>
     <description>
-      Cluster wide second resolution data purge interval. Default is 7 days.
+      Cluster wide second resolution data purge interval in seconds. Default is 3 days.
     </description>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>timeline.metrics.cluster.aggregator.minute.ttl</name>
-    <value>7776000</value>
+    <value>2592000</value>
     <description>
-      Cluster wide minute resolution data purge interval. Default is 30 days.
+      Cluster wide minute resolution data purge interval in seconds. Default is 30 days.
     </description>
     <on-ambari-upgrade add="true"/>
   </property>
@@ -351,7 +351,7 @@
     <name>timeline.metrics.cluster.aggregator.hourly.ttl</name>
     <value>31536000</value>
     <description>
-      Cluster wide hourly resolution data purge interval. Default is 1 year.
+      Cluster wide hourly resolution data purge interval in seconds. Default is 1 year.
     </description>
     <on-ambari-upgrade add="true"/>
   </property>
@@ -359,7 +359,7 @@
     <name>timeline.metrics.cluster.aggregator.daily.ttl</name>
     <value>63072000</value>
     <description>
-      Cluster wide daily resolution data purge interval. Default is 2 years.
+      Cluster wide daily resolution data purge interval in seconds. Default is 2 years.
     </description>
     <on-ambari-upgrade add="true"/>
   </property>
@@ -367,8 +367,14 @@
     <name>timeline.metrics.host.aggregator.ttl</name>
     <value>86400</value>
     <description>
-      1 minute resolution data purge interval. Default is 1 day.
+      1 minute resolution data purge interval in seconds. Default is 1 day.
     </description>
+    <depends-on>
+      <property>
+        <type>ams-site</type>
+        <name>timeline.metrics.service.operation.mode</name>
+      </property>
+    </depends-on>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
@@ -399,10 +405,10 @@
       22 aggregate metrics/min * 2 * 60 * 6 : Retrieve 10 SECOND data for 2 hours.
     </description>
     <display-name>Metrics service default result limit</display-name>
-    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
     </value-attributes>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>timeline.metrics.service.checkpointDelay</name>
@@ -412,10 +418,10 @@
       Time in seconds to sleep on the first run or when the checkpoint is
       too old.
     </description>
-    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
     </value-attributes>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>timeline.metrics.service.resultset.fetchSize</name>
@@ -424,10 +430,10 @@
     <description>
      JDBC resultset prefetch size for aggregator queries.
     </description>
-    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
     </value-attributes>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- Phoenix properties that would manifest in the hbase-site.xml on the client side -->
   <property>
@@ -446,6 +452,9 @@
       Set directory for Phoenix spill files. If possible set this to a
       different mount point from the one for hbase.rootdir in embedded mode.
     </description>
+    <value-attributes>
+      <type>directory</type>
+    </value-attributes>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
@@ -473,7 +482,6 @@
       Pre-split regions using the split points corresponding to this property
       for the precision table that stores seconds aggregate data.
     </description>
-    <on-ambari-upgrade add="true"/>
     <depends-on>
       <property>
         <type>ams-hbase-site</type>
@@ -492,6 +500,7 @@
         <name>hbase_regionserver_heapsize</name>
       </property>
     </depends-on>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>timeline.metrics.cluster.aggregate.splitpoints</name>
@@ -500,7 +509,6 @@
       Pre-split regions using the split points corresponding to this property
       for the aggregate table that stores seconds aggregate data across hosts.
     </description>
-    <on-ambari-upgrade add="true"/>
     <depends-on>
       <property>
         <type>ams-hbase-site</type>
@@ -519,8 +527,8 @@
         <name>hbase_regionserver_heapsize</name>
       </property>
     </depends-on>
+    <on-ambari-upgrade add="true"/>
   </property>
-
   <property>
     <name>timeline.metrics.sink.report.interval</name>
     <value>60</value>
@@ -532,13 +540,27 @@
   </property>
   <property>
     <name>timeline.metrics.sink.collection.period</name>
-    <value>60</value>
+    <value>10</value>
     <description>
       The interval between two service metrics data exports.
     </description>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
+    <name>timeline.metrics.service.watcher.disabled</name>
+    <value>false</value>
+    <description>
+      Disable Timeline Metric Store watcher thread. Disabled by default in AMS distributed mode.
+    </description>
+    <depends-on>
+      <property>
+        <type>ams-site</type>
+        <name>timeline.metrics.service.operation.mode</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
     <name>timeline.metrics.service.watcher.initial.delay</name>
     <value>600</value>
     <description>
@@ -567,12 +589,135 @@
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
-    <name>timeline.metrics.hbase.fifo.compaction.enabled</name>
+    <name>timeline.metrics.aggregators.skip.blockcache.enabled</name>
+    <value>false</value>
+    <description>
+      Skip block cache on aggregator queries to allow HBase block cache
+      utilization only for user queries.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>timeline.metrics.cache.commit.interval</name>
+    <value>3</value>
+    <description>
+      Time in seconds between committing metrics from cache
+    </description>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>timeline.metrics.cache.size</name>
+    <value>150</value>
+    <description>
+      Size of array blocking queue used to cache metrics
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>timeline.metrics.cache.enabled</name>
     <value>true</value>
     <description>
-      Enable Compaction policy for lower precision and minute aggregate tables.
+      If set to true, PhoenixHBaseAccessor will use a cache to store metrics before committing them.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>timeline.metrics.service.http.policy</name>
+    <value>HTTP_ONLY</value>
+    <description>
+      This configures the HTTP endpoint for Yarn Application History Server for
+      Ambari Metrics System.
+      The following values are supported:
+      - HTTP_ONLY : Service is provided only on http
+      - HTTPS_ONLY : Service is provided only on https
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>failover.strategy</name>
+    <value>round-robin</value>
+    <description>
+      Failover strategy for metric monitors
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>timeline.metrics.hbase.init.check.enabled</name>
+    <value>true</value>
+    <description>
+      Enable Initialization check for HBase tables during Metrics service startup.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>timeline.metrics.cluster.aggregator.interpolation.enabled</name>
+    <value>true</value>
+    <description>
+      Enable linear interpolation for missing slices of data while aggregating.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>timeline.metrics.service.metadata.filters</name>
+    <value>ContainerResource</value>
+    <description>
+      Comma separated list of regular expressions that match metric names,
+      which prevents certain metrics from ending up in the metadata cache.
     </description>
     <on-ambari-upgrade add="true"/>
   </property>
-
+  <property>
+    <name>timeline.metrics.cluster.aggregation.sql.filters</name>
+    <value>sdisk\_%,boottime</value>
+    <description>
+      Comma separated list of metric names or Phoenix 'LIKE' clause expressions that match metric names,
+      which prevents certain metrics from being aggregated across hosts.
+    </description>
+    <on-ambari-upgrade add="true"/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>timeline.metrics.downsampler.topn.metric.patterns</name>
+    <value>dfs.NNTopUserOpCounts.windowMs=60000.op=__%.user=%,dfs.NNTopUserOpCounts.windowMs=300000.op=__%.user=%,dfs.NNTopUserOpCounts.windowMs=1500000.op=__%.user=%</value>
+    <description>
+      Comma separated list of metric name regular expressions that are candidates for Top N downsampling.
+    </description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>timeline.metrics.downsampler.topn.value</name>
+    <value>10</value>
+    <description>
+      Top N value to be used for Top N downsampling. Default is 10.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>timeline.metrics.downsampler.topn.function</name>
+    <value>max</value>
+    <description>
+      Top N function to be used for Top N downsampling (avg/max/sum).
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>cluster.zookeeper.quorum</name>
+    <value>{{cluster_zookeeper_quorum_hosts}}</value>
+    <description>Comma separated list of servers in the cluster ZooKeeper Quorum.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>cluster.zookeeper.property.clientPort</name>
+    <value>{{cluster_zookeeper_clientPort}}</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
 </configuration>
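
All of the *.ttl properties above are expressed in seconds, so the defaults are easy to misread. Below is a quick sanity-check sketch of the values against the durations stated in the descriptions; the property names come from the diff above, the script itself is only illustrative.

    # Property names from the diff above; the check itself is illustrative.
    TTL_DEFAULTS = {
        "timeline.metrics.cluster.aggregator.hourly.ttl": 31536000,  # "1 year"
        "timeline.metrics.cluster.aggregator.daily.ttl": 63072000,   # "2 years"
        "timeline.metrics.host.aggregator.ttl": 86400,               # "1 day"
    }

    SECONDS_PER_DAY = 24 * 60 * 60

    for name, seconds in sorted(TTL_DEFAULTS.items()):
        days = seconds / float(SECONDS_PER_DAY)
        print("{0} = {1} s ({2:.0f} days)".format(name, seconds, days))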

http://git-wip-us.apache.org/repos/asf/ambari/blob/a7cc3801/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-ssl-client.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-ssl-client.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-ssl-client.xml
new file mode 100644
index 0000000..cac39de
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-ssl-client.xml
@@ -0,0 +1,51 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration>
+  <property>
+    <name>ssl.client.truststore.location</name>
+    <value>/etc/security/clientKeys/all.jks</value>
+    <description>Location of the trust store file.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ssl.client.truststore.type</name>
+    <value>jks</value>
+    <description>Optional. Default value is "jks".</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ssl.client.truststore.password</name>
+    <value>bigdata</value>
+    <property-type>PASSWORD</property-type>
+    <description>Password to open the trust store file.</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ssl.client.truststore.alias</name>
+    <value></value>
+    <description>Alias used to create certificate for AMS. (Default is hostname)</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/a7cc3801/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-ssl-server.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-ssl-server.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-ssl-server.xml
new file mode 100644
index 0000000..5d2745f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-ssl-server.xml
@@ -0,0 +1,80 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration>
+  <property>
+    <name>ssl.server.truststore.location</name>
+    <value>/etc/security/serverKeys/all.jks</value>
+    <description>Location of the trust store file.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ssl.server.truststore.type</name>
+    <value>jks</value>
+    <description>Optional. Default value is "jks".</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ssl.server.truststore.password</name>
+    <value>bigdata</value>
+    <property-type>PASSWORD</property-type>
+    <description>Password to open the trust store file.</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ssl.server.truststore.reload.interval</name>
+    <value>10000</value>
+    <description>Truststore reload interval, in milliseconds.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ssl.server.keystore.type</name>
+    <value>jks</value>
+    <description>Optional. Default value is "jks".</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ssl.server.keystore.location</name>
+    <value>/etc/security/serverKeys/keystore.jks</value>
+    <description>Location of the keystore file.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ssl.server.keystore.password</name>
+    <value>bigdata</value>
+    <property-type>PASSWORD</property-type>
+    <description>Password to open the keystore file.</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ssl.server.keystore.keypassword</name>
+    <value>bigdata</value>
+    <property-type>PASSWORD</property-type>
+    <description>Password for private key in keystore file.</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
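
The two new ams-ssl-*.xml files above only declare store locations and passwords; nothing in the patch validates them. Below is a minimal, illustrative pre-flight check one could run before switching timeline.metrics.service.http.policy to HTTPS_ONLY; the defaults are copied from the files above, the helper itself is an assumption and not Ambari code.

    import os

    # Defaults copied from the two new configuration files; in a live cluster
    # these values would come from the rendered site configs instead of being
    # hard-coded here.
    ssl_client = {
        "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
        "ssl.client.truststore.password": "bigdata",
    }
    ssl_server = {
        "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks",
        "ssl.server.truststore.password": "bigdata",
        "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks",
        "ssl.server.keystore.password": "bigdata",
    }

    def check_store(props, prefix):
        # Warn rather than fail so the check stays side-effect free.
        location = props.get(prefix + ".location")
        password = props.get(prefix + ".password")
        if location and not os.path.isfile(location):
            print("WARNING: {0}.location points at a missing file: {1}".format(prefix, location))
        if not password:
            print("WARNING: {0}.password is empty".format(prefix))

    for props, prefix in ((ssl_client, "ssl.client.truststore"),
                          (ssl_server, "ssl.server.truststore"),
                          (ssl_server, "ssl.server.keystore")):
        check_store(props, prefix)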


[06/19] ambari git commit: AMBARI-21550 PHOENIX_QUERY_SERVER not upgraded in EU (dili)

Posted by jo...@apache.org.
AMBARI-21550 PHOENIX_QUERY_SERVER not upgraded in EU (dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/421f3c6a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/421f3c6a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/421f3c6a

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 421f3c6a2bae95a78c1a5754ba75d0bb5d3fc772
Parents: 32f36a7
Author: Di Li <di...@apache.org>
Authored: Fri Jul 21 12:58:21 2017 -0400
Committer: Di Li <di...@apache.org>
Committed: Fri Jul 21 12:58:21 2017 -0400

----------------------------------------------------------------------
 .../BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml   | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/421f3c6a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
index 57e731e..1932364 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
@@ -147,6 +147,7 @@
         <component>HBASE_REGIONSERVER</component>
         <component>HBASE_MASTER</component>
         <component>HBASE_REST_SERVER</component>
+        <component>PHOENIX_QUERY_SERVER</component>
       </service>
 
       <service name="HDFS">
@@ -456,6 +457,7 @@
         <component>HBASE_REGIONSERVER</component>
         <component>HBASE_CLIENT</component>
         <component>HBASE_REST_SERVER</component>
+        <component>PHOENIX_QUERY_SERVER</component>
       </service>
     </group>
 

[16/19] ambari git commit: AMBARI-21566. RU: Hive service check failed during RU (aonishuk)

Posted by jo...@apache.org.
AMBARI-21566. RU: Hive service check failed during RU (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/439da8bd
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/439da8bd
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/439da8bd

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 439da8bd015288c54c215ef711dcd556b9a3d24e
Parents: b56446a
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Mon Jul 24 20:58:42 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Jul 24 20:58:42 2017 +0300

----------------------------------------------------------------------
 .../src/main/python/resource_management/core/files/killtree.sh     | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/439da8bd/ambari-common/src/main/python/resource_management/core/files/killtree.sh
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/core/files/killtree.sh b/ambari-common/src/main/python/resource_management/core/files/killtree.sh
index c19efd9..c7809f9 100644
--- a/ambari-common/src/main/python/resource_management/core/files/killtree.sh
+++ b/ambari-common/src/main/python/resource_management/core/files/killtree.sh
@@ -25,7 +25,7 @@ set -e
 killtree() {
     local _pid=$1
     local _sig=${2:--TERM}
-    ambari-sudo.sh kill -stop ${_pid} # needed to stop quickly forking parent from producing children between child killing and parent killing
+    ambari-sudo.sh kill -s stop ${_pid} # needed to stop quickly forking parent from producing children between child killing and parent killing
     for _child in $(ps -o pid --no-headers --ppid ${_pid}); do
         killtree ${_child} ${_sig}
     done
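
The one-character fix above switches to the portable `kill -s <signal>` form. For readers following the logic outside the shell script, here is a rough Python rendering of the same killtree idea: pause the parent first so it cannot fork new children while the subtree is being signalled, then recurse. This is an illustrative sketch, not the script Ambari ships; the trailing SIGCONT is the sketch's addition so the pending signal is actually delivered.

    import os
    import signal
    import subprocess

    def child_pids(pid):
        # Same approach as the shell script: ask ps for the direct children.
        try:
            out = subprocess.check_output(
                ["ps", "-o", "pid", "--no-headers", "--ppid", str(pid)])
        except subprocess.CalledProcessError:
            return []  # ps exits non-zero when there are no children
        return [int(p) for p in out.split()]

    def killtree(pid, sig=signal.SIGTERM):
        # Pause the parent first so it cannot fork new children between the
        # child kills and its own kill (the role of `kill -s stop` above).
        os.kill(pid, signal.SIGSTOP)
        for child in child_pids(pid):
            killtree(child, sig)
        os.kill(pid, sig)
        # Sketch-only addition: continue the stopped process so the pending
        # signal is delivered.
        os.kill(pid, signal.SIGCONT)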

[05/19] ambari git commit: AMBARI-21544. HiveServer2 fails to start when the webhdfs call to create /hdp/apps/..jar files fails with org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException (aonishuk)

Posted by jo...@apache.org.
AMBARI-21544. HiveServer2 fails to start when the webhdfs call to create /hdp/apps/..jar files fails with org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/32f36a71
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/32f36a71
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/32f36a71

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 32f36a717509bb7526833a94a0e5916ea806274a
Parents: caec39f
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Fri Jul 21 12:39:06 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Fri Jul 21 12:39:06 2017 +0300

----------------------------------------------------------------------
 .../libraries/providers/hdfs_resource.py        | 48 +++++++++++++++++++-
 1 file changed, 46 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/32f36a71/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py b/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
index efca23d..0c45719 100644
--- a/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
+++ b/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
@@ -61,6 +61,11 @@ RESOURCE_TO_JSON_FIELDS = {
   'dfs_type': 'dfs_type'
 }
 
+EXCEPTIONS_TO_RETRY = {
+  # "ExceptionName": (try_count, try_sleep_seconds)
+  "LeaseExpiredException": (20, 6),
+}
+
 class HdfsResourceJar:
   """
   This is slower than HdfsResourceWebHDFS implementation of HdfsResouce, but it works in any cases on any DFS types.
@@ -132,6 +137,17 @@ class HdfsResourceJar:
     # Clean
     env.config['hdfs_files'] = []
 
+
+class WebHDFSCallException(Fail):
+  def __init__(self, message, result_message):
+    self.result_message = result_message
+    super(WebHDFSCallException, self).__init__(message)
+
+  def get_exception_name(self):
+    if isinstance(self.result_message, dict) and "RemoteException" in self.result_message and "exception" in self.result_message["RemoteException"]:
+      return self.result_message["RemoteException"]["exception"]
+    return None
+
 class WebHDFSUtil:
   def __init__(self, hdfs_site, run_user, security_enabled, logoutput=None):
     https_nn_address = namenode_ha_utils.get_property_for_active_namenode(hdfs_site, 'dfs.namenode.https-address',
@@ -153,8 +169,36 @@ class WebHDFSUtil:
     # only hdfs seems to support webHDFS
     return (is_webhdfs_enabled and default_fs.startswith("hdfs"))
     
+  def run_command(self, *args, **kwargs):
+    """
+    This function is a wrapper around self._run_command that adds a retry routine to it.
+    """
+    try:
+      return self._run_command(*args, **kwargs)
+    except WebHDFSCallException as ex:
+      exception_name = ex.get_exception_name()
+      if exception_name in EXCEPTIONS_TO_RETRY:
+        try_count, try_sleep = EXCEPTIONS_TO_RETRY[exception_name]
+        last_exception = ex
+      else:
+        raise
+
+    while True:
+      Logger.info("Retrying after {0} seconds. Reason: {1}".format(try_sleep, str(last_exception)))
+      try_count -= 1
+      time.sleep(try_sleep)
+
+      if try_count == 0:
+        break
+
+      try:
+        self._run_command(*args, **kwargs)
+        break
+      except WebHDFSCallException as ex:
+        last_exception = ex
+
   valid_status_codes = ["200", "201"]
-  def run_command(self, target, operation, method='POST', assertable_result=True, file_to_put=None, ignore_status_codes=[], **kwargs):
+  def _run_command(self, target, operation, method='POST', assertable_result=True, file_to_put=None, ignore_status_codes=[], **kwargs):
     """
     assertable_result - some POST requests return '{"boolean":false}' or '{"boolean":true}'
     depending on if query was successful or not, we can assert this for them
@@ -201,7 +245,7 @@ class WebHDFSUtil:
       formatted_output = json.dumps(result_dict, indent=2) if isinstance(result_dict, dict) else result_dict
       formatted_output = err + "\n" + formatted_output
       err_msg = "Execution of '%s' returned status_code=%s. %s" % (shell.string_cmd_from_args_list(cmd), status_code, formatted_output)
-      raise Fail(err_msg)
+      raise WebHDFSCallException(err_msg, result_dict)
     
     return result_dict
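
Read in isolation, the change above introduces a generic "retry on specific remote exceptions" pattern around the webhdfs call. Below is a self-contained sketch of that pattern; the exception class, names, and count/sleep table are illustrative, and this variant also returns the successful result and re-raises the last error once the attempts run out.

    import time

    class RemoteCallError(Exception):
        """Illustrative stand-in for WebHDFSCallException."""
        def __init__(self, message, exception_name=None):
            super(RemoteCallError, self).__init__(message)
            self.exception_name = exception_name

    # "ExceptionName": (try_count, try_sleep_seconds), as in EXCEPTIONS_TO_RETRY.
    RETRYABLE = {"LeaseExpiredException": (20, 6)}

    def call_with_retries(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except RemoteCallError as ex:
            if ex.exception_name not in RETRYABLE:
                raise
            tries_left, sleep_seconds = RETRYABLE[ex.exception_name]
            last_error = ex

        while tries_left > 0:
            time.sleep(sleep_seconds)
            try:
                return func(*args, **kwargs)
            except RemoteCallError as ex:
                last_error = ex
                tries_left -= 1
        raise last_error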
     

[02/19] ambari git commit: AMBARI-21541 Restart services failed post Ambari Upgrade (dili)

Posted by jo...@apache.org.
AMBARI-21541 Restart services failed post Ambari Upgrade (dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ab1d378d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ab1d378d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ab1d378d

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: ab1d378d2d2b244eec4fea6a87dca54b4d42073e
Parents: d4244f5
Author: Di Li <di...@apache.org>
Authored: Thu Jul 20 20:46:09 2017 -0400
Committer: Di Li <di...@apache.org>
Committed: Thu Jul 20 20:46:09 2017 -0400

----------------------------------------------------------------------
 .../4.0/hooks/before-ANY/scripts/shared_initialization.py          | 2 +-
 .../4.2.5/hooks/before-ANY/scripts/shared_initialization.py        | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/ab1d378d/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/shared_initialization.py
index 1c98fb8..ccb0293 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/shared_initialization.py
@@ -147,7 +147,7 @@ def get_uid(user):
        mode=0555)
     ignore_groupsusers_create_str = str(params.ignore_groupsusers_create).lower()
     newUid=commands.getoutput(format("{tmp_dir}/changeUid.sh {user}"))
-    return newUid
+    return int(newUid)
     
 def setup_hadoop_env():
   import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/ab1d378d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-ANY/scripts/shared_initialization.py
index 930ed1f..9741397 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-ANY/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-ANY/scripts/shared_initialization.py
@@ -160,7 +160,7 @@ def get_uid(user):
        mode=0555)
     ignore_groupsusers_create_str = str(params.ignore_groupsusers_create).lower()
     newUid=commands.getoutput(format("{tmp_dir}/changeUid.sh {user}"))
-    return newUid
+    return int(newUid)
 
 def setup_hadoop_env():
   import params


[13/19] ambari git commit: AMBARI-21562 HBaseRestServer not visible in HBase summary page. (atkach)

Posted by jo...@apache.org.
AMBARI-21562 HBaseRestServer not visible in HBase summary page. (atkach)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/110c8cd6
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/110c8cd6
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/110c8cd6

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 110c8cd6e161c6a46c4ff4dffba4421fbc05f867
Parents: dae2c38
Author: Andrii Tkach <at...@apache.org>
Authored: Mon Jul 24 15:52:25 2017 +0300
Committer: Andrii Tkach <at...@apache.org>
Committed: Mon Jul 24 15:52:25 2017 +0300

----------------------------------------------------------------------
 .../app/mappers/components_state_mapper.js      |  5 +++++
 .../app/mappers/service_metrics_mapper.js       |  3 +++
 ambari-web/app/messages.js                      |  3 +++
 ambari-web/app/models/service/hbase.js          |  3 +++
 .../templates/main/service/services/hbase.hbs   | 20 ++++++++++++++++++++
 .../app/views/main/service/services/hbase.js    |  8 ++++++++
 6 files changed, 42 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/110c8cd6/ambari-web/app/mappers/components_state_mapper.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mappers/components_state_mapper.js b/ambari-web/app/mappers/components_state_mapper.js
index a7e6d60..0ac6a26 100644
--- a/ambari-web/app/mappers/components_state_mapper.js
+++ b/ambari-web/app/mappers/components_state_mapper.js
@@ -82,6 +82,11 @@ App.componentsStateMapper = App.QuickDataMapper.create({
       region_servers_installed: 'INSTALLED_PATH',
       region_servers_total: 'TOTAL_PATH'
     },
+    'HBASE_REST_SERVER': {
+      rest_servers_started: 'STARTED_PATH',
+      rest_servers_installed: 'INSTALLED_PATH',
+      rest_servers_total: 'TOTAL_PATH'
+    },
     'PHOENIX_QUERY_SERVER': {
       phoenix_servers_started: 'STARTED_PATH',
       phoenix_servers_installed: 'INSTALLED_PATH',

http://git-wip-us.apache.org/repos/asf/ambari/blob/110c8cd6/ambari-web/app/mappers/service_metrics_mapper.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mappers/service_metrics_mapper.js b/ambari-web/app/mappers/service_metrics_mapper.js
index 87b6149..f9b3548 100644
--- a/ambari-web/app/mappers/service_metrics_mapper.js
+++ b/ambari-web/app/mappers/service_metrics_mapper.js
@@ -117,6 +117,9 @@ App.serviceMetricsMapper = App.QuickDataMapper.create({
     region_servers_started: 'region_servers_started',
     region_servers_installed: 'region_servers_installed',
     region_servers_total: 'region_servers_total',
+    rest_servers_started: 'rest_servers_started',
+    rest_servers_installed: 'rest_servers_installed',
+    rest_servers_total: 'rest_servers_total',
     phoenix_servers_started: 'phoenix_servers_started',
     phoenix_servers_installed: 'phoenix_servers_installed',
     phoenix_servers_total: 'phoenix_servers_total'

http://git-wip-us.apache.org/repos/asf/ambari/blob/110c8cd6/ambari-web/app/messages.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index 1bad287..9e640ed 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -1879,6 +1879,7 @@ Em.I18n.translations = {
   'services.service.summary.nodeManagersLive':'NodeManagers Live',
   'services.service.summary.TrackersLive':'Trackers Live',
   'services.service.summary.RegionServersLIVE':'RegionServers Live',
+  'services.service.summary.RestServersLIVE':'HBaseRESTServer Live',
   'services.service.summary.PhoenixServersLIVE':'Phoenix Query servers Live',
   'services.service.summary.GangliaMonitorsLIVE':'Ganglia Monitors Live',
   'services.service.summary.SupervisorsLIVE':'Supervisors Live',
@@ -2952,6 +2953,8 @@ Em.I18n.translations = {
   'dashboard.services.hbase.averageLoadPerServer':'{0} regions per RegionServer',
   'dashboard.services.hbase.regionServers':'RegionServers',
   'dashboard.services.hbase.regionServersSummary':'{0} live / {1} total',
+  'dashboard.services.hbase.restServers':'HBaseRESTServer',
+  'dashboard.services.hbase.restServersSummary':'{0} live / {1} total',
   'dashboard.services.hbase.phoenixServers':'Phoenix Query Servers',
   'dashboard.services.hbase.phoenixServersSummary':'{0} live / {1} total',
   'dashboard.services.hbase.chart.label':'Request Count',

http://git-wip-us.apache.org/repos/asf/ambari/blob/110c8cd6/ambari-web/app/models/service/hbase.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/service/hbase.js b/ambari-web/app/models/service/hbase.js
index 09378fc..ccf7068 100644
--- a/ambari-web/app/models/service/hbase.js
+++ b/ambari-web/app/models/service/hbase.js
@@ -22,6 +22,9 @@ App.HBaseService = App.Service.extend({
   regionServersStarted: DS.attr('number'),
   regionServersInstalled: DS.attr('number'),
   regionServersTotal: DS.attr('number'),
+  restServersStarted: DS.attr('number'),
+  restServersInstalled: DS.attr('number'),
+  restServersTotal: DS.attr('number'),
   phoenixServersStarted: DS.attr('number'),
   phoenixServersInstalled: DS.attr('number'),
   phoenixServersTotal: DS.attr('number'),

http://git-wip-us.apache.org/repos/asf/ambari/blob/110c8cd6/ambari-web/app/templates/main/service/services/hbase.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/services/hbase.hbs b/ambari-web/app/templates/main/service/services/hbase.hbs
index d575127..916d984 100644
--- a/ambari-web/app/templates/main/service/services/hbase.hbs
+++ b/ambari-web/app/templates/main/service/services/hbase.hbs
@@ -18,6 +18,26 @@
 
 <!-- HBase Master Server -->
 {{view view.dashboardMasterComponentView}}
+<!-- HBaseRESTServer -->
+{{#if view.isRestServerCreated}}
+  <tr {{bindAttr class=":component view.restServerComponent.componentName"}}>
+    <td class="summary-label"><a
+      href="#" {{action filterHosts view.restServerComponent}}>{{t dashboard.services.hbase.restServers}}</a>
+    </td>
+    <td class="summary-value">
+        {{#if App.router.clusterController.isServiceContentFullyLoaded}}
+          <span>
+              {{#view App.ComponentLiveTextView liveComponentsBinding="view.service.restServersStarted" totalComponentsBinding="view.service.restServersTotal"}}
+                  {{view.liveComponents}}/{{view.totalComponents}}
+              {{/view}}
+          </span>
+            {{t services.service.summary.RestServersLIVE}}
+        {{else}}
+            {{t common.loading.eclipses}}
+        {{/if}}
+    </td>
+  </tr>
+{{/if}}
 <!-- RegionServers -->
 {{#if view.isRegionServerCreated}}
   <tr {{bindAttr class=":component view.regionServerComponent.componentName"}}>

http://git-wip-us.apache.org/repos/asf/ambari/blob/110c8cd6/ambari-web/app/views/main/service/services/hbase.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/service/services/hbase.js b/ambari-web/app/views/main/service/services/hbase.js
index d2716aa..8a4bf80 100644
--- a/ambari-web/app/views/main/service/services/hbase.js
+++ b/ambari-web/app/views/main/service/services/hbase.js
@@ -89,6 +89,10 @@ App.MainDashboardServiceHbaseView = App.MainDashboardServiceView.extend({
     componentName: 'HBASE_REGIONSERVER'
   }),
 
+  restServerComponent: Em.Object.create({
+    componentName: 'HBASE_REST_SERVER'
+  }),
+
   phoenixServerComponent: Em.Object.create({
     componentName: 'PHOENIX_QUERY_SERVER'
   }),
@@ -97,6 +101,10 @@ App.MainDashboardServiceHbaseView = App.MainDashboardServiceView.extend({
     return this.isServiceComponentCreated('HBASE_REGIONSERVER');
   }.property('App.router.clusterController.isComponentsStateLoaded'),
 
+  isRestServerCreated: function () {
+    return this.isServiceComponentCreated('HBASE_REST_SERVER');
+  }.property('App.router.clusterController.isComponentsStateLoaded'),
+
   isPhoenixQueryServerCreated: function () {
     return this.isServiceComponentCreated('PHOENIX_QUERY_SERVER');
   }.property('App.router.clusterController.isComponentsStateLoaded')

[14/19] ambari git commit: AMBARI-21272. LDAP sync requires user to be root - re-apply due to accidental revert (echekanskiy)

Posted by jo...@apache.org.
AMBARI-21272. LDAP sync requires user to be root - re-apply due to accidental revert (echekanskiy)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/fe761ef7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/fe761ef7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/fe761ef7

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: fe761ef7ee796de450dd71708900184228cbe6e3
Parents: 110c8cd
Author: Eugene Chekanskiy <ec...@apache.org>
Authored: Mon Jul 24 16:42:23 2017 +0300
Committer: Eugene Chekanskiy <ec...@apache.org>
Committed: Mon Jul 24 16:42:23 2017 +0300

----------------------------------------------------------------------
 .../src/main/python/ambari_server/setupSecurity.py     |  4 ----
 ambari-server/src/test/python/TestAmbariServer.py      | 13 +------------
 2 files changed, 1 insertion(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/fe761ef7/ambari-server/src/main/python/ambari_server/setupSecurity.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/setupSecurity.py b/ambari-server/src/main/python/ambari_server/setupSecurity.py
index ea3b9e5..f175d7c 100644
--- a/ambari-server/src/main/python/ambari_server/setupSecurity.py
+++ b/ambari-server/src/main/python/ambari_server/setupSecurity.py
@@ -275,10 +275,6 @@ class LdapSyncOptions:
 #
 def sync_ldap(options):
   logger.info("Sync users and groups with configured LDAP.")
-  if not is_root():
-    err = 'Ambari-server sync-ldap should be run with ' \
-          'root-level privileges'
-    raise FatalException(4, err)
 
   properties = get_ambari_properties()
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/fe761ef7/ambari-server/src/test/python/TestAmbariServer.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/TestAmbariServer.py b/ambari-server/src/test/python/TestAmbariServer.py
index 1ac77ab2..fb0bb70 100644
--- a/ambari-server/src/test/python/TestAmbariServer.py
+++ b/ambari-server/src/test/python/TestAmbariServer.py
@@ -7747,13 +7747,12 @@ class TestAmbariServer(TestCase):
   @patch("urllib2.urlopen")
   @patch("urllib2.Request")
   @patch("base64.encodestring")
-  @patch("ambari_server.setupSecurity.is_root")
   @patch("ambari_server.setupSecurity.is_server_runing")
   @patch("ambari_server.setupSecurity.get_ambari_properties")
   @patch("ambari_server.setupSecurity.get_validated_string_input")
   @patch("ambari_server.setupSecurity.logger")
   def test_sync_ldap_forbidden(self, logger_mock, get_validated_string_input_method, get_ambari_properties_method,
-                                is_server_runing_method, is_root_method,
+                                is_server_runing_method,
                                 encodestring_method, request_constructor, urlopen_method):
 
     options = self._create_empty_options_mock()
@@ -7762,16 +7761,6 @@ class TestAmbariServer(TestCase):
     options.ldap_sync_users = None
     options.ldap_sync_groups = None
 
-    is_root_method.return_value = False
-    try:
-      sync_ldap(options)
-      self.fail("Should throw exception if not root")
-    except FatalException as fe:
-      # Expected
-      self.assertTrue("root-level" in fe.reason)
-      pass
-    is_root_method.return_value = True
-
     is_server_runing_method.return_value = (None, None)
     try:
       sync_ldap(options)

[15/19] ambari git commit: AMBARI-21345 Add host doesn't fully add a node when include/exclude files are used (additional patch) (dsen)

Posted by jo...@apache.org.
AMBARI-21345 Add host doesn't fully add a node when include/exclude files are used (additional patch) (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b56446af
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b56446af
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b56446af

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: b56446af660e1bb1e1a61db70a04ac0396bd6851
Parents: fe761ef
Author: Dmytro Sen <ds...@apache.org>
Authored: Mon Jul 24 19:58:29 2017 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Mon Jul 24 19:58:29 2017 +0300

----------------------------------------------------------------------
 .../AmbariCustomCommandExecutionHelper.java     |  5 +++
 .../AmbariManagementControllerImpl.java         | 43 ++++++++++++++++----
 2 files changed, 39 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b56446af/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
index 723a10d..9bd38fc 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
@@ -139,6 +139,7 @@ public class AmbariCustomCommandExecutionHelper {
   public final static String DECOM_SLAVE_COMPONENT = "slave_type";
   public final static String HBASE_MARK_DRAINING_ONLY = "mark_draining_only";
   public final static String UPDATE_FILES_ONLY = "update_files_only";
+  public final static String MULTI_SERVICES_DECOM_REQUEST = "multi_services_decom_request";
 
   private final static String ALIGN_MAINTENANCE_STATE = "align_maintenance_state";
 
@@ -814,6 +815,10 @@ public class AmbariCustomCommandExecutionHelper {
     Set<String> includedHosts = getHostList(actionExecutionContext.getParameters(),
                                             DECOM_INCLUDED_HOSTS);
 
+    if (actionExecutionContext.getParameters().get(MULTI_SERVICES_DECOM_REQUEST) != null &&
+            actionExecutionContext.getParameters().get(MULTI_SERVICES_DECOM_REQUEST).equalsIgnoreCase("true")) {
+      includedHosts = getHostList(actionExecutionContext.getParameters(), masterCompType + "_" + DECOM_INCLUDED_HOSTS);
+    }
 
     Set<String> cloneSet = new HashSet<>(excludedHosts);
     cloneSet.retainAll(includedHosts);

http://git-wip-us.apache.org/repos/asf/ambari/blob/b56446af/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 850838d..13526c4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -3335,7 +3335,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
 
     if (changedComps != null) {
       for (Entry<State, List<ServiceComponent>> entry :
-          changedComps.entrySet()) {
+        changedComps.entrySet()) {
         State newState = entry.getKey();
         for (ServiceComponent sc : entry.getValue()) {
           sc.setDesiredState(newState);
@@ -3343,7 +3343,9 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
       }
     }
 
+    //keep 2 maps for simpler maintenance
     Map<String, String> serviceMasterForDecommissionMap = new HashMap<>();
+    Map<String, Set<String>> masterSlaveHostsMap = new HashMap<>();
     for (Map<State, List<ServiceComponentHost>> stateScHostMap :
         changedScHosts.values()) {
       for (Entry<State, List<ServiceComponentHost>> entry :
@@ -3365,6 +3367,10 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
               //Filter services whose masters are not started
               if (isServiceComponentStartedOnAnyHost(cluster, serviceName, masterComponentName)) {
                 serviceMasterForDecommissionMap.put(serviceName, masterComponentName);
+                if (!masterSlaveHostsMap.containsKey(masterComponentName)) {
+                  masterSlaveHostsMap.put(masterComponentName, new HashSet<String>());
+                }
+                masterSlaveHostsMap.get(masterComponentName).add(sch.getHostName());
               } else {
                 LOG.info(String.format("Not adding %s service from include/exclude files refresh map because it's master is not started", serviceName));
               }
@@ -3380,7 +3386,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     }
 
     try {
-      createAndExecuteRefreshIncludeExcludeFilesActionForMasters(serviceMasterForDecommissionMap, cluster.getClusterName());
+      createAndExecuteRefreshIncludeExcludeFilesActionForMasters(serviceMasterForDecommissionMap, masterSlaveHostsMap, cluster.getClusterName(), false);
     } catch (AmbariException e) {
       LOG.error("Exception during refresh include exclude files action : ", e);
     }
@@ -3651,7 +3657,9 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
       }
     }
 
+    //keep 2 maps for simpler maintenance
     Map<String, Map<String, String>> clusterServiceMasterForDecommissionMap = new HashMap<>();
+    Map<String, Map<String, Set<String>>> clusterMasterSlaveHostsMap = new HashMap<>();
 
     for (Entry<ServiceComponent, Set<ServiceComponentHost>> entry : safeToRemoveSCHs.entrySet()) {
       for (ServiceComponentHost componentHost : entry.getValue()) {
@@ -3669,10 +3677,19 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
             }
             if (clusterServiceMasterForDecommissionMap.containsKey(componentHost.getClusterName())) {
               clusterServiceMasterForDecommissionMap.get(componentHost.getClusterName()).put(componentHost.getServiceName(), masterComponentName);
+              Map<String, Set<String>> masterSlaveMap  = clusterMasterSlaveHostsMap.get(componentHost.getClusterName());
+              if (!masterSlaveMap.containsKey(masterComponentName)) {
+                masterSlaveMap.put(masterComponentName, new HashSet<String>());
+              }
+              masterSlaveMap.get(masterComponentName).add(componentHost.getHostName());
             } else {
-              Map<String, String> tempMap = new HashMap<>();
-              tempMap.put(componentHost.getServiceName(), masterComponentName);
-              clusterServiceMasterForDecommissionMap.put(componentHost.getClusterName(), tempMap);
+              Map<String, String> serviceMasterMap = new HashMap<>();
+              serviceMasterMap.put(componentHost.getServiceName(), masterComponentName);
+              clusterServiceMasterForDecommissionMap.put(componentHost.getClusterName(), serviceMasterMap);
+
+              Map<String, Set<String>> masterSlaveHostsMap = new HashMap<>();
+              masterSlaveHostsMap.put(masterComponentName, new HashSet<String>(Collections.singletonList(componentHost.getHostName())));
+              clusterMasterSlaveHostsMap.put(componentHost.getClusterName(), masterSlaveHostsMap);
             }
           }
         } catch (Exception ex) {
@@ -3682,7 +3699,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     }
 
     for (String cluster : clusterServiceMasterForDecommissionMap.keySet()) {
-      createAndExecuteRefreshIncludeExcludeFilesActionForMasters(clusterServiceMasterForDecommissionMap.get(cluster), cluster);
+      createAndExecuteRefreshIncludeExcludeFilesActionForMasters(clusterServiceMasterForDecommissionMap.get(cluster), clusterMasterSlaveHostsMap.get(cluster), cluster, true);
     }
 
     //Do not break behavior for existing clients where delete request contains only 1 host component.
@@ -3740,10 +3757,10 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
   /**
    * Creates and triggers an action to update include and exclude files for the master components depending on current cluster topology and components state
    * @param serviceMasterMap
-   * @param clusterName
-   * @throws AmbariException
+   * @param masterSlaveHostsMap
+   * @param clusterName @throws AmbariException
    */
-  private void createAndExecuteRefreshIncludeExcludeFilesActionForMasters(Map<String, String> serviceMasterMap, String clusterName) throws AmbariException {
+  private void createAndExecuteRefreshIncludeExcludeFilesActionForMasters(Map<String, String> serviceMasterMap, Map<String, Set<String>> masterSlaveHostsMap, String clusterName, boolean isDecommission) throws AmbariException {
     //Clear include/exclude files or draining list except HBASE
     serviceMasterMap.remove(Service.Type.HBASE.toString());
     //exit if empty
@@ -3757,6 +3774,14 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     HashMap<String, String> params = new HashMap<>();
     params.put(AmbariCustomCommandExecutionHelper.UPDATE_FILES_ONLY, "false");
 
+    for (String masterName : masterSlaveHostsMap.keySet()) {
+      if (!isDecommission) {
+        params.put(masterName + "_" + AmbariCustomCommandExecutionHelper.DECOM_INCLUDED_HOSTS, StringUtils.join(masterSlaveHostsMap.get(masterName).toArray(), ","));
+      }
+    }
+
+    params.put(AmbariCustomCommandExecutionHelper.MULTI_SERVICES_DECOM_REQUEST, "true");
+
     //Create filter for command
     List<RequestResourceFilter> resourceFilters = new ArrayList<>(serviceMasterMap.size());
     for (String serviceName : serviceMasterMap.keySet()) {
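
The Java change above threads two parallel maps into the refresh action: service -> master component and master component -> hosts to include, then serialises each host set into a per-master command parameter. Below is a sketch of the resulting parameter shape in Python; the host names are made up, and the constant values other than multi_services_decom_request (which appears in the diff) are assumptions.

    # Stand-ins for the Java constants used above; only the value of
    # MULTI_SERVICES_DECOM_REQUEST is shown in the diff, the other two
    # strings are assumptions for illustration.
    UPDATE_FILES_ONLY = "update_files_only"
    MULTI_SERVICES_DECOM_REQUEST = "multi_services_decom_request"
    DECOM_INCLUDED_HOSTS = "included_hosts"

    # master component -> slave hosts being added (hypothetical hosts).
    master_slave_hosts = {
        "NAMENODE": {"host1.example.com", "host2.example.com"},
        "RESOURCEMANAGER": {"host2.example.com"},
    }

    params = {UPDATE_FILES_ONLY: "false", MULTI_SERVICES_DECOM_REQUEST: "true"}
    for master, hosts in master_slave_hosts.items():
        # Mirrors the non-decommission branch: one <MASTER>_<included hosts>
        # parameter per master, with a comma-joined host list.
        params[master + "_" + DECOM_INCLUDED_HOSTS] = ",".join(sorted(hosts))

    print(params)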


[09/19] ambari git commit: AMBARI-21556. Critical alert about Metrics Collector ZooKeeper Server Process due to unsubstituted variable. (swagle)

Posted by jo...@apache.org.
AMBARI-21556. Critical alert about Metrics Collector ZooKeeper Server Process due to unsubstituted variable. (swagle)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f0b03147
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f0b03147
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f0b03147

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: f0b03147fdbc82a62e20a80ad6e132d95f3d277f
Parents: 2441f68
Author: Siddharth Wagle <sw...@hortonworks.com>
Authored: Fri Jul 21 14:52:06 2017 -0700
Committer: Siddharth Wagle <sw...@hortonworks.com>
Committed: Fri Jul 21 14:52:13 2017 -0700

----------------------------------------------------------------------
 .../4.0/services/AMBARI_METRICS/alerts.json     | 25 --------------------
 .../4.2/services/AMBARI_METRICS/alerts.json     | 25 --------------------
 2 files changed, 50 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f0b03147/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/alerts.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/alerts.json
index 221e585..8b572ef 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/alerts.json
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/alerts.json
@@ -139,31 +139,6 @@
             "value": "{0} * 100"
           }
         }
-      },
-      {
-        "name": "ams_metrics_collector_zookeeper_server_process",
-        "label": "Metrics Collector - ZooKeeper Server Process",
-        "description": "This host-level alert is triggered if the Metrics Collector's ZooKeeper server process cannot be determined to be up and listening on the network.",
-        "interval": 1,
-        "scope": "ANY",
-        "source": {
-          "type": "PORT",
-          "uri": "{{ams-hbase-site/hbase.zookeeper.property.clientPort}}",
-          "default_port": 61181,
-          "reporting": {
-            "ok": {
-              "text": "TCP OK - {0:.3f}s response on port {1}"
-            },
-            "warning": {
-              "text": "TCP OK - {0:.3f}s response on port {1}",
-              "value": 1.5
-            },
-            "critical": {
-              "text": "Connection failed: {0} to {1}:{2}",
-              "value": 5.0
-            }
-          }
-        }
       }
     ],
     "METRICS_MONITOR": [

http://git-wip-us.apache.org/repos/asf/ambari/blob/f0b03147/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/alerts.json b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/alerts.json
index 0e14b40..90401e6 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/alerts.json
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/alerts.json
@@ -139,31 +139,6 @@
             "value": "{0} * 100"
           }
         }
-      },
-      {
-        "name": "ams_metrics_collector_zookeeper_server_process",
-        "label": "Metrics Collector - ZooKeeper Server Process",
-        "description": "This host-level alert is triggered if the Metrics Collector's ZooKeeper server process cannot be determined to be up and listening on the network.",
-        "interval": 1,
-        "scope": "ANY",
-        "source": {
-          "type": "PORT",
-          "uri": "{{ams-hbase-site/hbase.zookeeper.property.clientPort}}",
-          "default_port": 61181,
-          "reporting": {
-            "ok": {
-              "text": "TCP OK - {0:.3f}s response on port {1}"
-            },
-            "warning": {
-              "text": "TCP OK - {0:.3f}s response on port {1}",
-              "value": 1.5
-            },
-            "critical": {
-              "text": "Connection failed: {0} to {1}:{2}",
-              "value": 5.0
-            }
-          }
-        }
       }
     ],
     "METRICS_MONITOR": [