You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by jo...@apache.org on 2017/07/12 18:47:18 UTC
[01/13] ambari git commit: AMBARI-20950. HdfsResource can not handle
S3 URL when hbase.rootdir is set to S3 URL (aonishuk)
Repository: ambari
Updated Branches:
refs/heads/branch-feature-AMBARI-21348 d85292828 -> 267cd8b0c
AMBARI-20950. HdfsResource can not handle S3 URL when hbase.rootdir is set to S3 URL (aonishuk)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/75026913
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/75026913
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/75026913
Branch: refs/heads/branch-feature-AMBARI-21348
Commit: 7502691354f4bad8eee47885cac24a4e528d9fc7
Parents: 065dd9c
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Mon Jul 10 12:58:16 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Jul 10 12:58:16 2017 +0300
----------------------------------------------------------------------
.../HBASE/0.96.0.2.0/package/scripts/hbase.py | 12 +++++++-----
.../HBASE/0.96.0.2.0/package/scripts/params_linux.py | 3 +++
2 files changed, 10 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/75026913/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py
index 68856ce..ac71ce4 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py
@@ -17,6 +17,7 @@ See the License for the specific language governing permissions and
limitations under the License.
"""
+from urlparse import urlparse
import os
from resource_management import *
import sys
@@ -193,11 +194,12 @@ def hbase(name=None):
owner=params.hbase_user
)
if name == "master":
- params.HdfsResource(params.hbase_hdfs_root_dir,
- type="directory",
- action="create_on_execute",
- owner=params.hbase_user
- )
+ if not params.hbase_hdfs_root_dir_protocol or params.hbase_hdfs_root_dir_protocol == urlparse(params.default_fs).scheme:
+ params.HdfsResource(params.hbase_hdfs_root_dir,
+ type="directory",
+ action="create_on_execute",
+ owner=params.hbase_user
+ )
params.HdfsResource(params.hbase_staging_dir,
type="directory",
action="create_on_execute",
http://git-wip-us.apache.org/repos/asf/ambari/blob/75026913/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
index 43c6036..1ee5248 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
@@ -17,6 +17,8 @@ See the License for the specific language governing permissions and
limitations under the License.
"""
+from urlparse import urlparse
+
import status_params
import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
@@ -234,6 +236,7 @@ else:
hbase_env_sh_template = config['configurations']['hbase-env']['content']
hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']
+hbase_hdfs_root_dir_protocol = urlparse(hbase_hdfs_root_dir).scheme
hbase_staging_dir = "/apps/hbase/staging"
#for create_hdfs_directory
hostname = config["hostname"]
[04/13] ambari git commit: AMBARI-21427. Assigning hosts concurrently
to same config group may fail with
'org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException:
Config group already exist'. (stoader)
Posted by jo...@apache.org.
AMBARI-21427. Assigning hosts concurrently to same config group may fail with 'org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException: Config group already exist'. (stoader)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/515a641c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/515a641c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/515a641c
Branch: refs/heads/branch-feature-AMBARI-21348
Commit: 515a641cda7045ddd7c01c58e294c2d1853b4cec
Parents: e92b503
Author: Toader, Sebastian <st...@hortonworks.com>
Authored: Mon Jul 10 13:02:20 2017 +0200
Committer: Toader, Sebastian <st...@hortonworks.com>
Committed: Tue Jul 11 11:19:59 2017 +0200
----------------------------------------------------------------------
.../ambari/server/topology/AmbariContext.java | 30 ++++++++++++++++----
1 file changed, 25 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/515a641c/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
index 106d7c8..f5b674e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -30,6 +30,7 @@ import java.util.SortedSet;
import java.util.TreeSet;
import java.util.concurrent.Callable;
import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.Lock;
import javax.annotation.Nullable;
import javax.inject.Inject;
@@ -79,6 +80,7 @@ import org.slf4j.LoggerFactory;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.Striped;
/**
@@ -112,6 +114,16 @@ public class AmbariContext {
private final static Logger LOG = LoggerFactory.getLogger(AmbariContext.class);
+
+ /**
+ * When config groups are created using Blueprints these are created when
+ * hosts join a hostgroup and are added to the corresponding config group.
+ * Since hosts join in parallel there might be a race condition in creating
+ * the config group a host is to be added to. Thus we need to synchronize
+ * the creation of config groups with the same name.
+ */
+ private Striped<Lock> configGroupCreateLock = Striped.lazyWeakLock(1);
+
public boolean isClusterKerberosEnabled(long clusterId) {
Cluster cluster;
try {
@@ -328,11 +340,17 @@ public class AmbariContext {
}
public void registerHostWithConfigGroup(final String hostName, final ClusterTopology topology, final String groupName) {
+ final String qualifiedGroupName = getConfigurationGroupName(topology.getBlueprint().getName(), groupName);
+
+ Lock configGroupLock = configGroupCreateLock.get(qualifiedGroupName);
+
try {
+ configGroupLock.lock();
+
boolean hostAdded = RetryHelper.executeWithRetry(new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
- return addHostToExistingConfigGroups(hostName, topology, groupName);
+ return addHostToExistingConfigGroups(hostName, topology, qualifiedGroupName);
}
});
if (!hostAdded) {
@@ -342,6 +360,9 @@ public class AmbariContext {
LOG.error("Unable to register config group for host: ", e);
throw new RuntimeException("Unable to register config group for host: " + hostName);
}
+ finally {
+ configGroupLock.unlock();
+ }
}
public RequestStatusResponse installHost(String hostName, String clusterName, Collection<String> skipInstallForComponents, Collection<String> dontSkipInstallForComponents, boolean skipFailure) {
@@ -549,7 +570,7 @@ public class AmbariContext {
/**
* Add the new host to an existing config group.
*/
- private boolean addHostToExistingConfigGroups(String hostName, ClusterTopology topology, String groupName) {
+ private boolean addHostToExistingConfigGroups(String hostName, ClusterTopology topology, String configGroupName) {
boolean addedHost = false;
Clusters clusters;
Cluster cluster;
@@ -563,9 +584,8 @@ public class AmbariContext {
// I don't know of a method to get config group by name
//todo: add a method to get config group by name
Map<Long, ConfigGroup> configGroups = cluster.getConfigGroups();
- String qualifiedGroupName = getConfigurationGroupName(topology.getBlueprint().getName(), groupName);
for (ConfigGroup group : configGroups.values()) {
- if (group.getName().equals(qualifiedGroupName)) {
+ if (group.getName().equals(configGroupName)) {
try {
Host host = clusters.getHost(hostName);
addedHost = true;
[07/13] ambari git commit: Merge branch 'branch-feature-AMBARI-21348'
into branch-2.5
Posted by jo...@apache.org.
Merge branch 'branch-feature-AMBARI-21348' into branch-2.5
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/37192c9b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/37192c9b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/37192c9b
Branch: refs/heads/branch-feature-AMBARI-21348
Commit: 37192c9b96511a55700fe77594e7bce03d009691
Parents: e92aae4 40cd87d
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Tue Jul 11 12:23:53 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Tue Jul 11 12:23:53 2017 -0400
----------------------------------------------------------------------
ambari-agent/pom.xml | 2 +
.../resource_management/TestPackageResource.py | 41 +
.../core/providers/package/__init__.py | 2 +-
.../core/providers/package/apt.py | 10 +-
.../core/providers/package/choco.py | 4 +-
.../core/providers/package/yumrpm.py | 9 +-
.../core/providers/package/zypper.py | 9 +-
.../core/resources/packaging.py | 6 +
.../libraries/functions/conf_select.py | 57 +-
.../libraries/functions/stack_features.py | 13 +
.../libraries/functions/stack_tools.py | 39 +
.../libraries/script/script.py | 19 +-
ambari-server/pom.xml | 8 +
ambari-server/src/main/assemblies/server.xml | 10 +
.../apache/ambari/annotations/Experimental.java | 6 +
.../ambari/annotations/ExperimentalFeature.java | 7 +-
.../actionmanager/ExecutionCommandWrapper.java | 45 +
.../ambari/server/agent/ExecutionCommand.java | 1 +
.../alerts/ComponentVersionAlertRunnable.java | 4 +-
.../eventcreator/UpgradeEventCreator.java | 2 +-
.../ambari/server/checks/CheckDescription.java | 8 +
.../ambari/server/checks/JavaVersionCheck.java | 102 +
.../server/checks/PreviousUpgradeCompleted.java | 11 +-
.../controller/ActionExecutionContext.java | 26 +
.../controller/AmbariActionExecutionHelper.java | 21 +-
.../AmbariCustomCommandExecutionHelper.java | 10 -
.../BlueprintConfigurationProcessor.java | 232 +-
.../ClusterStackVersionResourceProvider.java | 187 +-
.../PreUpgradeCheckResourceProvider.java | 9 +-
.../internal/UpgradeResourceProvider.java | 373 +-
.../listeners/upgrade/StackVersionListener.java | 225 +-
.../server/orm/dao/RepositoryVersionDAO.java | 19 +
.../orm/entities/RepositoryVersionEntity.java | 26 +-
.../server/orm/entities/UpgradeEntity.java | 46 +-
.../upgrades/ChangeStackReferencesAction.java | 108 +
.../upgrades/ComponentVersionCheckAction.java | 20 +-
.../upgrades/FinalizeUpgradeAction.java | 160 +-
.../upgrades/UpdateDesiredStackAction.java | 111 +-
.../ambari/server/stack/StackManager.java | 14 +-
.../org/apache/ambari/server/state/Cluster.java | 4 +
.../ambari/server/state/ConfigHelper.java | 47 +-
.../ambari/server/state/UpgradeContext.java | 209 +-
.../server/state/UpgradeContextFactory.java | 14 +-
.../ambari/server/state/UpgradeHelper.java | 56 +-
.../server/state/cluster/ClusterImpl.java | 101 +-
.../services/RetryUpgradeActionService.java | 4 +-
.../state/stack/upgrade/HostOrderGrouping.java | 5 +-
.../ambari/server/topology/AmbariContext.java | 36 +-
.../server/upgrade/UpgradeCatalog220.java | 10 +-
.../server/upgrade/UpgradeCatalog252.java | 105 +
.../main/resources/Ambari-DDL-Derby-CREATE.sql | 8 +-
.../main/resources/Ambari-DDL-MySQL-CREATE.sql | 8 +-
.../main/resources/Ambari-DDL-Oracle-CREATE.sql | 8 +-
.../resources/Ambari-DDL-Postgres-CREATE.sql | 8 +-
.../resources/Ambari-DDL-SQLAnywhere-CREATE.sql | 8 +-
.../resources/Ambari-DDL-SQLServer-CREATE.sql | 8 +-
.../package/alerts/alert_hive_metastore.py | 11 +-
.../package/alerts/alert_llap_app_status.py | 12 +-
.../0.12.0.2.0/package/scripts/status_params.py | 6 +
.../package/alerts/alert_check_oozie_server.py | 8 +-
.../system_action_definitions.xml | 10 +
.../scripts/force_remove_packages.py | 56 +
.../custom_actions/scripts/install_packages.py | 6 +-
.../resources/host_scripts/alert_disk_space.py | 10 +-
.../host_scripts/alert_version_select.py | 16 +-
.../4.0/blueprints/multinode-default.json | 182 +
.../4.0/blueprints/singlenode-default.json | 133 +
.../4.0/configuration/cluster-env.xml | 338 +
.../4.0/hooks/after-INSTALL/scripts/hook.py | 38 +
.../4.0/hooks/after-INSTALL/scripts/params.py | 88 +
.../scripts/shared_initialization.py | 89 +
.../hooks/before-ANY/files/changeToSecureUid.sh | 63 +
.../4.0/hooks/before-ANY/scripts/hook.py | 36 +
.../4.0/hooks/before-ANY/scripts/params.py | 226 +
.../before-ANY/scripts/shared_initialization.py | 242 +
.../4.0/hooks/before-INSTALL/scripts/hook.py | 37 +
.../4.0/hooks/before-INSTALL/scripts/params.py | 111 +
.../scripts/repo_initialization.py | 63 +
.../scripts/shared_initialization.py | 34 +
.../4.0/hooks/before-RESTART/scripts/hook.py | 29 +
.../hooks/before-START/files/checkForFormat.sh | 65 +
.../before-START/files/fast-hdfs-resource.jar | Bin 0 -> 28296598 bytes
.../before-START/files/task-log4j.properties | 134 +
.../hooks/before-START/files/topology_script.py | 66 +
.../4.0/hooks/before-START/scripts/hook.py | 40 +
.../4.0/hooks/before-START/scripts/params.py | 211 +
.../before-START/scripts/rack_awareness.py | 71 +
.../scripts/shared_initialization.py | 152 +
.../templates/commons-logging.properties.j2 | 43 +
.../templates/exclude_hosts_list.j2 | 21 +
.../templates/hadoop-metrics2.properties.j2 | 88 +
.../before-START/templates/health_check.j2 | 81 +
.../templates/include_hosts_list.j2 | 21 +
.../templates/topology_mappings.data.j2 | 24 +
.../stacks/BigInsights/4.0/kerberos.json | 68 +
.../stacks/BigInsights/4.0/metainfo.xml | 22 +
.../4.0/properties/stack_features.json | 214 +
.../BigInsights/4.0/properties/stack_tools.json | 14 +
.../stacks/BigInsights/4.0/repos/repoinfo.xml | 35 +
.../BigInsights/4.0/role_command_order.json | 70 +
.../4.0/services/AMBARI_METRICS/alerts.json | 183 +
.../AMBARI_METRICS/configuration/ams-env.xml | 114 +
.../configuration/ams-hbase-env.xml | 245 +
.../configuration/ams-hbase-log4j.xml | 147 +
.../configuration/ams-hbase-policy.xml | 56 +
.../configuration/ams-hbase-security-site.xml | 167 +
.../configuration/ams-hbase-site.xml | 431 +
.../AMBARI_METRICS/configuration/ams-log4j.xml | 66 +
.../AMBARI_METRICS/configuration/ams-site.xml | 578 ++
.../4.0/services/AMBARI_METRICS/kerberos.json | 122 +
.../4.0/services/AMBARI_METRICS/metainfo.xml | 147 +
.../4.0/services/AMBARI_METRICS/metrics.json | 2472 +++++
.../alerts/alert_ambari_metrics_monitor.py | 104 +
.../package/files/hbaseSmokeVerify.sh | 34 +
.../files/service-metrics/AMBARI_METRICS.txt | 245 +
.../package/files/service-metrics/FLUME.txt | 17 +
.../package/files/service-metrics/HBASE.txt | 588 ++
.../package/files/service-metrics/HDFS.txt | 277 +
.../package/files/service-metrics/HOST.txt | 37 +
.../package/files/service-metrics/KAFKA.txt | 190 +
.../package/files/service-metrics/STORM.txt | 7 +
.../package/files/service-metrics/YARN.txt | 178 +
.../AMBARI_METRICS/package/scripts/__init__.py | 19 +
.../AMBARI_METRICS/package/scripts/ams.py | 388 +
.../package/scripts/ams_service.py | 103 +
.../AMBARI_METRICS/package/scripts/functions.py | 51 +
.../AMBARI_METRICS/package/scripts/hbase.py | 267 +
.../package/scripts/hbase_master.py | 70 +
.../package/scripts/hbase_regionserver.py | 66 +
.../package/scripts/hbase_service.py | 53 +
.../package/scripts/metrics_collector.py | 133 +
.../package/scripts/metrics_monitor.py | 58 +
.../AMBARI_METRICS/package/scripts/params.py | 254 +
.../package/scripts/params_linux.py | 50 +
.../package/scripts/params_windows.py | 53 +
.../package/scripts/service_check.py | 165 +
.../package/scripts/service_mapping.py | 22 +
.../package/scripts/split_points.py | 236 +
.../AMBARI_METRICS/package/scripts/status.py | 46 +
.../package/scripts/status_params.py | 39 +
.../package/templates/ams.conf.j2 | 35 +
.../templates/ams_collector_jaas.conf.j2 | 26 +
.../templates/ams_zookeeper_jaas.conf.j2 | 26 +
.../hadoop-metrics2-hbase.properties.j2 | 63 +
.../package/templates/hbase_client_jaas.conf.j2 | 23 +
.../templates/hbase_grant_permissions.j2 | 39 +
.../package/templates/hbase_master_jaas.conf.j2 | 26 +
.../templates/hbase_regionserver_jaas.conf.j2 | 26 +
.../package/templates/metric_groups.conf.j2 | 37 +
.../package/templates/metric_monitor.ini.j2 | 31 +
.../package/templates/regionservers.j2 | 20 +
.../package/templates/smoketest_metrics.json.j2 | 15 +
.../BigInsights/4.0/services/FLUME/alerts.json | 27 +
.../services/FLUME/configuration/flume-conf.xml | 38 +
.../services/FLUME/configuration/flume-env.xml | 94 +
.../BigInsights/4.0/services/FLUME/metainfo.xml | 69 +
.../BigInsights/4.0/services/FLUME/metrics.json | 430 +
.../package/alerts/alert_flume_agent_status.py | 106 +
.../4.0/services/FLUME/package/scripts/flume.py | 228 +
.../FLUME/package/scripts/flume_check.py | 40 +
.../FLUME/package/scripts/flume_handler.py | 145 +
.../FLUME/package/scripts/flume_upgrade.py | 94 +
.../services/FLUME/package/scripts/params.py | 101 +
.../FLUME/package/scripts/params_linux.py | 30 +
.../templates/flume-metrics2.properties.j2 | 26 +
.../FLUME/package/templates/flume.conf.j2 | 24 +
.../FLUME/package/templates/log4j.properties.j2 | 67 +
.../BigInsights/4.0/services/HBASE/alerts.json | 157 +
.../services/HBASE/configuration/hbase-env.xml | 183 +
.../configuration/hbase-javaopts-properties.xml | 28 +
.../HBASE/configuration/hbase-log4j.xml | 144 +
.../HBASE/configuration/hbase-policy.xml | 56 +
.../services/HBASE/configuration/hbase-site.xml | 732 ++
.../4.0/services/HBASE/kerberos.json | 159 +
.../BigInsights/4.0/services/HBASE/metainfo.xml | 161 +
.../BigInsights/4.0/services/HBASE/metrics.json | 9410 +++++++++++++++++
.../HBASE/package/files/draining_servers.rb | 164 +
.../HBASE/package/files/hbaseSmokeVerify.sh | 34 +
.../services/HBASE/package/scripts/__init__.py | 19 +
.../services/HBASE/package/scripts/functions.py | 54 +
.../4.0/services/HBASE/package/scripts/hbase.py | 232 +
.../HBASE/package/scripts/hbase_client.py | 65 +
.../HBASE/package/scripts/hbase_decommission.py | 74 +
.../HBASE/package/scripts/hbase_master.py | 129 +
.../HBASE/package/scripts/hbase_regionserver.py | 131 +
.../package/scripts/hbase_restgatewayserver.py | 84 +
.../HBASE/package/scripts/hbase_service.py | 51 +
.../HBASE/package/scripts/hbase_upgrade.py | 37 +
.../services/HBASE/package/scripts/params.py | 197 +
.../HBASE/package/scripts/service_check.py | 78 +
.../HBASE/package/scripts/status_params.py | 44 +
.../services/HBASE/package/scripts/upgrade.py | 52 +
...-metrics2-hbase.properties-GANGLIA-MASTER.j2 | 109 +
...doop-metrics2-hbase.properties-GANGLIA-RS.j2 | 107 +
.../HBASE/package/templates/hbase-smoke.sh.j2 | 44 +
.../HBASE/package/templates/hbase.conf.j2 | 35 +
.../package/templates/hbase_client_jaas.conf.j2 | 23 +
.../templates/hbase_grant_permissions.j2 | 39 +
.../package/templates/hbase_master_jaas.conf.j2 | 26 +
.../templates/hbase_regionserver_jaas.conf.j2 | 26 +
.../package/templates/hbase_rest_jaas.conf.j2 | 26 +
.../HBASE/package/templates/regionservers.j2 | 20 +
.../BigInsights/4.0/services/HBASE/widgets.json | 510 +
.../BigInsights/4.0/services/HDFS/alerts.json | 657 ++
.../services/HDFS/configuration/core-site.xml | 203 +
.../services/HDFS/configuration/hadoop-env.xml | 322 +
.../HDFS/configuration/hadoop-policy.xml | 145 +
.../services/HDFS/configuration/hdfs-log4j.xml | 202 +
.../services/HDFS/configuration/hdfs-site.xml | 669 ++
.../services/HDFS/configuration/ssl-client.xml | 65 +
.../services/HDFS/configuration/ssl-server.xml | 72 +
.../BigInsights/4.0/services/HDFS/kerberos.json | 242 +
.../BigInsights/4.0/services/HDFS/metainfo.xml | 234 +
.../BigInsights/4.0/services/HDFS/metrics.json | 7769 +++++++++++++++
.../package/alerts/alert_checkpoint_time.py | 146 +
.../package/alerts/alert_ha_namenode_health.py | 176 +
.../HDFS/package/files/checkForFormat.sh | 70 +
.../services/HDFS/package/files/checkWebUI.py | 56 +
.../scripts/balancer-emulator/hdfs-command.py | 45 +
.../services/HDFS/package/scripts/datanode.py | 144 +
.../HDFS/package/scripts/datanode_upgrade.py | 114 +
.../4.0/services/HDFS/package/scripts/hdfs.py | 129 +
.../HDFS/package/scripts/hdfs_client.py | 112 +
.../HDFS/package/scripts/hdfs_datanode.py | 75 +
.../HDFS/package/scripts/hdfs_namenode.py | 483 +
.../HDFS/package/scripts/hdfs_nfsgateway.py | 72 +
.../HDFS/package/scripts/hdfs_rebalance.py | 130 +
.../HDFS/package/scripts/hdfs_snamenode.py | 50 +
.../HDFS/package/scripts/journalnode.py | 169 +
.../HDFS/package/scripts/journalnode_upgrade.py | 136 +
.../services/HDFS/package/scripts/namenode.py | 319 +
.../HDFS/package/scripts/namenode_ha_state.py | 205 +
.../HDFS/package/scripts/namenode_upgrade.py | 262 +
.../services/HDFS/package/scripts/nfsgateway.py | 138 +
.../4.0/services/HDFS/package/scripts/params.py | 326 +
.../HDFS/package/scripts/service_check.py | 119 +
.../services/HDFS/package/scripts/snamenode.py | 142 +
.../HDFS/package/scripts/status_params.py | 42 +
.../4.0/services/HDFS/package/scripts/utils.py | 357 +
.../services/HDFS/package/scripts/zkfc_slave.py | 148 +
.../package/templates/exclude_hosts_list.j2 | 21 +
.../HDFS/package/templates/hdfs.conf.j2 | 35 +
.../services/HDFS/package/templates/slaves.j2 | 21 +
.../BigInsights/4.0/services/HDFS/widgets.json | 428 +
.../BigInsights/4.0/services/HIVE/alerts.json | 111 +
.../services/HIVE/configuration/hcat-env.xml | 58 +
.../services/HIVE/configuration/hive-env.xml | 211 +
.../HIVE/configuration/hive-exec-log4j.xml | 119 +
.../services/HIVE/configuration/hive-log4j.xml | 137 +
.../services/HIVE/configuration/hive-site.xml | 1248 +++
.../services/HIVE/configuration/webhcat-env.xml | 55 +
.../HIVE/configuration/webhcat-log4j.xml | 79 +
.../HIVE/configuration/webhcat-site.xml | 188 +
.../HIVE/etc/hive-schema-0.12.0.mysql.sql | 777 ++
.../HIVE/etc/hive-schema-0.12.0.oracle.sql | 717 ++
.../HIVE/etc/hive-schema-0.12.0.postgres.sql | 1405 +++
.../HIVE/etc/hive-schema-0.13.0.mysql.sql | 889 ++
.../HIVE/etc/hive-schema-0.13.0.oracle.sql | 834 ++
.../HIVE/etc/hive-schema-0.13.0.postgres.sql | 1537 +++
.../HIVE/etc/hive-schema-0.14.0.mysql.sql | 889 ++
.../HIVE/etc/hive-schema-0.14.0.oracle.sql | 833 ++
.../HIVE/etc/hive-schema-0.14.0.postgres.sql | 1541 +++
.../etc/upgrade-0.12.0-to-0.13.0.oracle.sql | 165 +
.../services/HIVE/etc/upgrade-0.13.0.oracle.sql | 38 +
.../BigInsights/4.0/services/HIVE/kerberos.json | 112 +
.../BigInsights/4.0/services/HIVE/metainfo.xml | 327 +
.../HIVE/package/alerts/alert_hive_metastore.py | 184 +
.../package/alerts/alert_hive_thrift_port.py | 265 +
.../HIVE/package/alerts/alert_webhcat_server.py | 242 +
.../package/etc/hive-schema-0.12.0.mysql.sql | 777 ++
.../package/etc/hive-schema-0.12.0.oracle.sql | 717 ++
.../package/etc/hive-schema-0.12.0.postgres.sql | 1405 +++
.../services/HIVE/package/files/addMysqlUser.sh | 37 +
.../services/HIVE/package/files/hcatSmoke.sh | 36 +
.../services/HIVE/package/files/hiveSmoke.sh | 24 +
.../services/HIVE/package/files/hiveserver2.sql | 23 +
.../HIVE/package/files/hiveserver2Smoke.sh | 32 +
.../4.0/services/HIVE/package/files/pigSmoke.sh | 18 +
.../HIVE/package/files/removeMysqlUser.sh | 33 +
.../HIVE/package/files/startMetastore.sh | 25 +
.../HIVE/package/files/templetonSmoke.sh | 106 +
.../services/HIVE/package/scripts/__init__.py | 19 +
.../4.0/services/HIVE/package/scripts/hcat.py | 73 +
.../HIVE/package/scripts/hcat_client.py | 50 +
.../HIVE/package/scripts/hcat_service_check.py | 78 +
.../4.0/services/HIVE/package/scripts/hive.py | 393 +
.../HIVE/package/scripts/hive_client.py | 81 +
.../HIVE/package/scripts/hive_metastore.py | 199 +
.../HIVE/package/scripts/hive_server.py | 166 +
.../HIVE/package/scripts/hive_server_upgrade.py | 174 +
.../HIVE/package/scripts/hive_service.py | 139 +
.../HIVE/package/scripts/mysql_server.py | 64 +
.../HIVE/package/scripts/mysql_service.py | 52 +
.../HIVE/package/scripts/mysql_users.py | 69 +
.../HIVE/package/scripts/mysql_utils.py | 34 +
.../4.0/services/HIVE/package/scripts/params.py | 418 +
.../HIVE/package/scripts/postgresql_server.py | 109 +
.../HIVE/package/scripts/postgresql_service.py | 39 +
.../HIVE/package/scripts/service_check.py | 91 +
.../HIVE/package/scripts/status_params.py | 87 +
.../services/HIVE/package/scripts/webhcat.py | 117 +
.../HIVE/package/scripts/webhcat_server.py | 146 +
.../HIVE/package/scripts/webhcat_service.py | 60 +
.../package/scripts/webhcat_service_check.py | 117 +
.../HIVE/package/templates/hive.conf.j2 | 35 +
.../package/templates/startHiveserver2.sh.j2 | 24 +
.../package/templates/templeton_smoke.pig.j2 | 24 +
.../BigInsights/4.0/services/KAFKA/alerts.json | 32 +
.../KAFKA/configuration/kafka-broker.xml | 478 +
.../services/KAFKA/configuration/kafka-env.xml | 73 +
.../KAFKA/configuration/kafka-log4j.xml | 117 +
.../4.0/services/KAFKA/kerberos.json | 49 +
.../BigInsights/4.0/services/KAFKA/metainfo.xml | 83 +
.../BigInsights/4.0/services/KAFKA/metrics.json | 264 +
.../4.0/services/KAFKA/package/scripts/kafka.py | 239 +
.../KAFKA/package/scripts/kafka_broker.py | 111 +
.../KAFKA/package/scripts/kafka_upgrade.py | 38 +
.../services/KAFKA/package/scripts/params.py | 115 +
.../KAFKA/package/scripts/service_check.py | 59 +
.../KAFKA/package/scripts/status_params.py | 26 +
.../services/KAFKA/package/scripts/upgrade.py | 88 +
.../4.0/services/KAFKA/package/scripts/utils.py | 38 +
.../KAFKA/package/templates/kafka_jaas.conf.j2 | 41 +
.../KERBEROS/configuration/kerberos-env.xml | 326 +
.../KERBEROS/configuration/krb5-conf.xml | 113 +
.../4.0/services/KERBEROS/kerberos.json | 17 +
.../4.0/services/KERBEROS/metainfo.xml | 147 +
.../KERBEROS/package/scripts/kerberos_client.py | 79 +
.../KERBEROS/package/scripts/kerberos_common.py | 473 +
.../KERBEROS/package/scripts/kerberos_server.py | 141 +
.../services/KERBEROS/package/scripts/params.py | 200 +
.../KERBEROS/package/scripts/service_check.py | 81 +
.../KERBEROS/package/scripts/status_params.py | 32 +
.../services/KERBEROS/package/scripts/utils.py | 105 +
.../KERBEROS/package/templates/kadm5_acl.j2 | 20 +
.../KERBEROS/package/templates/kdc_conf.j2 | 30 +
.../KERBEROS/package/templates/krb5_conf.j2 | 55 +
.../BigInsights/4.0/services/KNOX/alerts.json | 32 +
.../KNOX/configuration/gateway-log4j.xml | 84 +
.../KNOX/configuration/gateway-site.xml | 75 +
.../services/KNOX/configuration/knox-env.xml | 68 +
.../services/KNOX/configuration/ldap-log4j.xml | 67 +
.../services/KNOX/configuration/topology.xml | 158 +
.../services/KNOX/configuration/users-ldif.xml | 139 +
.../BigInsights/4.0/services/KNOX/kerberos.json | 62 +
.../BigInsights/4.0/services/KNOX/metainfo.xml | 88 +
.../KNOX/package/files/validateKnoxStatus.py | 42 +
.../4.0/services/KNOX/package/scripts/knox.py | 134 +
.../KNOX/package/scripts/knox_gateway.py | 290 +
.../services/KNOX/package/scripts/knox_ldap.py | 54 +
.../4.0/services/KNOX/package/scripts/ldap.py | 55 +
.../4.0/services/KNOX/package/scripts/params.py | 172 +
.../KNOX/package/scripts/service_check.py | 92 +
.../KNOX/package/scripts/status_params.py | 50 +
.../services/KNOX/package/scripts/upgrade.py | 72 +
.../package/templates/krb5JAASLogin.conf.j2 | 29 +
.../BigInsights/4.0/services/OOZIE/alerts.json | 45 +
.../services/OOZIE/configuration/oozie-env.xml | 201 +
.../OOZIE/configuration/oozie-log4j.xml | 147 +
.../services/OOZIE/configuration/oozie-site.xml | 416 +
.../4.0/services/OOZIE/kerberos.json | 70 +
.../BigInsights/4.0/services/OOZIE/metainfo.xml | 176 +
.../package/alerts/alert_check_oozie_server.py | 211 +
.../services/OOZIE/package/files/oozieSmoke2.sh | 88 +
.../files/prepareOozieHdfsDirectories.sh | 45 +
.../OOZIE/package/files/wrap_ooziedb.sh | 31 +
.../4.0/services/OOZIE/package/scripts/oozie.py | 279 +
.../OOZIE/package/scripts/oozie_client.py | 76 +
.../OOZIE/package/scripts/oozie_server.py | 193 +
.../package/scripts/oozie_server_upgrade.py | 300 +
.../OOZIE/package/scripts/oozie_service.py | 124 +
.../services/OOZIE/package/scripts/params.py | 259 +
.../OOZIE/package/scripts/service_check.py | 140 +
.../OOZIE/package/scripts/status_params.py | 47 +
.../OOZIE/package/templates/adminusers.txt.j2 | 28 +
.../package/templates/oozie-log4j.properties.j2 | 93 +
.../4.0/services/PIG/configuration/pig-env.xml | 39 +
.../services/PIG/configuration/pig-log4j.xml | 66 +
.../PIG/configuration/pig-properties.xml | 632 ++
.../BigInsights/4.0/services/PIG/kerberos.json | 17 +
.../BigInsights/4.0/services/PIG/metainfo.xml | 86 +
.../4.0/services/PIG/package/files/pigSmoke.sh | 18 +
.../4.0/services/PIG/package/scripts/params.py | 25 +
.../PIG/package/scripts/params_linux.py | 88 +
.../4.0/services/PIG/package/scripts/pig.py | 61 +
.../services/PIG/package/scripts/pig_client.py | 59 +
.../PIG/package/scripts/service_check.py | 123 +
.../SLIDER/configuration/slider-client.xml | 61 +
.../SLIDER/configuration/slider-env.xml | 44 +
.../SLIDER/configuration/slider-log4j.xml | 90 +
.../4.0/services/SLIDER/metainfo.xml | 135 +
.../SLIDER/package/files/hbaseSmokeVerify.sh | 34 +
.../services/SLIDER/package/scripts/__init__.py | 19 +
.../services/SLIDER/package/scripts/params.py | 53 +
.../SLIDER/package/scripts/service_check.py | 42 +
.../services/SLIDER/package/scripts/slider.py | 60 +
.../SLIDER/package/scripts/slider_client.py | 62 +
.../package/templates/storm-slider-env.sh.j2 | 38 +
.../services/SOLR/configuration/solr-env.xml | 216 +
.../services/SOLR/configuration/solr-log4j.xml | 83 +
.../services/SOLR/configuration/solr-site.xml | 47 +
.../BigInsights/4.0/services/SOLR/kerberos.json | 47 +
.../BigInsights/4.0/services/SOLR/metainfo.xml | 74 +
.../services/SOLR/package/scripts/__init__.py | 19 +
.../4.0/services/SOLR/package/scripts/params.py | 182 +
.../SOLR/package/scripts/service_check.py | 60 +
.../4.0/services/SOLR/package/scripts/solr.py | 143 +
.../SOLR/package/scripts/solr_client.py | 36 +
.../SOLR/package/scripts/solr_server.py | 118 +
.../SOLR/package/scripts/solr_service.py | 59 +
.../SOLR/package/scripts/solr_upgrade.py | 135 +
.../SOLR/package/scripts/status_params.py | 34 +
.../services/SOLR/package/templates/solr.xml.j2 | 51 +
.../SOLR/package/templates/solr_jaas.conf.j2 | 26 +
.../BigInsights/4.0/services/SPARK/alerts.json | 32 +
.../SPARK/configuration/spark-defaults.xml | 175 +
.../services/SPARK/configuration/spark-env.xml | 116 +
.../configuration/spark-javaopts-properties.xml | 28 +
.../SPARK/configuration/spark-log4j.xml | 43 +
.../configuration/spark-metrics-properties.xml | 161 +
.../4.0/services/SPARK/kerberos.json | 55 +
.../BigInsights/4.0/services/SPARK/metainfo.xml | 187 +
.../SPARK/package/scripts/job_history_server.py | 167 +
.../services/SPARK/package/scripts/params.py | 199 +
.../SPARK/package/scripts/service_check.py | 78 +
.../4.0/services/SPARK/package/scripts/spark.py | 351 +
.../SPARK/package/scripts/spark_client.py | 61 +
.../SPARK/package/scripts/status_params.py | 41 +
.../SPARK/package/scripts/thrift_server.py | 125 +
.../package/templates/spark-defaults.conf.j2 | 43 +
.../services/SQOOP/configuration/sqoop-env.xml | 62 +
.../BigInsights/4.0/services/SQOOP/metainfo.xml | 93 +
.../services/SQOOP/package/scripts/__init__.py | 19 +
.../services/SQOOP/package/scripts/params.py | 95 +
.../SQOOP/package/scripts/service_check.py | 44 +
.../4.0/services/SQOOP/package/scripts/sqoop.py | 84 +
.../SQOOP/package/scripts/sqoop_client.py | 57 +
.../4.0/services/YARN/MAPREDUCE2_metrics.json | 2596 +++++
.../4.0/services/YARN/YARN_metrics.json | 3486 +++++++
.../4.0/services/YARN/YARN_widgets.json | 617 ++
.../BigInsights/4.0/services/YARN/alerts.json | 398 +
.../YARN/configuration-mapred/mapred-env.xml | 87 +
.../YARN/configuration-mapred/mapred-site.xml | 519 +
.../YARN/configuration/capacity-scheduler.xml | 172 +
.../services/YARN/configuration/yarn-env.xml | 253 +
.../services/YARN/configuration/yarn-log4j.xml | 72 +
.../services/YARN/configuration/yarn-site.xml | 820 ++
.../BigInsights/4.0/services/YARN/kerberos.json | 208 +
.../BigInsights/4.0/services/YARN/metainfo.xml | 264 +
.../package/alerts/alert_nodemanager_health.py | 143 +
.../alerts/alert_nodemanagers_summary.py | 122 +
.../files/validateYarnComponentStatus.py | 170 +
.../services/YARN/package/scripts/__init__.py | 20 +
.../scripts/application_timeline_server.py | 139 +
.../YARN/package/scripts/historyserver.py | 155 +
.../package/scripts/mapred_service_check.py | 80 +
.../YARN/package/scripts/mapreduce2_client.py | 56 +
.../YARN/package/scripts/nodemanager.py | 144 +
.../YARN/package/scripts/nodemanager_upgrade.py | 74 +
.../4.0/services/YARN/package/scripts/params.py | 224 +
.../YARN/package/scripts/resourcemanager.py | 179 +
.../services/YARN/package/scripts/service.py | 76 +
.../YARN/package/scripts/service_check.py | 89 +
.../YARN/package/scripts/status_params.py | 44 +
.../4.0/services/YARN/package/scripts/yarn.py | 277 +
.../YARN/package/scripts/yarn_client.py | 56 +
.../package/templates/container-executor.cfg.j2 | 40 +
.../package/templates/exclude_hosts_list.j2 | 21 +
.../YARN/package/templates/mapreduce.conf.j2 | 35 +
.../package/templates/taskcontroller.cfg.j2 | 38 +
.../YARN/package/templates/yarn.conf.j2 | 35 +
.../4.0/services/ZOOKEEPER/alerts.json | 58 +
.../ZOOKEEPER/configuration/zoo.cfg.xml | 91 +
.../ZOOKEEPER/configuration/zookeeper-env.xml | 77 +
.../ZOOKEEPER/configuration/zookeeper-log4j.xml | 102 +
.../4.0/services/ZOOKEEPER/kerberos.json | 39 +
.../4.0/services/ZOOKEEPER/metainfo.xml | 91 +
.../services/ZOOKEEPER/package/files/zkEnv.sh | 96 +
.../ZOOKEEPER/package/files/zkServer.sh | 120 +
.../ZOOKEEPER/package/files/zkService.sh | 26 +
.../services/ZOOKEEPER/package/files/zkSmoke.sh | 93 +
.../ZOOKEEPER/package/scripts/__init__.py | 20 +
.../ZOOKEEPER/package/scripts/params.py | 96 +
.../ZOOKEEPER/package/scripts/service_check.py | 53 +
.../ZOOKEEPER/package/scripts/status_params.py | 43 +
.../ZOOKEEPER/package/scripts/zookeeper.py | 114 +
.../package/scripts/zookeeper_client.py | 71 +
.../package/scripts/zookeeper_server.py | 161 +
.../package/scripts/zookeeper_service.py | 58 +
.../package/templates/configuration.xsl.j2 | 42 +
.../ZOOKEEPER/package/templates/zoo.cfg.j2 | 53 +
.../templates/zookeeper_client_jaas.conf.j2 | 23 +
.../package/templates/zookeeper_jaas.conf.j2 | 26 +
.../BigInsights/4.0/services/stack_advisor.py | 24 +
.../4.0/stack-advisor/stack_advisor_206.py | 2006 ++++
.../4.0/stack-advisor/stack_advisor_21.py | 259 +
.../4.0/stack-advisor/stack_advisor_22.py | 1713 ++++
.../4.0/stack-advisor/stack_advisor_23.py | 995 ++
.../4.0/stack-advisor/stack_advisor_24.py | 29 +
.../4.0/stack-advisor/stack_advisor_25.py | 1940 ++++
.../stacks/BigInsights/4.0/widgets.json | 95 +
.../stacks/BigInsights/4.1/kerberos.json | 47 +
.../stacks/BigInsights/4.1/metainfo.xml | 23 +
.../stacks/BigInsights/4.1/repos/repoinfo.xml | 44 +
.../4.1/repos/repoinfo.xml.amd64_RH6 | 32 +
.../4.1/repos/repoinfo.xml.amd64_RH7 | 32 +
.../4.1/repos/repoinfo.xml.amd64_SLES | 32 +
.../4.1/repos/repoinfo.xml.ppc64le_RH7 | 32 +
.../4.1/repos/repoinfo.xml.s390x_RH7 | 32 +
.../BigInsights/4.1/role_command_order.json | 22 +
.../4.1/services/AMBARI_METRICS/metainfo.xml | 27 +
.../services/FLUME/configuration/flume-env.xml | 72 +
.../BigInsights/4.1/services/FLUME/metainfo.xml | 36 +
.../BigInsights/4.1/services/HBASE/metainfo.xml | 45 +
.../4.1/services/HBASE/themes/theme.json | 367 +
.../services/HDFS/configuration/hadoop-env.xml | 168 +
.../services/HDFS/configuration/hdfs-site.xml | 48 +
.../BigInsights/4.1/services/HDFS/metainfo.xml | 127 +
.../4.1/services/HDFS/themes/theme.json | 179 +
.../BigInsights/4.1/services/HDFS/widgets.json | 644 ++
.../services/HIVE/configuration/hive-env.xml | 196 +
.../services/HIVE/configuration/hive-site.xml | 356 +
.../BigInsights/4.1/services/HIVE/metainfo.xml | 106 +
.../4.1/services/HIVE/themes/theme.json | 327 +
.../BigInsights/4.1/services/KAFKA/metainfo.xml | 27 +
.../4.1/services/KERBEROS/metainfo.xml | 26 +
.../BigInsights/4.1/services/KNOX/metainfo.xml | 46 +
.../services/OOZIE/configuration/oozie-site.xml | 65 +
.../BigInsights/4.1/services/OOZIE/metainfo.xml | 144 +
.../BigInsights/4.1/services/PIG/metainfo.xml | 38 +
.../4.1/services/SLIDER/metainfo.xml | 46 +
.../BigInsights/4.1/services/SOLR/metainfo.xml | 29 +
.../BigInsights/4.1/services/SPARK/metainfo.xml | 52 +
.../BigInsights/4.1/services/SQOOP/metainfo.xml | 45 +
.../4.1/services/YARN/YARN_widgets.json | 676 ++
.../YARN/configuration-mapred/mapred-site.xml | 53 +
.../services/YARN/configuration/yarn-site.xml | 46 +
.../BigInsights/4.1/services/YARN/metainfo.xml | 82 +
.../4.1/services/YARN/themes-mapred/theme.json | 132 +
.../4.1/services/YARN/themes/theme.json | 250 +
.../4.1/services/ZOOKEEPER/metainfo.xml | 38 +
.../BigInsights/4.1/services/stack_advisor.py | 37 +
.../4.2.5/hooks/after-INSTALL/scripts/hook.py | 37 +
.../4.2.5/hooks/after-INSTALL/scripts/params.py | 101 +
.../scripts/shared_initialization.py | 108 +
.../hooks/before-ANY/files/changeToSecureUid.sh | 63 +
.../4.2.5/hooks/before-ANY/scripts/hook.py | 36 +
.../4.2.5/hooks/before-ANY/scripts/params.py | 241 +
.../before-ANY/scripts/shared_initialization.py | 253 +
.../4.2.5/hooks/before-INSTALL/scripts/hook.py | 37 +
.../hooks/before-INSTALL/scripts/params.py | 113 +
.../scripts/repo_initialization.py | 70 +
.../scripts/shared_initialization.py | 37 +
.../4.2.5/hooks/before-RESTART/scripts/hook.py | 29 +
.../hooks/before-START/files/checkForFormat.sh | 65 +
.../before-START/files/fast-hdfs-resource.jar | Bin 0 -> 28296598 bytes
.../before-START/files/task-log4j.properties | 134 +
.../hooks/before-START/files/topology_script.py | 66 +
.../before-START/scripts/custom_extensions.py | 168 +
.../4.2.5/hooks/before-START/scripts/hook.py | 41 +
.../4.2.5/hooks/before-START/scripts/params.py | 318 +
.../before-START/scripts/rack_awareness.py | 47 +
.../scripts/shared_initialization.py | 177 +
.../templates/commons-logging.properties.j2 | 43 +
.../templates/exclude_hosts_list.j2 | 21 +
.../templates/hadoop-metrics2.properties.j2 | 108 +
.../before-START/templates/health_check.j2 | 81 +
.../templates/include_hosts_list.j2 | 21 +
.../templates/topology_mappings.data.j2 | 24 +
.../stacks/BigInsights/4.2.5/kerberos.json | 47 +
.../stacks/BigInsights/4.2.5/metainfo.xml | 25 +
.../stacks/BigInsights/4.2.5/repos/repoinfo.xml | 32 +
.../4.2.5/repos/repoinfo.xml.amd64_RH6 | 32 +
.../4.2.5/repos/repoinfo.xml.amd64_RH7 | 32 +
.../4.2.5/repos/repoinfo.xml.amd64_SLES | 32 +
.../4.2.5/repos/repoinfo.xml.ppc64le_RH7 | 32 +
.../BigInsights/4.2.5/role_command_order.json | 31 +
.../4.2.5/services/AMBARI_INFRA/metainfo.xml | 26 +
.../AMBARI_INFRA/role_command_order.json | 7 +
.../4.2.5/services/AMBARI_METRICS/metainfo.xml | 27 +
.../4.2.5/services/FLUME/metainfo.xml | 39 +
.../services/HBASE/configuration/hbase-env.xml | 198 +
.../services/HBASE/configuration/hbase-site.xml | 391 +
.../HBASE/configuration/ranger-hbase-audit.xml | 121 +
.../ranger-hbase-plugin-properties.xml | 83 +
.../ranger-hbase-policymgr-ssl.xml | 66 +
.../configuration/ranger-hbase-security.xml | 68 +
.../4.2.5/services/HBASE/kerberos.json | 212 +
.../4.2.5/services/HBASE/metainfo.xml | 88 +
.../4.2.5/services/HBASE/metrics.json | 9370 +++++++++++++++++
.../services/HBASE/quicklinks/quicklinks.json | 121 +
.../4.2.5/services/HBASE/themes/theme.json | 411 +
.../4.2.5/services/HBASE/widgets.json | 510 +
.../services/HDFS/configuration/core-site.xml | 53 +
.../services/HDFS/configuration/hadoop-env.xml | 212 +
.../services/HDFS/configuration/hdfs-log4j.xml | 225 +
.../services/HDFS/configuration/hdfs-site.xml | 148 +
.../HDFS/configuration/ranger-hdfs-audit.xml | 121 +
.../ranger-hdfs-plugin-properties.xml | 78 +
.../configuration/ranger-hdfs-policymgr-ssl.xml | 66 +
.../HDFS/configuration/ranger-hdfs-security.xml | 64 +
.../4.2.5/services/HDFS/kerberos.json | 247 +
.../4.2.5/services/HDFS/metainfo.xml | 138 +
.../services/HDFS/quicklinks/quicklinks.json | 92 +
.../4.2.5/services/HDFS/themes/theme.json | 179 +
.../4.2.5/services/HDFS/widgets.json | 649 ++
.../HIVE/configuration/beeline-log4j2.xml | 80 +
.../hive-atlas-application.properties.xml | 61 +
.../services/HIVE/configuration/hive-env.xml | 235 +
.../HIVE/configuration/hive-exec-log4j2.xml | 101 +
.../HIVE/configuration/hive-interactive-env.xml | 257 +
.../configuration/hive-interactive-site.xml | 513 +
.../services/HIVE/configuration/hive-log4j2.xml | 108 +
.../services/HIVE/configuration/hive-site.xml | 1772 ++++
.../HIVE/configuration/hivemetastore-site.xml | 43 +
.../hiveserver2-interactive-site.xml | 52 +
.../HIVE/configuration/hiveserver2-site.xml | 108 +
.../HIVE/configuration/llap-cli-log4j2.xml | 109 +
.../HIVE/configuration/llap-daemon-log4j.xml | 176 +
.../HIVE/configuration/ranger-hive-audit.xml | 183 +
.../ranger-hive-plugin-properties.xml | 62 +
.../configuration/ranger-hive-policymgr-ssl.xml | 66 +
.../HIVE/configuration/ranger-hive-security.xml | 68 +
.../HIVE/configuration/webhcat-site.xml | 128 +
.../HIVE/etc/hive-schema-0.13.0.mysql.sql | 889 ++
.../HIVE/etc/hive-schema-0.13.0.oracle.sql | 835 ++
.../HIVE/etc/hive-schema-0.13.0.postgres.sql | 1538 +++
.../etc/upgrade-0.12.0-to-0.13.0.oracle.sql | 165 +
.../services/HIVE/etc/upgrade-0.13.0.oracle.sql | 38 +
.../4.2.5/services/HIVE/kerberos.json | 149 +
.../4.2.5/services/HIVE/metainfo.xml | 291 +
.../services/HIVE/quicklinks/quicklinks.json | 68 +
.../4.2.5/services/HIVE/themes/theme.json | 327 +
.../KAFKA/configuration/ranger-kafka-audit.xml | 58 +
.../ranger-kafka-policymgr-ssl.xml | 34 +
.../4.2.5/services/KAFKA/kerberos.json | 70 +
.../4.2.5/services/KAFKA/metainfo.xml | 56 +
.../4.2.5/services/KERBEROS/metainfo.xml | 27 +
.../KNOX/configuration/gateway-site.xml | 29 +
.../services/KNOX/configuration/knox-env.xml | 35 +
.../KNOX/configuration/knoxsso-topology.xml | 126 +
.../KNOX/configuration/ranger-knox-audit.xml | 121 +
.../ranger-knox-plugin-properties.xml | 157 +
.../configuration/ranger-knox-policymgr-ssl.xml | 66 +
.../KNOX/configuration/ranger-knox-security.xml | 58 +
.../services/KNOX/configuration/topology.xml | 215 +
.../4.2.5/services/KNOX/kerberos.json | 81 +
.../4.2.5/services/KNOX/metainfo.xml | 61 +
.../4.2.5/services/LOGSEARCH/metainfo.xml | 26 +
.../services/LOGSEARCH/role_command_order.json | 9 +
.../4.2.5/services/OOZIE/metainfo.xml | 65 +
.../PIG/configuration/pig-properties.xml | 632 ++
.../BigInsights/4.2.5/services/PIG/metainfo.xml | 40 +
.../configuration/ranger-tagsync-site.xml | 46 +
.../RANGER/configuration/ranger-ugsync-site.xml | 46 +
.../4.2.5/services/RANGER/metainfo.xml | 76 +
.../RANGER_KMS/configuration/dbks-site.xml | 104 +
.../RANGER_KMS/configuration/kms-env.xml | 44 +
.../configuration/ranger-kms-audit.xml | 85 +
.../configuration/ranger-kms-policymgr-ssl.xml | 34 +
.../4.2.5/services/RANGER_KMS/kerberos.json | 84 +
.../4.2.5/services/RANGER_KMS/metainfo.xml | 56 +
.../RANGER_KMS/themes/theme_version_2.json | 303 +
.../4.2.5/services/SLIDER/metainfo.xml | 46 +
.../SPARK/configuration/spark-defaults.xml | 32 +
.../configuration/spark-thrift-sparkconf.xml | 32 +
.../4.2.5/services/SPARK/kerberos.json | 70 +
.../4.2.5/services/SPARK/metainfo.xml | 67 +
.../SPARK2/configuration/spark2-defaults.xml | 130 +
.../SPARK2/configuration/spark2-env.xml | 146 +
.../configuration/spark2-hive-site-override.xml | 67 +
.../spark2-javaopts-properties.xml | 29 +
.../configuration/spark2-thrift-sparkconf.xml | 168 +
.../4.2.5/services/SPARK2/metainfo.xml | 112 +
.../sqoop-atlas-application.properties.xml | 47 +
.../services/SQOOP/configuration/sqoop-site.xml | 30 +
.../4.2.5/services/SQOOP/kerberos.json | 20 +
.../4.2.5/services/SQOOP/metainfo.xml | 47 +
.../4.2.5/services/YARN/YARN_widgets.json | 670 ++
.../YARN/configuration-mapred/mapred-env.xml | 50 +
.../YARN/configuration-mapred/mapred-site.xml | 139 +
.../YARN/configuration/capacity-scheduler.xml | 70 +
.../YARN/configuration/ranger-yarn-audit.xml | 121 +
.../ranger-yarn-plugin-properties.xml | 82 +
.../configuration/ranger-yarn-policymgr-ssl.xml | 66 +
.../YARN/configuration/ranger-yarn-security.xml | 58 +
.../services/YARN/configuration/yarn-env.xml | 198 +
.../services/YARN/configuration/yarn-log4j.xml | 103 +
.../services/YARN/configuration/yarn-site.xml | 762 ++
.../4.2.5/services/YARN/kerberos.json | 278 +
.../4.2.5/services/YARN/metainfo.xml | 140 +
.../YARN/quicklinks-mapred/quicklinks.json | 92 +
.../services/YARN/quicklinks/quicklinks.json | 92 +
.../services/YARN/themes-mapred/theme.json | 132 +
.../4.2.5/services/YARN/themes/theme.json | 250 +
.../4.2.5/services/ZOOKEEPER/metainfo.xml | 37 +
.../BigInsights/4.2.5/services/stack_advisor.py | 180 +
.../4.2.5/upgrades/config-upgrade.xml | 135 +
.../upgrades/nonrolling-upgrade-to-hdp-2.6.xml | 800 ++
.../stacks/BigInsights/4.2/metainfo.xml | 25 +
.../stacks/BigInsights/4.2/repos/repoinfo.xml | 44 +
.../4.2/repos/repoinfo.xml.amd64_RH6 | 32 +
.../4.2/repos/repoinfo.xml.amd64_RH7 | 32 +
.../4.2/repos/repoinfo.xml.amd64_SLES | 32 +
.../4.2/repos/repoinfo.xml.ppc64le_RH7 | 32 +
.../4.2/repos/repoinfo.xml.s390x_RH7 | 32 +
.../BigInsights/4.2/role_command_order.json | 31 +
.../4.2/services/AMBARI_METRICS/alerts.json | 183 +
.../AMBARI_METRICS/configuration/ams-env.xml | 114 +
.../configuration/ams-hbase-env.xml | 245 +
.../configuration/ams-hbase-log4j.xml | 147 +
.../configuration/ams-hbase-policy.xml | 56 +
.../configuration/ams-hbase-security-site.xml | 167 +
.../configuration/ams-hbase-site.xml | 431 +
.../AMBARI_METRICS/configuration/ams-log4j.xml | 66 +
.../AMBARI_METRICS/configuration/ams-site.xml | 578 ++
.../4.2/services/AMBARI_METRICS/kerberos.json | 122 +
.../4.2/services/AMBARI_METRICS/metainfo.xml | 147 +
.../4.2/services/AMBARI_METRICS/metrics.json | 2472 +++++
.../alerts/alert_ambari_metrics_monitor.py | 104 +
.../package/files/hbaseSmokeVerify.sh | 34 +
.../files/service-metrics/AMBARI_METRICS.txt | 245 +
.../package/files/service-metrics/FLUME.txt | 17 +
.../package/files/service-metrics/HBASE.txt | 588 ++
.../package/files/service-metrics/HDFS.txt | 277 +
.../package/files/service-metrics/HOST.txt | 37 +
.../package/files/service-metrics/KAFKA.txt | 190 +
.../package/files/service-metrics/STORM.txt | 7 +
.../package/files/service-metrics/YARN.txt | 178 +
.../AMBARI_METRICS/package/scripts/__init__.py | 19 +
.../AMBARI_METRICS/package/scripts/ams.py | 388 +
.../package/scripts/ams_service.py | 103 +
.../AMBARI_METRICS/package/scripts/functions.py | 51 +
.../AMBARI_METRICS/package/scripts/hbase.py | 267 +
.../package/scripts/hbase_master.py | 70 +
.../package/scripts/hbase_regionserver.py | 66 +
.../package/scripts/hbase_service.py | 53 +
.../package/scripts/metrics_collector.py | 133 +
.../package/scripts/metrics_monitor.py | 59 +
.../AMBARI_METRICS/package/scripts/params.py | 257 +
.../package/scripts/params_linux.py | 50 +
.../package/scripts/params_windows.py | 53 +
.../package/scripts/service_check.py | 166 +
.../package/scripts/service_mapping.py | 22 +
.../package/scripts/split_points.py | 236 +
.../AMBARI_METRICS/package/scripts/status.py | 46 +
.../package/scripts/status_params.py | 39 +
.../package/templates/ams.conf.j2 | 35 +
.../templates/ams_collector_jaas.conf.j2 | 26 +
.../templates/ams_zookeeper_jaas.conf.j2 | 26 +
.../hadoop-metrics2-hbase.properties.j2 | 63 +
.../package/templates/hbase_client_jaas.conf.j2 | 23 +
.../templates/hbase_grant_permissions.j2 | 39 +
.../package/templates/hbase_master_jaas.conf.j2 | 26 +
.../templates/hbase_regionserver_jaas.conf.j2 | 26 +
.../package/templates/metric_groups.conf.j2 | 37 +
.../package/templates/metric_monitor.ini.j2 | 31 +
.../package/templates/regionservers.j2 | 20 +
.../package/templates/smoketest_metrics.json.j2 | 15 +
.../BigInsights/4.2/services/FLUME/alerts.json | 27 +
.../services/FLUME/configuration/flume-conf.xml | 38 +
.../services/FLUME/configuration/flume-env.xml | 103 +
.../4.2/services/FLUME/kerberos.json | 51 +
.../BigInsights/4.2/services/FLUME/metainfo.xml | 69 +
.../BigInsights/4.2/services/FLUME/metrics.json | 430 +
.../package/alerts/alert_flume_agent_status.py | 106 +
.../4.2/services/FLUME/package/scripts/flume.py | 229 +
.../FLUME/package/scripts/flume_check.py | 40 +
.../FLUME/package/scripts/flume_handler.py | 145 +
.../FLUME/package/scripts/flume_upgrade.py | 88 +
.../services/FLUME/package/scripts/params.py | 101 +
.../FLUME/package/scripts/params_linux.py | 30 +
.../templates/flume-metrics2.properties.j2 | 26 +
.../FLUME/package/templates/flume.conf.j2 | 24 +
.../FLUME/package/templates/log4j.properties.j2 | 67 +
.../BigInsights/4.2/services/HBASE/alerts.json | 157 +
.../services/HBASE/configuration/hbase-env.xml | 205 +
.../configuration/hbase-javaopts-properties.xml | 29 +
.../HBASE/configuration/hbase-log4j.xml | 147 +
.../HBASE/configuration/hbase-policy.xml | 56 +
.../services/HBASE/configuration/hbase-site.xml | 816 ++
.../HBASE/configuration/ranger-hbase-audit.xml | 193 +
.../ranger-hbase-plugin-properties.xml | 234 +
.../ranger-hbase-policymgr-ssl.xml | 67 +
.../configuration/ranger-hbase-security.xml | 75 +
.../4.2/services/HBASE/kerberos.json | 188 +
.../BigInsights/4.2/services/HBASE/metainfo.xml | 176 +
.../BigInsights/4.2/services/HBASE/metrics.json | 9420 ++++++++++++++++++
.../HBASE/package/files/draining_servers.rb | 164 +
.../HBASE/package/files/hbaseSmokeVerify.sh | 34 +
.../services/HBASE/package/scripts/__init__.py | 19 +
.../services/HBASE/package/scripts/functions.py | 54 +
.../4.2/services/HBASE/package/scripts/hbase.py | 236 +
.../HBASE/package/scripts/hbase_client.py | 72 +
.../HBASE/package/scripts/hbase_decommission.py | 74 +
.../HBASE/package/scripts/hbase_master.py | 131 +
.../HBASE/package/scripts/hbase_regionserver.py | 132 +
.../package/scripts/hbase_restgatewayserver.py | 83 +
.../HBASE/package/scripts/hbase_service.py | 51 +
.../HBASE/package/scripts/hbase_upgrade.py | 37 +
.../services/HBASE/package/scripts/params.py | 363 +
.../package/scripts/phoenix_queryserver.py | 77 +
.../HBASE/package/scripts/phoenix_service.py | 50 +
.../HBASE/package/scripts/service_check.py | 79 +
.../HBASE/package/scripts/setup_ranger_hbase.py | 84 +
.../HBASE/package/scripts/status_params.py | 46 +
.../services/HBASE/package/scripts/upgrade.py | 52 +
...-metrics2-hbase.properties-GANGLIA-MASTER.j2 | 109 +
...doop-metrics2-hbase.properties-GANGLIA-RS.j2 | 107 +
.../HBASE/package/templates/hbase-smoke.sh.j2 | 44 +
.../HBASE/package/templates/hbase.conf.j2 | 35 +
.../package/templates/hbase_client_jaas.conf.j2 | 23 +
.../templates/hbase_grant_permissions.j2 | 39 +
.../package/templates/hbase_master_jaas.conf.j2 | 26 +
.../templates/hbase_queryserver_jaas.conf.j2 | 26 +
.../templates/hbase_regionserver_jaas.conf.j2 | 26 +
.../package/templates/hbase_rest_jaas.conf.j2 | 26 +
.../HBASE/package/templates/regionservers.j2 | 20 +
.../services/HBASE/quicklinks/quicklinks.json | 121 +
.../BigInsights/4.2/services/HBASE/widgets.json | 510 +
.../BigInsights/4.2/services/HDFS/alerts.json | 760 ++
.../services/HDFS/configuration/core-site.xml | 250 +
.../services/HDFS/configuration/hadoop-env.xml | 415 +
.../HDFS/configuration/hadoop-policy.xml | 145 +
.../services/HDFS/configuration/hdfs-log4j.xml | 226 +
.../services/HDFS/configuration/hdfs-site.xml | 752 ++
.../HDFS/configuration/ranger-hdfs-audit.xml | 193 +
.../ranger-hdfs-plugin-properties.xml | 244 +
.../configuration/ranger-hdfs-policymgr-ssl.xml | 67 +
.../HDFS/configuration/ranger-hdfs-security.xml | 71 +
.../services/HDFS/configuration/ssl-client.xml | 65 +
.../services/HDFS/configuration/ssl-server.xml | 72 +
.../BigInsights/4.2/services/HDFS/kerberos.json | 230 +
.../BigInsights/4.2/services/HDFS/metainfo.xml | 283 +
.../BigInsights/4.2/services/HDFS/metrics.json | 7899 +++++++++++++++
.../package/alerts/alert_checkpoint_time.py | 223 +
.../alerts/alert_datanode_unmounted_data_dir.py | 164 +
.../package/alerts/alert_ha_namenode_health.py | 261 +
.../package/alerts/alert_upgrade_finalized.py | 171 +
.../HDFS/package/files/checkForFormat.sh | 71 +
.../services/HDFS/package/files/checkWebUI.py | 54 +
.../services/HDFS/package/scripts/__init__.py | 20 +
.../scripts/balancer-emulator/hdfs-command.py | 45 +
.../services/HDFS/package/scripts/datanode.py | 158 +
.../HDFS/package/scripts/datanode_upgrade.py | 141 +
.../4.2/services/HDFS/package/scripts/hdfs.py | 131 +
.../HDFS/package/scripts/hdfs_client.py | 113 +
.../HDFS/package/scripts/hdfs_datanode.py | 76 +
.../HDFS/package/scripts/hdfs_namenode.py | 488 +
.../HDFS/package/scripts/hdfs_nfsgateway.py | 72 +
.../HDFS/package/scripts/hdfs_rebalance.py | 130 +
.../HDFS/package/scripts/hdfs_snamenode.py | 53 +
.../HDFS/package/scripts/install_params.py | 39 +
.../HDFS/package/scripts/journalnode.py | 169 +
.../HDFS/package/scripts/journalnode_upgrade.py | 140 +
.../services/HDFS/package/scripts/namenode.py | 334 +
.../HDFS/package/scripts/namenode_ha_state.py | 216 +
.../HDFS/package/scripts/namenode_upgrade.py | 269 +
.../services/HDFS/package/scripts/nfsgateway.py | 137 +
.../4.2/services/HDFS/package/scripts/params.py | 463 +
.../HDFS/package/scripts/service_check.py | 109 +
.../HDFS/package/scripts/setup_ranger_hdfs.py | 90 +
.../services/HDFS/package/scripts/snamenode.py | 142 +
.../HDFS/package/scripts/status_params.py | 44 +
.../4.2/services/HDFS/package/scripts/utils.py | 407 +
.../services/HDFS/package/scripts/zkfc_slave.py | 150 +
.../package/templates/exclude_hosts_list.j2 | 21 +
.../HDFS/package/templates/hdfs.conf.j2 | 35 +
.../services/HDFS/package/templates/slaves.j2 | 21 +
.../services/HDFS/quicklinks/quicklinks.json | 92 +
.../BigInsights/4.2/services/HDFS/widgets.json | 644 ++
.../BigInsights/4.2/services/HIVE/alerts.json | 111 +
.../services/HIVE/configuration/hcat-env.xml | 58 +
.../services/HIVE/configuration/hive-env.xml | 351 +
.../HIVE/configuration/hive-exec-log4j.xml | 122 +
.../services/HIVE/configuration/hive-log4j.xml | 140 +
.../services/HIVE/configuration/hive-site.xml | 1961 ++++
.../HIVE/configuration/hiveserver2-site.xml | 77 +
.../HIVE/configuration/ranger-hive-audit.xml | 193 +
.../ranger-hive-plugin-properties.xml | 223 +
.../configuration/ranger-hive-policymgr-ssl.xml | 67 +
.../HIVE/configuration/ranger-hive-security.xml | 76 +
.../services/HIVE/configuration/webhcat-env.xml | 55 +
.../HIVE/configuration/webhcat-log4j.xml | 82 +
.../HIVE/configuration/webhcat-site.xml | 184 +
.../HIVE/etc/hive-schema-0.12.0.mysql.sql | 777 ++
.../HIVE/etc/hive-schema-0.12.0.oracle.sql | 718 ++
.../HIVE/etc/hive-schema-0.12.0.postgres.sql | 1406 +++
.../HIVE/etc/hive-schema-0.13.0.mysql.sql | 889 ++
.../HIVE/etc/hive-schema-0.13.0.oracle.sql | 835 ++
.../HIVE/etc/hive-schema-0.13.0.postgres.sql | 1538 +++
.../HIVE/etc/hive-schema-0.14.0.mysql.sql | 889 ++
.../HIVE/etc/hive-schema-0.14.0.oracle.sql | 833 ++
.../HIVE/etc/hive-schema-0.14.0.postgres.sql | 1541 +++
.../etc/upgrade-0.12.0-to-0.13.0.oracle.sql | 165 +
.../services/HIVE/etc/upgrade-0.13.0.oracle.sql | 38 +
.../BigInsights/4.2/services/HIVE/kerberos.json | 114 +
.../BigInsights/4.2/services/HIVE/metainfo.xml | 351 +
.../HIVE/package/alerts/alert_hive_metastore.py | 193 +
.../package/alerts/alert_hive_thrift_port.py | 269 +
.../HIVE/package/alerts/alert_webhcat_server.py | 228 +
.../package/etc/hive-schema-0.12.0.mysql.sql | 777 ++
.../package/etc/hive-schema-0.12.0.oracle.sql | 718 ++
.../package/etc/hive-schema-0.12.0.postgres.sql | 1406 +++
.../services/HIVE/package/files/addMysqlUser.sh | 37 +
.../services/HIVE/package/files/hcatSmoke.sh | 41 +
.../services/HIVE/package/files/hiveSmoke.sh | 24 +
.../services/HIVE/package/files/hiveserver2.sql | 23 +
.../HIVE/package/files/hiveserver2Smoke.sh | 32 +
.../4.2/services/HIVE/package/files/pigSmoke.sh | 18 +
.../HIVE/package/files/removeMysqlUser.sh | 33 +
.../HIVE/package/files/startMetastore.sh | 25 +
.../HIVE/package/files/templetonSmoke.sh | 93 +
.../services/HIVE/package/scripts/__init__.py | 19 +
.../4.2/services/HIVE/package/scripts/hcat.py | 73 +
.../HIVE/package/scripts/hcat_client.py | 50 +
.../HIVE/package/scripts/hcat_service_check.py | 78 +
.../4.2/services/HIVE/package/scripts/hive.py | 432 +
.../HIVE/package/scripts/hive_client.py | 83 +
.../HIVE/package/scripts/hive_metastore.py | 234 +
.../HIVE/package/scripts/hive_server.py | 177 +
.../HIVE/package/scripts/hive_server_upgrade.py | 138 +
.../HIVE/package/scripts/hive_service.py | 143 +
.../HIVE/package/scripts/mysql_server.py | 64 +
.../HIVE/package/scripts/mysql_service.py | 57 +
.../HIVE/package/scripts/mysql_users.py | 70 +
.../HIVE/package/scripts/mysql_utils.py | 35 +
.../4.2/services/HIVE/package/scripts/params.py | 29 +
.../HIVE/package/scripts/params_linux.py | 565 ++
.../HIVE/package/scripts/params_windows.py | 76 +
.../HIVE/package/scripts/postgresql_server.py | 109 +
.../HIVE/package/scripts/postgresql_service.py | 39 +
.../HIVE/package/scripts/service_check.py | 91 +
.../HIVE/package/scripts/setup_ranger_hive.py | 77 +
.../HIVE/package/scripts/status_params.py | 96 +
.../services/HIVE/package/scripts/webhcat.py | 111 +
.../HIVE/package/scripts/webhcat_server.py | 147 +
.../HIVE/package/scripts/webhcat_service.py | 75 +
.../package/scripts/webhcat_service_check.py | 120 +
.../HIVE/package/templates/hive.conf.j2 | 36 +
.../package/templates/startHiveserver2.sh.j2 | 24 +
.../package/templates/templeton_smoke.pig.j2 | 24 +
.../BigInsights/4.2/services/KAFKA/alerts.json | 32 +
.../KAFKA/configuration/kafka-broker.xml | 475 +
.../services/KAFKA/configuration/kafka-env.xml | 86 +
.../KAFKA/configuration/kafka-log4j.xml | 118 +
.../4.2/services/KAFKA/kerberos.json | 50 +
.../BigInsights/4.2/services/KAFKA/metainfo.xml | 84 +
.../BigInsights/4.2/services/KAFKA/metrics.json | 239 +
.../4.2/services/KAFKA/package/scripts/kafka.py | 243 +
.../KAFKA/package/scripts/kafka_broker.py | 107 +
.../services/KAFKA/package/scripts/params.py | 157 +
.../KAFKA/package/scripts/service_check.py | 65 +
.../KAFKA/package/scripts/status_params.py | 26 +
.../services/KAFKA/package/scripts/upgrade.py | 78 +
.../4.2/services/KAFKA/package/scripts/utils.py | 38 +
.../KAFKA/package/templates/kafka.conf.j2 | 35 +
.../package/templates/kafka_client_jaas.conf.j2 | 29 +
.../KAFKA/package/templates/kafka_jaas.conf.j2 | 41 +
.../package/templates/tools-log4j.properties.j2 | 21 +
.../KERBEROS/configuration/kerberos-env.xml | 326 +
.../KERBEROS/configuration/krb5-conf.xml | 113 +
.../4.2/services/KERBEROS/kerberos.json | 17 +
.../4.2/services/KERBEROS/metainfo.xml | 147 +
.../KERBEROS/package/scripts/kerberos_client.py | 79 +
.../KERBEROS/package/scripts/kerberos_common.py | 473 +
.../KERBEROS/package/scripts/kerberos_server.py | 141 +
.../services/KERBEROS/package/scripts/params.py | 200 +
.../KERBEROS/package/scripts/service_check.py | 81 +
.../KERBEROS/package/scripts/status_params.py | 32 +
.../services/KERBEROS/package/scripts/utils.py | 105 +
.../KERBEROS/package/templates/kadm5_acl.j2 | 20 +
.../KERBEROS/package/templates/kdc_conf.j2 | 30 +
.../KERBEROS/package/templates/krb5_conf.j2 | 55 +
.../BigInsights/4.2/services/KNOX/alerts.json | 32 +
.../KNOX/configuration/gateway-log4j.xml | 84 +
.../KNOX/configuration/gateway-site.xml | 79 +
.../services/KNOX/configuration/knox-env.xml | 81 +
.../services/KNOX/configuration/ldap-log4j.xml | 67 +
.../KNOX/configuration/ranger-knox-audit.xml | 193 +
.../ranger-knox-plugin-properties.xml | 241 +
.../configuration/ranger-knox-policymgr-ssl.xml | 67 +
.../KNOX/configuration/ranger-knox-security.xml | 65 +
.../services/KNOX/configuration/topology.xml | 182 +
.../services/KNOX/configuration/users-ldif.xml | 139 +
.../BigInsights/4.2/services/KNOX/kerberos.json | 69 +
.../BigInsights/4.2/services/KNOX/metainfo.xml | 92 +
.../KNOX/package/files/validateKnoxStatus.py | 43 +
.../4.2/services/KNOX/package/scripts/knox.py | 162 +
.../KNOX/package/scripts/knox_gateway.py | 307 +
.../services/KNOX/package/scripts/knox_ldap.py | 54 +
.../4.2/services/KNOX/package/scripts/ldap.py | 55 +
.../4.2/services/KNOX/package/scripts/params.py | 459 +
.../KNOX/package/scripts/service_check.py | 92 +
.../KNOX/package/scripts/setup_ranger_knox.py | 77 +
.../KNOX/package/scripts/status_params.py | 50 +
.../services/KNOX/package/scripts/upgrade.py | 93 +
.../package/templates/krb5JAASLogin.conf.j2 | 30 +
.../BigInsights/4.2/services/OOZIE/alerts.json | 45 +
.../services/OOZIE/configuration/oozie-env.xml | 201 +
.../OOZIE/configuration/oozie-log4j.xml | 147 +
.../services/OOZIE/configuration/oozie-site.xml | 416 +
.../4.2/services/OOZIE/kerberos.json | 70 +
.../BigInsights/4.2/services/OOZIE/metainfo.xml | 172 +
.../package/alerts/alert_check_oozie_server.py | 211 +
.../services/OOZIE/package/files/oozieSmoke2.sh | 90 +
.../files/prepareOozieHdfsDirectories.sh | 46 +
.../OOZIE/package/files/wrap_ooziedb.sh | 31 +
.../4.2/services/OOZIE/package/scripts/oozie.py | 279 +
.../OOZIE/package/scripts/oozie_client.py | 76 +
.../OOZIE/package/scripts/oozie_server.py | 193 +
.../package/scripts/oozie_server_upgrade.py | 300 +
.../OOZIE/package/scripts/oozie_service.py | 124 +
.../services/OOZIE/package/scripts/params.py | 262 +
.../OOZIE/package/scripts/service_check.py | 140 +
.../OOZIE/package/scripts/status_params.py | 47 +
.../OOZIE/package/templates/adminusers.txt.j2 | 28 +
.../package/templates/oozie-log4j.properties.j2 | 93 +
.../services/OOZIE/quicklinks/quicklinks.json | 48 +
.../4.2/services/PIG/configuration/pig-env.xml | 39 +
.../services/PIG/configuration/pig-log4j.xml | 66 +
.../PIG/configuration/pig-properties.xml | 632 ++
.../BigInsights/4.2/services/PIG/kerberos.json | 17 +
.../BigInsights/4.2/services/PIG/metainfo.xml | 87 +
.../4.2/services/PIG/package/files/pigSmoke.sh | 18 +
.../4.2/services/PIG/package/scripts/params.py | 25 +
.../PIG/package/scripts/params_linux.py | 88 +
.../4.2/services/PIG/package/scripts/pig.py | 61 +
.../services/PIG/package/scripts/pig_client.py | 59 +
.../PIG/package/scripts/service_check.py | 123 +
.../BigInsights/4.2/services/RANGER/alerts.json | 74 +
.../RANGER/configuration/admin-properties.xml | 287 +
.../RANGER/configuration/ranger-admin-site.xml | 571 ++
.../RANGER/configuration/ranger-env.xml | 465 +
.../RANGER/configuration/ranger-site.xml | 76 +
.../RANGER/configuration/ranger-ugsync-site.xml | 496 +
.../configuration/usersync-properties.xml | 126 +
.../4.2/services/RANGER/metainfo.xml | 107 +
.../alerts/alert_ranger_admin_passwd_check.py | 180 +
.../services/RANGER/package/scripts/params.py | 208 +
.../RANGER/package/scripts/ranger_admin.py | 194 +
.../RANGER/package/scripts/ranger_service.py | 47 +
.../RANGER/package/scripts/ranger_usersync.py | 82 +
.../RANGER/package/scripts/service_check.py | 51 +
.../RANGER/package/scripts/setup_ranger.py | 137 +
.../RANGER/package/scripts/setup_ranger_xml.py | 467 +
.../services/RANGER/package/scripts/upgrade.py | 30 +
.../4.2/services/RANGER/themes/theme.json | 1397 +++
.../4.2/services/RANGER_KMS/alerts.json | 32 +
.../RANGER_KMS/configuration/dbks-site.xml | 101 +
.../RANGER_KMS/configuration/kms-env.xml | 55 +
.../RANGER_KMS/configuration/kms-log4j.xml | 69 +
.../RANGER_KMS/configuration/kms-properties.xml | 104 +
.../RANGER_KMS/configuration/kms-site.xml | 146 +
.../configuration/ranger-kms-audit.xml | 153 +
.../configuration/ranger-kms-policymgr-ssl.xml | 73 +
.../configuration/ranger-kms-security.xml | 65 +
.../configuration/ranger-kms-site.xml | 66 +
.../4.2/services/RANGER_KMS/kerberos.json | 49 +
.../4.2/services/RANGER_KMS/metainfo.xml | 89 +
.../services/RANGER_KMS/package/scripts/kms.py | 489 +
.../RANGER_KMS/package/scripts/kms_server.py | 96 +
.../RANGER_KMS/package/scripts/kms_service.py | 49 +
.../RANGER_KMS/package/scripts/params.py | 246 +
.../RANGER_KMS/package/scripts/service_check.py | 41 +
.../RANGER_KMS/package/scripts/upgrade.py | 29 +
.../SLIDER/configuration/slider-client.xml | 61 +
.../SLIDER/configuration/slider-env.xml | 44 +
.../SLIDER/configuration/slider-log4j.xml | 93 +
.../4.2/services/SLIDER/kerberos.json | 17 +
.../4.2/services/SLIDER/metainfo.xml | 128 +
.../SLIDER/package/files/hbaseSmokeVerify.sh | 34 +
.../services/SLIDER/package/scripts/__init__.py | 19 +
.../services/SLIDER/package/scripts/params.py | 65 +
.../SLIDER/package/scripts/params_linux.py | 75 +
.../SLIDER/package/scripts/params_windows.py | 45 +
.../SLIDER/package/scripts/service_check.py | 59 +
.../services/SLIDER/package/scripts/slider.py | 97 +
.../SLIDER/package/scripts/slider_client.py | 71 +
.../package/templates/storm-slider-env.sh.j2 | 38 +
.../services/SOLR/configuration/solr-env.xml | 244 +
.../services/SOLR/configuration/solr-log4j.xml | 83 +
.../services/SOLR/configuration/solr-site.xml | 47 +
.../BigInsights/4.2/services/SOLR/kerberos.json | 53 +
.../BigInsights/4.2/services/SOLR/metainfo.xml | 82 +
.../services/SOLR/package/scripts/__init__.py | 19 +
.../4.2/services/SOLR/package/scripts/params.py | 205 +
.../SOLR/package/scripts/service_check.py | 61 +
.../4.2/services/SOLR/package/scripts/solr.py | 100 +
.../SOLR/package/scripts/solr_client.py | 36 +
.../SOLR/package/scripts/solr_server.py | 107 +
.../SOLR/package/scripts/solr_service.py | 72 +
.../SOLR/package/scripts/solr_upgrade.py | 135 +
.../SOLR/package/scripts/status_params.py | 32 +
.../services/SOLR/package/templates/solr.xml.j2 | 51 +
.../SOLR/package/templates/solr_jaas.conf.j2 | 26 +
.../BigInsights/4.2/services/SPARK/alerts.json | 32 +
.../SPARK/configuration/spark-defaults.xml | 175 +
.../services/SPARK/configuration/spark-env.xml | 120 +
.../configuration/spark-javaopts-properties.xml | 28 +
.../SPARK/configuration/spark-log4j.xml | 43 +
.../configuration/spark-metrics-properties.xml | 161 +
.../4.2/services/SPARK/kerberos.json | 55 +
.../BigInsights/4.2/services/SPARK/metainfo.xml | 197 +
.../SPARK/package/scripts/job_history_server.py | 167 +
.../services/SPARK/package/scripts/params.py | 216 +
.../SPARK/package/scripts/service_check.py | 132 +
.../4.2/services/SPARK/package/scripts/spark.py | 353 +
.../SPARK/package/scripts/spark_check.py | 76 +
.../SPARK/package/scripts/spark_client.py | 62 +
.../SPARK/package/scripts/status_params.py | 36 +
.../SPARK/package/scripts/thrift_server.py | 119 +
.../package/templates/spark-defaults.conf.j2 | 43 +
.../services/SPARK/quicklinks/quicklinks.json | 47 +
.../services/SQOOP/configuration/sqoop-env.xml | 62 +
.../BigInsights/4.2/services/SQOOP/metainfo.xml | 95 +
.../services/SQOOP/package/scripts/__init__.py | 19 +
.../services/SQOOP/package/scripts/params.py | 95 +
.../SQOOP/package/scripts/service_check.py | 44 +
.../4.2/services/SQOOP/package/scripts/sqoop.py | 85 +
.../SQOOP/package/scripts/sqoop_client.py | 57 +
.../4.2/services/SYSTEMML/metainfo.xml | 77 +
.../SYSTEMML/package/scripts/__init__.py | 19 +
.../services/SYSTEMML/package/scripts/params.py | 40 +
.../SYSTEMML/package/scripts/service_check.py | 43 +
.../SYSTEMML/package/scripts/systemml_client.py | 49 +
.../services/TITAN/configuration/titan-env.xml | 48 +
.../TITAN/configuration/titan-hbase-solr.xml | 67 +
.../TITAN/configuration/titan-log4j.xml | 66 +
.../4.2/services/TITAN/kerberos.json | 17 +
.../BigInsights/4.2/services/TITAN/metainfo.xml | 88 +
.../TITAN/package/files/titanSmoke.groovy | 20 +
.../services/TITAN/package/scripts/params.py | 128 +
.../TITAN/package/scripts/service_check.py | 64 +
.../4.2/services/TITAN/package/scripts/titan.py | 70 +
.../TITAN/package/scripts/titan_client.py | 58 +
.../4.2/services/YARN/MAPREDUCE2_metrics.json | 2596 +++++
.../4.2/services/YARN/YARN_metrics.json | 3486 +++++++
.../4.2/services/YARN/YARN_widgets.json | 617 ++
.../BigInsights/4.2/services/YARN/alerts.json | 414 +
.../YARN/configuration-mapred/mapred-env.xml | 103 +
.../YARN/configuration-mapred/mapred-site.xml | 585 ++
.../YARN/configuration/capacity-scheduler.xml | 172 +
.../YARN/configuration/ranger-yarn-audit.xml | 193 +
.../ranger-yarn-plugin-properties.xml | 86 +
.../configuration/ranger-yarn-policymgr-ssl.xml | 67 +
.../YARN/configuration/ranger-yarn-security.xml | 65 +
.../services/YARN/configuration/yarn-env.xml | 262 +
.../services/YARN/configuration/yarn-log4j.xml | 84 +
.../services/YARN/configuration/yarn-site.xml | 1160 +++
.../BigInsights/4.2/services/YARN/kerberos.json | 224 +
.../BigInsights/4.2/services/YARN/metainfo.xml | 286 +
.../package/alerts/alert_nodemanager_health.py | 201 +
.../alerts/alert_nodemanagers_summary.py | 197 +
.../files/validateYarnComponentStatus.py | 170 +
.../services/YARN/package/scripts/__init__.py | 20 +
.../scripts/application_timeline_server.py | 139 +
.../YARN/package/scripts/historyserver.py | 158 +
.../package/scripts/mapred_service_check.py | 80 +
.../YARN/package/scripts/mapreduce2_client.py | 56 +
.../YARN/package/scripts/nodemanager.py | 144 +
.../YARN/package/scripts/nodemanager_upgrade.py | 76 +
.../4.2/services/YARN/package/scripts/params.py | 412 +
.../YARN/package/scripts/resourcemanager.py | 181 +
.../services/YARN/package/scripts/service.py | 81 +
.../YARN/package/scripts/service_check.py | 91 +
.../YARN/package/scripts/setup_ranger_yarn.py | 67 +
.../YARN/package/scripts/status_params.py | 44 +
.../4.2/services/YARN/package/scripts/yarn.py | 445 +
.../YARN/package/scripts/yarn_client.py | 56 +
.../package/templates/container-executor.cfg.j2 | 40 +
.../package/templates/exclude_hosts_list.j2 | 21 +
.../YARN/package/templates/mapreduce.conf.j2 | 35 +
.../package/templates/taskcontroller.cfg.j2 | 38 +
.../YARN/package/templates/yarn.conf.j2 | 35 +
.../YARN/quicklinks-mapred/quicklinks.json | 92 +
.../services/YARN/quicklinks/quicklinks.json | 92 +
.../4.2/services/ZOOKEEPER/alerts.json | 58 +
.../ZOOKEEPER/configuration/zoo.cfg.xml | 91 +
.../ZOOKEEPER/configuration/zookeeper-env.xml | 77 +
.../ZOOKEEPER/configuration/zookeeper-log4j.xml | 102 +
.../4.2/services/ZOOKEEPER/kerberos.json | 39 +
.../4.2/services/ZOOKEEPER/metainfo.xml | 91 +
.../services/ZOOKEEPER/package/files/zkEnv.sh | 96 +
.../ZOOKEEPER/package/files/zkServer.sh | 120 +
.../ZOOKEEPER/package/files/zkService.sh | 26 +
.../services/ZOOKEEPER/package/files/zkSmoke.sh | 93 +
.../ZOOKEEPER/package/scripts/__init__.py | 20 +
.../ZOOKEEPER/package/scripts/params.py | 96 +
.../ZOOKEEPER/package/scripts/service_check.py | 53 +
.../ZOOKEEPER/package/scripts/status_params.py | 43 +
.../ZOOKEEPER/package/scripts/zookeeper.py | 114 +
.../package/scripts/zookeeper_client.py | 71 +
.../package/scripts/zookeeper_server.py | 161 +
.../package/scripts/zookeeper_service.py | 58 +
.../package/templates/configuration.xsl.j2 | 42 +
.../ZOOKEEPER/package/templates/zoo.cfg.j2 | 53 +
.../templates/zookeeper_client_jaas.conf.j2 | 23 +
.../package/templates/zookeeper_jaas.conf.j2 | 26 +
.../BigInsights/4.2/services/stack_advisor.py | 105 +
.../BigInsights/4.2/upgrades/config-upgrade.xml | 135 +
.../upgrades/nonrolling-upgrade-to-hdp-2.6.xml | 800 ++
.../HDP/2.0.6/configuration/cluster-env.xml | 16 +-
.../HDP/2.0.6/properties/stack_features.json | 852 +-
.../HDP/2.0.6/properties/stack_tools.json | 16 +-
.../PERF/1.0/configuration/cluster-env.xml | 16 +-
.../PERF/1.0/properties/stack_features.json | 38 +-
.../stacks/PERF/1.0/properties/stack_tools.json | 16 +-
.../actionmanager/TestActionDBAccessorImpl.java | 44 +-
.../ComponentVersionAlertRunnableTest.java | 5 +-
.../creator/UpgradeEventCreatorTest.java | 6 +-
.../checks/PreviousUpgradeCompletedTest.java | 13 +-
.../BlueprintConfigurationProcessorTest.java | 38 +-
...ClusterStackVersionResourceProviderTest.java | 276 +-
.../PreUpgradeCheckResourceProviderTest.java | 12 +-
.../StackUpgradeConfigurationMergeTest.java | 35 +-
.../UpgradeResourceProviderHDP22Test.java | 15 +-
.../internal/UpgradeResourceProviderTest.java | 130 +-
.../UpgradeSummaryResourceProviderTest.java | 22 +-
.../upgrade/StackVersionListenerTest.java | 98 +-
.../ambari/server/orm/dao/UpgradeDAOTest.java | 32 +-
.../ChangeStackReferencesActionTest.java | 102 +
.../ComponentVersionCheckActionTest.java | 120 +-
.../upgrades/UpgradeActionTest.java | 265 +-
.../server/state/ServiceComponentTest.java | 31 +-
.../ambari/server/state/UpgradeHelperTest.java | 319 +-
.../cluster/ClusterEffectiveVersionTest.java | 68 +-
.../services/RetryUpgradeActionServiceTest.java | 30 +-
.../stack/upgrade/StageWrapperBuilderTest.java | 32 +-
.../ClusterConfigurationRequestTest.java | 113 +-
.../common-services/configs/hawq_default.json | 6 +-
.../python/host_scripts/TestAlertDiskSpace.py | 16 +-
.../2.0.6/HBASE/test_phoenix_queryserver.py | 23 -
.../stacks/2.0.6/YARN/test_historyserver.py | 21 +-
.../2.5/configs/ranger-admin-default.json | 990 +-
.../2.5/configs/ranger-admin-secured.json | 1108 +-
.../stacks/2.5/configs/ranger-kms-default.json | 1158 +--
.../stacks/2.5/configs/ranger-kms-secured.json | 1320 +--
.../2.6/configs/ranger-admin-default.json | 953 +-
.../2.6/configs/ranger-admin-secured.json | 1066 +-
.../src/test/python/stacks/utils/RMFTestCase.py | 8 +-
.../controllers/global/cluster_controller.js | 1 +
.../main/admin/stack_and_upgrade_controller.js | 59 +-
ambari-web/app/mappers/hosts_mapper.js | 12 +-
ambari-web/app/messages.js | 3 +
.../stack_version/stack_upgrade_history.js | 17 +-
.../modal_popups/install_repo_confirmation.hbs | 27 +
.../admin/stack_upgrade/upgrade_history.hbs | 4 +-
.../main/admin/stack_upgrade/versions.hbs | 7 +
ambari-web/app/utils/ajax/ajax.js | 27 +-
.../admin/stack_upgrade/upgrade_history_view.js | 2 +-
.../stack_upgrade/upgrade_version_box_view.js | 8 +-
.../main/admin/stack_upgrade/versions_view.js | 21 +-
.../admin/stack_and_upgrade_controller_test.js | 48 +-
.../upgrade_version_box_view_test.js | 11 +-
.../admin/stack_upgrade/version_view_test.js | 148 +-
dev-support/docker/docker/bin/ambaribuild.py | 21 +-
.../docker/docker/bin/test/ambaribuild_test.py | 17 +
pom.xml | 4 +
1259 files changed, 244186 insertions(+), 5454 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/37192c9b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
index f5b674e,a2c0b9b..ebd3468
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
@@@ -80,7 -80,7 +81,8 @@@ import org.slf4j.LoggerFactory
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.Striped;
+ import com.google.inject.Provider;
/**
http://git-wip-us.apache.org/repos/asf/ambari/blob/37192c9b/ambari-web/app/utils/ajax/ajax.js
----------------------------------------------------------------------
[06/13] ambari git commit: AMBARI-21443. Start All services not
getting invoked after regenerating keytabs (akovalenko)
Posted by jo...@apache.org.
AMBARI-21443. Start All services not getting invoked after regenerating keytabs (akovalenko)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e92aae45
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e92aae45
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e92aae45
Branch: refs/heads/branch-feature-AMBARI-21348
Commit: e92aae457b9e7cac74321659187fed427b5c8d58
Parents: 4590eb3
Author: Aleksandr Kovalenko <ak...@hortonworks.com>
Authored: Tue Jul 11 17:22:37 2017 +0300
Committer: Aleksandr Kovalenko <ak...@hortonworks.com>
Committed: Tue Jul 11 17:22:37 2017 +0300
----------------------------------------------------------------------
ambari-web/app/controllers/main/service.js | 13 +++++++++++-
ambari-web/app/utils/ajax/ajax.js | 22 ++++++++++++++++++++
.../test/controllers/main/service_test.js | 4 ++--
3 files changed, 36 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/e92aae45/ambari-web/app/controllers/main/service.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service.js b/ambari-web/app/controllers/main/service.js
index eb9df0d..343105f 100644
--- a/ambari-web/app/controllers/main/service.js
+++ b/ambari-web/app/controllers/main/service.js
@@ -177,9 +177,20 @@ App.MainServiceController = Em.ArrayController.extend(App.SupportClientConfigsDo
},
/**
- * Restart all services - stops all services, then starts them back
+ * Restart all services - restarts by sending one RESTART command
*/
restartAllServices: function () {
+ App.ajax.send({
+ name: 'restart.allServices',
+ sender: this,
+ showLoadingPopup: true
+ });
+ },
+
+ /**
+ * Restart all services - stops all services, then starts them back
+ */
+ stopAndStartAllServices: function () {
this.silentStopAllServices();
},
http://git-wip-us.apache.org/repos/asf/ambari/blob/e92aae45/ambari-web/app/utils/ajax/ajax.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/ajax/ajax.js b/ambari-web/app/utils/ajax/ajax.js
index eab94bb..daad29e 100644
--- a/ambari-web/app/utils/ajax/ajax.js
+++ b/ambari-web/app/utils/ajax/ajax.js
@@ -2349,6 +2349,28 @@ var urls = {
}
},
+ 'restart.allServices': {
+ 'real': '/clusters/{clusterName}/requests',
+ 'mock': '',
+ 'format': function (data) {
+ return {
+ type: 'POST',
+ data: JSON.stringify({
+ "RequestInfo": {
+ "command": "RESTART",
+ "context": 'Restart all services',
+ "operation_level": 'host_component'
+ },
+ "Requests/resource_filters": [
+ {
+ "hosts_predicate": "HostRoles/cluster_name=" + data.clusterName
+ }
+ ]
+ })
+ }
+ }
+ },
+
'restart.staleConfigs': {
'real': "/clusters/{clusterName}/requests",
'mock': "",
http://git-wip-us.apache.org/repos/asf/ambari/blob/e92aae45/ambari-web/test/controllers/main/service_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/service_test.js b/ambari-web/test/controllers/main/service_test.js
index 7ed7641..57a3eb4 100644
--- a/ambari-web/test/controllers/main/service_test.js
+++ b/ambari-web/test/controllers/main/service_test.js
@@ -433,7 +433,7 @@ describe('App.MainServiceController', function () {
});
- describe("#restartAllServices()", function() {
+ describe("#stopAndStartAllServices()", function() {
beforeEach(function() {
sinon.stub(mainServiceController, 'silentStopAllServices');
@@ -443,7 +443,7 @@ describe('App.MainServiceController', function () {
});
it("silentStopAllServices should be called", function() {
- mainServiceController.restartAllServices();
+ mainServiceController.stopAndStartAllServices();
expect(mainServiceController.silentStopAllServices.calledOnce).to.be.true;
});
});
[10/13] ambari git commit: AMBARI-21446. Force-remove package does
not work on CentOS 6 and SuSE 11
Posted by jo...@apache.org.
AMBARI-21446. Force-remove package does not work on CentOS 6 and SuSE 11
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4bbdd0e5
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4bbdd0e5
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4bbdd0e5
Branch: refs/heads/branch-feature-AMBARI-21348
Commit: 4bbdd0e550ed96aca5f7fcbd2036cf72543a10b3
Parents: 621f380
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Wed Jul 12 11:13:46 2017 +0200
Committer: Siddharth Wagle <sw...@hortonworks.com>
Committed: Wed Jul 12 11:21:11 2017 -0700
----------------------------------------------------------------------
.../src/test/python/resource_management/TestPackageResource.py | 4 ++--
.../python/resource_management/core/providers/package/yumrpm.py | 2 +-
.../python/resource_management/core/providers/package/zypper.py | 2 +-
3 files changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/4bbdd0e5/ambari-agent/src/test/python/resource_management/TestPackageResource.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/resource_management/TestPackageResource.py b/ambari-agent/src/test/python/resource_management/TestPackageResource.py
index bc1bfeb..51a35eb 100644
--- a/ambari-agent/src/test/python/resource_management/TestPackageResource.py
+++ b/ambari-agent/src/test/python/resource_management/TestPackageResource.py
@@ -228,7 +228,7 @@ class TestPackageResource(TestCase):
logoutput = False,
ignore_dependencies = True
)
- shell_mock.assert_called_with(['/usr/bin/rpm', '-e', '--nodeps', 'some_package'], logoutput=False, sudo=True)
+ shell_mock.assert_called_with(['rpm', '-e', '--nodeps', 'some_package'], logoutput=False, sudo=True)
@patch.object(shell, "call", new = MagicMock(return_value=(0, None)))
@patch.object(shell, "checked_call")
@@ -256,7 +256,7 @@ class TestPackageResource(TestCase):
logoutput = False,
ignore_dependencies = True
)
- shell_mock.assert_called_with(['/usr/bin/rpm', '-e', '--nodeps', 'some_package'], logoutput=False, sudo=True)
+ shell_mock.assert_called_with(['rpm', '-e', '--nodeps', 'some_package'], logoutput=False, sudo=True)
@patch.object(shell, "call", new = MagicMock(return_value=(0, None)))
@patch.object(shell, "checked_call")
http://git-wip-us.apache.org/repos/asf/ambari/blob/4bbdd0e5/ambari-common/src/main/python/resource_management/core/providers/package/yumrpm.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/core/providers/package/yumrpm.py b/ambari-common/src/main/python/resource_management/core/providers/package/yumrpm.py
index 064b504..a2f0533 100644
--- a/ambari-common/src/main/python/resource_management/core/providers/package/yumrpm.py
+++ b/ambari-common/src/main/python/resource_management/core/providers/package/yumrpm.py
@@ -36,7 +36,7 @@ REMOVE_CMD = {
False: ['/usr/bin/yum', '-d', '0', '-e', '0', '-y', 'erase'],
}
-REMOVE_WITHOUT_DEPENDENCIES_CMD = ['/usr/bin/rpm', '-e', '--nodeps']
+REMOVE_WITHOUT_DEPENDENCIES_CMD = ['rpm', '-e', '--nodeps']
REPO_UPDATE_CMD = ['/usr/bin/yum', 'clean','metadata']
http://git-wip-us.apache.org/repos/asf/ambari/blob/4bbdd0e5/ambari-common/src/main/python/resource_management/core/providers/package/zypper.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/core/providers/package/zypper.py b/ambari-common/src/main/python/resource_management/core/providers/package/zypper.py
index c1aab60..f3abdb5 100644
--- a/ambari-common/src/main/python/resource_management/core/providers/package/zypper.py
+++ b/ambari-common/src/main/python/resource_management/core/providers/package/zypper.py
@@ -35,7 +35,7 @@ REMOVE_CMD = {
False: ['/usr/bin/zypper', '--quiet', 'remove', '--no-confirm'],
}
-REMOVE_WITHOUT_DEPENDENCIES_CMD = ['/usr/bin/rpm', '-e', '--nodeps']
+REMOVE_WITHOUT_DEPENDENCIES_CMD = ['rpm', '-e', '--nodeps']
REPO_UPDATE_CMD = ['/usr/bin/zypper', 'clean']
[05/13] ambari git commit: AMBARI-21441. Discrepancy in the OS name
in Ambari for PPC (aonishuk)
Posted by jo...@apache.org.
AMBARI-21441. Discrepancy in the OS name in Ambari for PPC (aonishuk)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4590eb36
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4590eb36
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4590eb36
Branch: refs/heads/branch-feature-AMBARI-21348
Commit: 4590eb360c2aadb2206b701577177688b620f301
Parents: 515a641
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Tue Jul 11 13:24:24 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Tue Jul 11 13:24:24 2017 +0300
----------------------------------------------------------------------
.../src/main/resources/stacks/HDP/2.6/repos/repoinfo.xml | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/4590eb36/ambari-server/src/main/resources/stacks/HDP/2.6/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/repos/repoinfo.xml
index 23441f5..ff132aa 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/repos/repoinfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/repos/repoinfo.xml
@@ -31,15 +31,15 @@
<unique>false</unique>
</repo>
</os>
- <os family="redhat-ppc6">
+ <os family="redhat-ppc7">
<repo>
- <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.6.0.3</baseurl>
+ <baseurl>http://public-repo-1.hortonworks.com/HDP/centos7/2.x/updates/2.6.0.3</baseurl>
<repoid>HDP-2.6</repoid>
<reponame>HDP</reponame>
<unique>true</unique>
</repo>
<repo>
- <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6</baseurl>
+ <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/ppc64le</baseurl>
<repoid>HDP-UTILS-1.1.0.21</repoid>
<reponame>HDP-UTILS</reponame>
<unique>false</unique>
[02/13] ambari git commit: AMBARI-21427. Assigning hosts concurrently
to the same config group may fail with
'org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException:
Config group already exist'. (stoader)
Posted by jo...@apache.org.
AMBARI-21427. Assigning hosts concurrently to the same config group may fail with 'org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException: Config group already exist'. (stoader)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a21fa124
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a21fa124
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a21fa124
Branch: refs/heads/branch-feature-AMBARI-21348
Commit: a21fa124be518d86652341daf401e2857a83ea6a
Parents: 7502691
Author: Toader, Sebastian <st...@hortonworks.com>
Authored: Mon Jul 10 13:02:20 2017 +0200
Committer: Toader, Sebastian <st...@hortonworks.com>
Committed: Mon Jul 10 15:13:44 2017 +0200
----------------------------------------------------------------------
.../ambari/server/topology/AmbariContext.java | 81 +++++++++++++++-----
1 file changed, 62 insertions(+), 19 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/a21fa124/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
index 106d7c8..dee0e6c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -30,6 +30,7 @@ import java.util.SortedSet;
import java.util.TreeSet;
import java.util.concurrent.Callable;
import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.Lock;
import javax.annotation.Nullable;
import javax.inject.Inject;
@@ -69,9 +70,11 @@ import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.Config;
import org.apache.ambari.server.state.ConfigFactory;
+import org.apache.ambari.server.state.ConfigHelper;
import org.apache.ambari.server.state.DesiredConfig;
import org.apache.ambari.server.state.Host;
import org.apache.ambari.server.state.SecurityType;
+import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.configgroup.ConfigGroup;
import org.apache.ambari.server.utils.RetryHelper;
import org.slf4j.Logger;
@@ -79,6 +82,8 @@ import org.slf4j.LoggerFactory;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.Striped;
+import com.google.inject.Provider;
/**
@@ -99,6 +104,12 @@ public class AmbariContext {
@Inject
ConfigFactory configFactory;
+ /**
+ * Used for getting configuration property values from stack and services.
+ */
+ @Inject
+ private Provider<ConfigHelper> configHelper;
+
private static AmbariManagementController controller;
private static ClusterController clusterController;
//todo: task id's. Use existing mechanism for getting next task id sequence
@@ -112,6 +123,16 @@ public class AmbariContext {
private final static Logger LOG = LoggerFactory.getLogger(AmbariContext.class);
+
+ /**
+ * When config groups are created using Blueprints these are created when
+ * hosts join a hostgroup and are added to the corresponding config group.
+ * Since hosts join in parallel there might be a race condition in creating
+ * the config group a host is to be added to. Thus we need to synchronize
+ * the creation of config groups with the same name.
+ */
+ private Striped<Lock> configGroupCreateLock = Striped.lazyWeakLock(1);
+
public boolean isClusterKerberosEnabled(long clusterId) {
Cluster cluster;
try {
@@ -167,9 +188,10 @@ public class AmbariContext {
public void createAmbariResources(ClusterTopology topology, String clusterName, SecurityType securityType, String repoVersion) {
Stack stack = topology.getBlueprint().getStack();
+ StackId stackId = new StackId(stack.getName(), stack.getVersion());
createAmbariClusterResource(clusterName, stack.getName(), stack.getVersion(), securityType, repoVersion);
- createAmbariServiceAndComponentResources(topology, clusterName);
+ createAmbariServiceAndComponentResources(topology, clusterName, stackId, repoVersion);
}
public void createAmbariClusterResource(String clusterName, String stackName, String stackVersion, SecurityType securityType, String repoVersion) {
@@ -196,7 +218,8 @@ public class AmbariContext {
}
}
- public void createAmbariServiceAndComponentResources(ClusterTopology topology, String clusterName) {
+ public void createAmbariServiceAndComponentResources(ClusterTopology topology, String clusterName,
+ StackId stackId, String repositoryVersion) {
Collection<String> services = topology.getBlueprint().getServices();
try {
@@ -205,11 +228,13 @@ public class AmbariContext {
} catch (AmbariException e) {
throw new RuntimeException("Failed to persist service and component resources: " + e, e);
}
- Set<ServiceRequest> serviceRequests = new HashSet<ServiceRequest>();
- Set<ServiceComponentRequest> componentRequests = new HashSet<ServiceComponentRequest>();
+ Set<ServiceRequest> serviceRequests = new HashSet<>();
+ Set<ServiceComponentRequest> componentRequests = new HashSet<>();
for (String service : services) {
String credentialStoreEnabled = topology.getBlueprint().getCredentialStoreEnabled(service);
- serviceRequests.add(new ServiceRequest(clusterName, service, null, credentialStoreEnabled));
+ serviceRequests.add(new ServiceRequest(clusterName, service, stackId.getStackId(),
+ repositoryVersion, null, credentialStoreEnabled));
+
for (String component : topology.getBlueprint().getComponents(service)) {
String recoveryEnabled = topology.getBlueprint().getRecoveryEnabled(service, component);
componentRequests.add(new ServiceComponentRequest(clusterName, service, component, null, recoveryEnabled));
@@ -223,14 +248,14 @@ public class AmbariContext {
}
// set all services state to INSTALLED->STARTED
// this is required so the user can start failed services at the service level
- Map<String, Object> installProps = new HashMap<String, Object>();
+ Map<String, Object> installProps = new HashMap<>();
installProps.put(ServiceResourceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID, "INSTALLED");
installProps.put(ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
- Map<String, Object> startProps = new HashMap<String, Object>();
+ Map<String, Object> startProps = new HashMap<>();
startProps.put(ServiceResourceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID, "STARTED");
startProps.put(ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
- Predicate predicate = new EqualsPredicate<String>(
- ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
+ Predicate predicate = new EqualsPredicate<>(
+ ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
try {
getServiceResourceProvider().updateResources(
new RequestImpl(null, Collections.singleton(installProps), null, null), predicate);
@@ -262,9 +287,9 @@ public class AmbariContext {
}
String clusterName = cluster.getClusterName();
- Map<String, Object> properties = new HashMap<String, Object>();
+ Map<String, Object> properties = new HashMap<>();
properties.put(HostResourceProvider.HOST_CLUSTER_NAME_PROPERTY_ID, clusterName);
- properties.put(HostResourceProvider.HOST_NAME_PROPERTY_ID, hostName);
+ properties.put(HostResourceProvider.HOST_HOST_NAME_PROPERTY_ID, hostName);
properties.put(HostResourceProvider.HOST_RACK_INFO_PROPERTY_ID, host.getRackInfo());
try {
@@ -275,7 +300,7 @@ public class AmbariContext {
hostName, e.toString()), e);
}
- final Set<ServiceComponentHostRequest> requests = new HashSet<ServiceComponentHostRequest>();
+ final Set<ServiceComponentHostRequest> requests = new HashSet<>();
for (Map.Entry<String, Collection<String>> entry : components.entrySet()) {
String service = entry.getKey();
@@ -328,11 +353,17 @@ public class AmbariContext {
}
public void registerHostWithConfigGroup(final String hostName, final ClusterTopology topology, final String groupName) {
+ String qualifiedGroupName = getConfigurationGroupName(topology.getBlueprint().getName(), groupName);
+
+ Lock configGroupLock = configGroupCreateLock.get(qualifiedGroupName);
+
try {
+ configGroupLock.lock();
+
boolean hostAdded = RetryHelper.executeWithRetry(new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
- return addHostToExistingConfigGroups(hostName, topology, groupName);
+ return addHostToExistingConfigGroups(hostName, topology, qualifiedGroupName);
}
});
if (!hostAdded) {
@@ -342,6 +373,9 @@ public class AmbariContext {
LOG.error("Unable to register config group for host: ", e);
throw new RuntimeException("Unable to register config group for host: " + hostName);
}
+ finally {
+ configGroupLock.unlock();
+ }
}
public RequestStatusResponse installHost(String hostName, String clusterName, Collection<String> skipInstallForComponents, Collection<String> dontSkipInstallForComponents, boolean skipFailure) {
@@ -549,7 +583,7 @@ public class AmbariContext {
/**
* Add the new host to an existing config group.
*/
- private boolean addHostToExistingConfigGroups(String hostName, ClusterTopology topology, String groupName) {
+ private boolean addHostToExistingConfigGroups(String hostName, ClusterTopology topology, String configGroupName) {
boolean addedHost = false;
Clusters clusters;
Cluster cluster;
@@ -563,9 +597,8 @@ public class AmbariContext {
// I don't know of a method to get config group by name
//todo: add a method to get config group by name
Map<Long, ConfigGroup> configGroups = cluster.getConfigGroups();
- String qualifiedGroupName = getConfigurationGroupName(topology.getBlueprint().getName(), groupName);
for (ConfigGroup group : configGroups.values()) {
- if (group.getName().equals(qualifiedGroupName)) {
+ if (group.getName().equals(configGroupName)) {
try {
Host host = clusters.getHost(hostName);
addedHost = true;
@@ -589,7 +622,7 @@ public class AmbariContext {
* and the hosts associated with the host group are assigned to the config group.
*/
private void createConfigGroupsAndRegisterHost(ClusterTopology topology, String groupName) throws AmbariException {
- Map<String, Map<String, Config>> groupConfigs = new HashMap<String, Map<String, Config>>();
+ Map<String, Map<String, Config>> groupConfigs = new HashMap<>();
Stack stack = topology.getBlueprint().getStack();
// get the host-group config with cluster creation template overrides
@@ -608,7 +641,7 @@ public class AmbariContext {
//todo: attributes
Map<String, Config> serviceConfigs = groupConfigs.get(service);
if (serviceConfigs == null) {
- serviceConfigs = new HashMap<String, Config>();
+ serviceConfigs = new HashMap<>();
groupConfigs.put(service, serviceConfigs);
}
serviceConfigs.put(type, config);
@@ -669,6 +702,16 @@ public class AmbariContext {
return String.format("%s:%s", bpName, hostGroupName);
}
+ /**
+ * Gets an instance of {@link ConfigHelper} for classes which are not
+ * dependency injected.
+ *
+ * @return a {@link ConfigHelper} instance.
+ */
+ public ConfigHelper getConfigHelper() {
+ return configHelper.get();
+ }
+
private synchronized HostResourceProvider getHostResourceProvider() {
if (hostResourceProvider == null) {
hostResourceProvider = (HostResourceProvider)
[08/13] ambari git commit: AMBARI-21444. Hive warehouse
fixes.(vbrodetskyi)
Posted by jo...@apache.org.
AMBARI-21444. Hive warehouse fixes.(vbrodetskyi)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/88d93b92
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/88d93b92
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/88d93b92
Branch: refs/heads/branch-feature-AMBARI-21348
Commit: 88d93b92429225e03f46cb739a738b9033d45b84
Parents: 37192c9
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Wed Jul 12 15:07:09 2017 +0300
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Wed Jul 12 15:07:09 2017 +0300
----------------------------------------------------------------------
.../HIVE/0.12.0.2.0/package/scripts/hive.py | 3 +-
.../0.12.0.2.0/package/scripts/params_linux.py | 2 ++
.../services/HIVE/configuration/hive-site.xml | 35 ++++++++++++++++++++
.../stacks/2.0.6/HIVE/test_hive_server.py | 2 ++
4 files changed, 41 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/88d93b92/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
index 1c53804..0d6e6dc 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
@@ -188,7 +188,8 @@ def hive(name=None):
type="directory",
action="create_on_execute",
owner=params.hive_user,
- mode=0777
+ group=params.user_group,
+ mode=params.hive_apps_whs_mode
)
else:
Logger.info(format("Not creating warehouse directory '{hive_apps_whs_dir}', as the location is not in DFS."))
http://git-wip-us.apache.org/repos/asf/ambari/blob/88d93b92/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
index cf116c5..4b595a8 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
@@ -506,6 +506,8 @@ hive_env_sh_template = config['configurations']['hive-env']['content']
hive_hdfs_user_dir = format("/user/{hive_user}")
hive_hdfs_user_mode = 0755
+#Parameter for custom warehouse directory permissions. Permissions are in octal format and need to be converted to decimal
+hive_apps_whs_mode = int(default('/configurations/hive-site/custom.hive.warehouse.mode', '0777'), 8)
hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.warehouse.dir"]
whs_dir_protocol = urlparse(hive_apps_whs_dir).scheme
hive_exec_scratchdir = config['configurations']['hive-site']["hive.exec.scratchdir"]
http://git-wip-us.apache.org/repos/asf/ambari/blob/88d93b92/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-site.xml
new file mode 100644
index 0000000..a07c16f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-site.xml
@@ -0,0 +1,35 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<configuration supports_final="true">
+ <property>
+ <name>hive.warehouse.subdir.inherit.perms</name>
+ <value>true</value>
+ <description>Set this to true if table directories should inherit the permissions of the warehouse or database directory instead of being created with permissions derived from dfs umask
+ </description>
+ <on-ambari-upgrade add="false"/>
+ </property>
+
+ <property>
+ <name>hive.start.cleanup.scratchdir</name>
+ <value>false</value>
+ <description>To cleanup the hive scratchdir while starting the hive server.
+ </description>
+ <on-ambari-upgrade add="false"/>
+ </property>
+</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/88d93b92/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
index 5a4f8d9..512608c 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
@@ -368,6 +368,7 @@ class TestHiveServer(RMFTestCase):
hadoop_conf_dir = '/etc/hadoop/conf',
type = 'directory',
action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='missing_principal', default_fs=default_fs_default,
+ group = 'hadoop',
mode = 0777,
)
self.assertResourceCalled('HdfsResource', '/user/hive',
@@ -576,6 +577,7 @@ class TestHiveServer(RMFTestCase):
hadoop_conf_dir = '/etc/hadoop/conf',
type = 'directory',
action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
+ group = 'hadoop',
mode = 0777,
)
self.assertResourceCalled('HdfsResource', '/user/hive',
[11/13] ambari git commit: AMBARI-21455. Remove unnecessary services
from BigInsights stack (alejandro)
Posted by jo...@apache.org.
AMBARI-21455. Remove unnecessary services from BigInsights stack (alejandro)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/08f48c1e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/08f48c1e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/08f48c1e
Branch: refs/heads/branch-feature-AMBARI-21348
Commit: 08f48c1eb85a3763891584b835977809936f3a19
Parents: 4bbdd0e
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Wed Jul 12 10:22:27 2017 -0700
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Wed Jul 12 11:31:17 2017 -0700
----------------------------------------------------------------------
.../BigInsights/4.2.5/role_command_order.json | 12 +-
.../BigInsights/4.2.5/services/stack_advisor.py | 53 --------
.../BigInsights/4.2/role_command_order.json | 3 +-
.../4.2/services/SYSTEMML/metainfo.xml | 77 -----------
.../SYSTEMML/package/scripts/__init__.py | 19 ---
.../services/SYSTEMML/package/scripts/params.py | 40 ------
.../SYSTEMML/package/scripts/service_check.py | 43 -------
.../SYSTEMML/package/scripts/systemml_client.py | 49 -------
.../services/TITAN/configuration/titan-env.xml | 48 -------
.../TITAN/configuration/titan-hbase-solr.xml | 67 ----------
.../TITAN/configuration/titan-log4j.xml | 66 ----------
.../4.2/services/TITAN/kerberos.json | 17 ---
.../BigInsights/4.2/services/TITAN/metainfo.xml | 88 -------------
.../TITAN/package/files/titanSmoke.groovy | 20 ---
.../services/TITAN/package/scripts/params.py | 128 -------------------
.../TITAN/package/scripts/service_check.py | 64 ----------
.../4.2/services/TITAN/package/scripts/titan.py | 70 ----------
.../TITAN/package/scripts/titan_client.py | 58 ---------
18 files changed, 3 insertions(+), 919 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/role_command_order.json b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/role_command_order.json
index dc4811b..35fc0d8 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/role_command_order.json
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/role_command_order.json
@@ -4,22 +4,14 @@
"general_deps" : {
"_comment" : "dependencies for all cases",
"HIVE_SERVER_INTERACTIVE-START": ["RESOURCEMANAGER-START", "NODEMANAGER-START", "MYSQL_SERVER-START"],
- "RESOURCEMANAGER-STOP": ["HIVE_SERVER_INTERACTIVE-STOP", "SPARK2_THRIFTSERVER-STOP", "KERNEL_GATEWAY-STOP" ],
+ "RESOURCEMANAGER-STOP": ["HIVE_SERVER_INTERACTIVE-STOP", "SPARK2_THRIFTSERVER-STOP"],
"NODEMANAGER-STOP": ["HIVE_SERVER_INTERACTIVE-STOP", "KERNEL_GATEWAY-STOP" ],
"NAMENODE-STOP": ["HIVE_SERVER_INTERACTIVE-STOP"],
"HIVE_SERVER_INTERACTIVE-RESTART": ["NODEMANAGER-RESTART", "MYSQL_SERVER-RESTART"],
"HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START", "WEBHCAT_SERVER-START", "HIVE_SERVER_INTERACTIVE-START"],
"RANGER_ADMIN-START": ["ZOOKEEPER_SERVER-START", "INFRA_SOLR-START"],
"SPARK2_SERVICE_CHECK-SERVICE_CHECK" : ["SPARK2_JOBHISTORYSERVER-START", "APP_TIMELINE_SERVER-START"],
- "HBASE_REST_SERVER-START": ["HBASE_MASTER-START"],
- "TITAN_SERVER-START" : ["HBASE_SERVICE_CHECK-SERVICE_CHECK", "SOLR-START"],
- "TITAN_SERVICE_CHECK-SERVICE_CHECK": ["TITAN_SERVER-START"],
- "KERNEL_GATEWAY-INSTALL": ["SPARK2_CLIENT-INSTALL"],
- "PYTHON_CLIENT-INSTALL": ["KERNEL_GATEWAY-INSTALL"],
- "KERNEL_GATEWAY-START": ["NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START", "SPARK2_JOBHISTORYSERVER-START"],
- "JNBG_SERVICE_CHECK-SERVICE_CHECK": ["KERNEL_GATEWAY-START"],
- "R4ML-INSTALL": ["SPARK2_CLIENT-INSTALL", "SYSTEMML-INSTALL"],
- "R4ML_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START", "SPARK2_JOBHISTORYSERVER-START"]
+ "HBASE_REST_SERVER-START": ["HBASE_MASTER-START"]
},
"_comment" : "Dependencies that are used when GLUSTERFS is not present in cluster",
"optional_no_glusterfs": {
http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/stack_advisor.py
index 8883f57..1caa307 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/stack_advisor.py
@@ -26,9 +26,7 @@ class BigInsights425StackAdvisor(BigInsights42StackAdvisor):
parentRecommendConfDict = super(BigInsights425StackAdvisor, self).getServiceConfigurationRecommenderDict()
childRecommendConfDict = {
"HDFS": self.recommendHDFSConfigurations,
- "JNBG": self.recommendJNBGConfigurations,
"SOLR": self.recommendSolrConfigurations,
- "TITAN": self.recommendTitanConfigurations,
"RANGER": self.recommendRangerConfigurations
}
parentRecommendConfDict.update(childRecommendConfDict)
@@ -37,55 +35,11 @@ class BigInsights425StackAdvisor(BigInsights42StackAdvisor):
def getServiceConfigurationValidators(self):
parentValidators = super(BigInsights425StackAdvisor, self).getServiceConfigurationValidators()
childValidators = {
- "JNBG": {"jnbg-env": self.validateJNBGConfigurations},
"SOLR": {"ranger-solr-plugin-properties": self.validateSolrRangerPluginConfigurations}
}
self.mergeValidators(parentValidators, childValidators)
return parentValidators
- def recommendJNBGConfigurations(self, configurations, clusterData, services, hosts):
- putJNBGEnvProperty = self.putProperty(configurations, "jnbg-env", services)
- putJNBGEnvPropertyAttribute = self.putPropertyAttribute(configurations, "jnbg-env")
-
- distro_version = platform.linux_distribution()[1]
- # On RHEL 6.x default path does not point to a Python 2.7
- # so empty out the field and force user to update the path
- if distro_version < "7.0":
- putJNBGEnvProperty('python_interpreter_path', "")
-
- def validateJNBGConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
- validationItems = []
- jnbg_env = getSiteProperties(configurations, "jnbg-env")
- py_exec = jnbg_env.get("python_interpreter_path") if jnbg_env and "python_interpreter_path" in jnbg_env else []
-
- # Test that it is a valid executable path before proceeding
- if not os.path.isfile(py_exec) and not os.access(py_exec, os.X_OK):
- validationItems.append({"config-name": "python_interpreter_path",
- "item": self.getErrorItem("Invalid Python interpreter path specified")})
- return self.toConfigurationValidationProblems(validationItems, "jnbg-env")
-
- distro_version = platform.linux_distribution()[1]
- if distro_version < "7.0" and (py_exec == "/opt/rh/python27/root/usr/bin/python" or py_exec == "/opt/rh/python27/root/usr/bin/python2" or py_exec == "/opt/rh/python27/root/usr/bin/python2.7"):
- # Special handling for RHSCL Python 2.7
- proc = Popen(['/usr/bin/scl', 'enable', 'python27', '/opt/rh/python27/root/usr/bin/python' ' -V'], stderr=PIPE)
- else:
- proc = Popen([py_exec, '-V'], stderr=PIPE)
- py_string = proc.communicate()[1]
- py_version = py_string.split()[1]
-
- if "Python" not in py_string:
- validationItems.append({"config-name": "python_interpreter_path",
- "item": self.getErrorItem("Path specified does not appear to be a Python interpreter")})
- return self.toConfigurationValidationProblems(validationItems, "jnbg-env")
-
- # Validate that the specified python is 2.7.x (not > 2.x.x and not < 2.7)
- if not py_version.split('.')[0] == '2' or (py_version.split('.')[0] == '2' and py_version.split('.')[1] < '7'):
- validationItems.append({"config-name": "python_interpreter_path",
- "item": self.getErrorItem("Specified Python interpreter must be version >= 2.7 and < 3.0")})
- return self.toConfigurationValidationProblems(validationItems, "jnbg-env")
-
- return self.toConfigurationValidationProblems(validationItems, "jnbg-env")
-
def recommendRangerConfigurations(self, configurations, clusterData, services, hosts):
super(BigInsights425StackAdvisor, self).recommendRangerConfigurations(configurations, clusterData, services, hosts)
putRangerAdminProperty = self.putProperty(configurations, "ranger-admin-site", services)
@@ -96,13 +50,6 @@ class BigInsights425StackAdvisor(BigInsights42StackAdvisor):
zookeeper_host_port = ",".join(zookeeper_host_port)
ranger_audit_zk_port = '{0}/{1}'.format(zookeeper_host_port, 'solr')
putRangerAdminProperty('ranger.audit.solr.zookeepers', ranger_audit_zk_port)
-
- def recommendTitanConfigurations(self, configurations, clusterData, services, hosts):
- putTitanPropertyAttribute = self.putPropertyAttribute(configurations, "titan-env")
- servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
- knox_enabled = "KNOX" in servicesList
- if knox_enabled:
- putTitanPropertyAttribute("SimpleAuthenticator", "visible", "false")
def recommendSolrConfigurations(self, configurations, clusterData, services, hosts):
super(BigInsights425StackAdvisor, self).recommendSolrConfigurations(configurations, clusterData, services, hosts)
http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/role_command_order.json b/ambari-server/src/main/resources/stacks/BigInsights/4.2/role_command_order.json
index 5ee4b32..cc45213 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/role_command_order.json
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/role_command_order.json
@@ -19,8 +19,7 @@
"ATLAS_SERVICE_CHECK-SERVICE_CHECK": ["ATLAS_SERVER-START"],
"PHOENIX_QUERY_SERVER-START": ["HBASE_MASTER-START"],
"OOZIE_SERVER-START": ["FALCON_SERVER-START"],
- "FALCON_SERVICE_CHECK-SERVICE_CHECK": ["FALCON_SERVER-START"],
- "TITAN_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_SERVICE_CHECK-SERVICE_CHECK"]
+ "FALCON_SERVICE_CHECK-SERVICE_CHECK": ["FALCON_SERVER-START"]
},
"_comment" : "Dependencies that are used when GLUSTERFS is not present in cluster",
"optional_no_glusterfs": {
http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/metainfo.xml
deleted file mode 100755
index b73e31e..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/metainfo.xml
+++ /dev/null
@@ -1,77 +0,0 @@
-<?xml version="1.0"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<metainfo>
- <schemaVersion>2.0</schemaVersion>
- <services>
- <service>
- <name>SYSTEMML</name>
- <displayName>SystemML</displayName>
- <comment>Apache SystemML is a distributed and declarative machine learning platform.</comment>
- <version>0.10.0.4.2</version>
- <components>
- <component>
- <name>SYSTEMML</name>
- <displayName>SystemML</displayName>
- <category>CLIENT</category>
- <cardinality>0+</cardinality>
- <versionAdvertised>true</versionAdvertised>
- <dependencies>
- <dependency>
- <name>HDFS/HDFS_CLIENT</name>
- <scope>host</scope>
- <auto-deploy>
- <enabled>true</enabled>
- </auto-deploy>
- </dependency>
- </dependencies>
- <commandScript>
- <script>scripts/systemml_client.py</script>
- <scriptType>PYTHON</scriptType>
- <timeout>600</timeout>
- </commandScript>
- <configFiles>
- </configFiles>
- </component>
- </components>
- <osSpecifics>
- <osSpecific>
- <osFamily>any</osFamily>
- <packages>
- <package>
- <name>apache_systemml*</name>
- </package>
- </packages>
- </osSpecific>
- </osSpecifics>
-
- <commandScript>
- <script>scripts/service_check.py</script>
- <scriptType>PYTHON</scriptType>
- <timeout>300</timeout>
- </commandScript>
-
- <requiredServices>
- <service>HDFS</service>
- </requiredServices>
-
- <configuration-dependencies>
- </configuration-dependencies>
-
- </service>
- </services>
-</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/__init__.py
deleted file mode 100755
index 5561e10..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/params.py
deleted file mode 100755
index dd7e46c..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/params.py
+++ /dev/null
@@ -1,40 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.version import format_stack_version
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.script.script import Script
-
-# server configurations
-config = Script.get_config()
-stack_root = Script.get_stack_root()
-
-systemml_home_dir = format("{stack_root}/current/systemml-client")
-systemml_lib_dir = format("{systemml_home_dir}/lib")
-systemml_scripts_dir = format("{systemml_home_dir}/scripts")
-
-stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-stack_version = format_stack_version(stack_version_unformatted)
-
-# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
-version = default("/commandParams/version", None)
-stack_name = default("/hostLevelParams/stack_name", None)
-
-java_home = config['hostLevelParams']['java_home']
http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/service_check.py
deleted file mode 100755
index c15b907..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/service_check.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.libraries.functions.format import format
-import subprocess
-import os
-
-class SystemMLServiceCheck(Script):
- def service_check(self, env):
- import params
- env.set_params(params)
-
- if os.path.exists(params.systemml_lib_dir):
- cp = format("{params.stack_root}/current/hadoop-client/*:{params.stack_root}/current/hadoop-mapreduce-client/*:{params.stack_root}/current/hadoop-client/lib/*:{params.systemml_lib_dir}/systemml.jar")
- java = format("{params.java_home}/bin/java")
- command = [java, "-cp", cp, "org.apache.sysml.api.DMLScript", "-s", "print('Apache SystemML');"]
- process = subprocess.Popen(command, stdout=subprocess.PIPE)
- output = process.communicate()[0]
- print output
-
- if 'Apache SystemML' not in output:
- raise Fail("Expected output Apache SystemML not found.")
-
-if __name__ == "__main__":
- SystemMLServiceCheck().execute()
http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/systemml_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/systemml_client.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/systemml_client.py
deleted file mode 100755
index 2d45b68..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/systemml_client.py
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/usr/bin/python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-#from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-
-
-class SystemMLClient(Script):
-
- def get_component_name(self):
- return "systemml-client"
-
- def pre_upgrade_restart(self, env, upgrade_type=None):
- import params
-
- env.set_params(params)
- if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
- #conf_select.select(params.stack_name, "systemml", params.version)
- stack_select.select("systemml-client", params.version)
-
- def install(self, env):
- self.install_packages(env)
-
- def status(self, env):
- raise ClientComponentHasNoStatus()
-
-
-if __name__ == "__main__":
- SystemMLClient().execute()
-
http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-env.xml
deleted file mode 100755
index 4f80ea1..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-env.xml
+++ /dev/null
@@ -1,48 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false" supports_do_not_extend="true">
-
- <property>
- <name>titan_user</name>
- <description>User to run Titan as</description>
- <on-ambari-upgrade add="true"/>
- <property-type>USER</property-type>
- <value>titan</value>
- </property>
-
- <property>
- <name>content</name>
- <description>This is the template for titan-env.sh file</description>
- <value>
-# Set JAVA HOME
-export JAVA_HOME={{java64_home}}
-
-# Add hadoop and hbase configuration directories into classpath
-export HADOOP_CONF_DIR={{hadoop_config_dir}}
-export HBASE_CONF_DIR={{hbase_config_dir}}
-CLASSPATH=$HADOOP_CONF_DIR:$HBASE_CONF_DIR:$CLASSPATH
- </value>
- <on-ambari-upgrade add="true"/>
- </property>
-
-</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-hbase-solr.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-hbase-solr.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-hbase-solr.xml
deleted file mode 100755
index dd45141..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-hbase-solr.xml
+++ /dev/null
@@ -1,67 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false" supports_adding_forbidden="true" supports_do_not_extend="true">
-
- <property>
- <name>content</name>
- <description>Describe the configurations for Solr</description>
- <value># Titan configuration sample: HBase and Solr
-# ATTENTION: If you would like to use this property, do manually execute titan-solr-connection.sh before build index.
-
-# This file connects to HBase using a Zookeeper quorum
-# (storage.hostname) consisting solely of localhost. It also
-# connects to Solr running on localhost using Solr's HTTP API.
-# Zookeeper, the HBase services, and Solr must already be
-# running and available before starting Titan with this file.
-storage.backend=hbase
-storage.hostname={{storage_host}}
-storage.hbase.table=titan_solr
-storage.hbase.ext.zookeeper.znode.parent={{hbase_zookeeper_parent}}
-
-cache.db-cache = true
-cache.db-cache-clean-wait = 20
-cache.db-cache-time = 180000
-cache.db-cache-size = 0.5
-
-# The indexing backend used to extend and optimize Titan's query
-# functionality. This setting is optional. Titan can use multiple
-# heterogeneous index backends. Hence, this option can appear more than
-# once, so long as the user-defined name between "index" and "backend" is
-# unique among appearances.Similar to the storage backend, this should be
-# set to one of Titan's built-in shorthand names for its standard index
-# backends (shorthands: lucene, elasticsearch, es, solr) or to the full
-# package and classname of a custom/third-party IndexProvider
-# implementation.
-
-index.search.backend=solr
-index.search.solr.mode=cloud
-index.search.solr.zookeeper-url={{solr_server_host}}/solr
-index.search.solr.configset=titan
- </value>
- <on-ambari-upgrade add="true"/>
- <value-attributes>
- <show-property-name>false</show-property-name>
- </value-attributes>
- </property>
-
-</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-log4j.xml
deleted file mode 100755
index f61a479..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-log4j.xml
+++ /dev/null
@@ -1,66 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false" supports_do_not_extend="true">
-
- <property>
- <name>content</name>
- <description>Custom log4j-console.properties</description>
- <value>
- # Used by gremlin.sh
-
- log4j.appender.A2=org.apache.log4j.ConsoleAppender
- log4j.appender.A2.Threshold=TRACE
- log4j.appender.A2.layout=org.apache.log4j.PatternLayout
- log4j.appender.A2.layout.ConversionPattern=%d{HH:mm:ss} %-5p %c %x - %m%n
-
- log4j.rootLogger=${gremlin.log4j.level}, A2
-
- #log4j.logger.com.thinkaurelius.titan.graphdb.database.idassigner.placement=DEBUG
- #log4j.logger.com.thinkaurelius.titan.diskstorage.hbase.HBaseStoreManager=DEBUG
-
- # Disable spurious Hadoop config deprecation warnings under 2.2.0.
- #
- # See https://issues.apache.org/jira/browse/HADOOP-10178
- #
- # This can and should be deleted when we upgrade our Hadoop 2.2.0
- # dependency to 2.3.0 or 3.0.0.
- log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=OFF
-
- # Configure MR at its own loglevel. We usually want MR at INFO,
- # even if the rest of the loggers are at WARN or ERROR or FATAL,
- # because job progress information is at INFO.
- log4j.logger.org.apache.hadoop.mapred=${gremlin.mr.log4j.level}
- log4j.logger.org.apache.hadoop.mapreduce=${gremlin.mr.log4j.level}
-
- # This generates 3 INFO lines per jar on the classpath -- usually more
- # noise than desirable in the REPL. Switching it to the default
- # log4j level means it will be at WARN by default, which is ideal.
- log4j.logger.org.apache.hadoop.mapred.LocalDistributedCacheManager=${gremlin.log4j.level}
- </value>
- <on-ambari-upgrade add="true"/>
- <value-attributes>
- <show-property-name>false</show-property-name>
- </value-attributes>
- </property>
-
-</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/kerberos.json b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/kerberos.json
deleted file mode 100755
index ccabbf0..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/kerberos.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
- "services": [
- {
- "name": "TITAN",
- "components": [
- {
- "name": "TITAN",
- "identities": [
- {
- "name": "/HDFS/NAMENODE/hdfs"
- }
- ]
- }
- ]
- }
- ]
-}
http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/metainfo.xml
deleted file mode 100755
index 73f4635..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/metainfo.xml
+++ /dev/null
@@ -1,88 +0,0 @@
-<?xml version="1.0"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<metainfo>
- <schemaVersion>2.0</schemaVersion>
- <services>
- <service>
- <name>TITAN</name>
- <displayName>Titan</displayName>
- <comment>Titan is a scalable graph database optimized for storing and querying graphs containing hundreds of
- billions of vertices and edges distributed across a multi-machine cluster.</comment>
- <version>1.0.0</version>
- <components>
- <component>
- <name>TITAN</name>
- <displayName>Titan</displayName>
- <category>CLIENT</category>
- <cardinality>0+</cardinality>
- <versionAdvertised>true</versionAdvertised>
- <commandScript>
- <script>scripts/titan_client.py</script>
- <scriptType>PYTHON</scriptType>
- <timeout>600</timeout>
- </commandScript>
- <configFiles>
- <configFile>
- <type>env</type>
- <fileName>titan-env.sh</fileName>
- <dictionaryName>titan-env</dictionaryName>
- </configFile>
- <configFile>
- <type>env</type>
- <fileName>log4j-console.properties</fileName>
- <dictionaryName>titan-log4j</dictionaryName>
- </configFile>
- <configFile>
- <type>env</type>
- <fileName>titan-hbase-solr.properties</fileName>
- <dictionaryName>titan-hbase-solr</dictionaryName>
- </configFile>
- </configFiles>
- </component>
- </components>
- <osSpecifics>
- <osSpecific>
- <osFamily>any</osFamily>
- <packages>
- <package>
- <name>titan_4_2_*</name>
- </package>
- </packages>
- </osSpecific>
- </osSpecifics>
-
- <commandScript>
- <script>scripts/service_check.py</script>
- <scriptType>PYTHON</scriptType>
- <timeout>300</timeout>
- </commandScript>
-
- <requiredServices>
- <service>HDFS</service>
- <service>HBASE</service>
- <service>SOLR</service>
- </requiredServices>
-
- <configuration-dependencies>
- <config-type>titan-env</config-type>
- <config-type>titan-hbase-solr</config-type>
- <config-type>titan-log4j</config-type>
- </configuration-dependencies>
- </service>
- </services>
-</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/files/titanSmoke.groovy
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/files/titanSmoke.groovy b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/files/titanSmoke.groovy
deleted file mode 100755
index 79438be..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/files/titanSmoke.groovy
+++ /dev/null
@@ -1,20 +0,0 @@
-/*Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License */
-
-import com.thinkaurelius.titan.core.TitanFactory;
-
-graph = TitanFactory.open("/etc/titan/conf/titan-hbase-solr.properties")
-g = graph.traversal()
-l = g.V().values('name').toList()
http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/params.py
deleted file mode 100755
index 3cb7aef..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/params.py
+++ /dev/null
@@ -1,128 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.resources.hdfs_resource import HdfsResource
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.version import format_stack_version
-from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions import get_kinit_path
-
-# server configurations
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-stack_name = default("/hostLevelParams/stack_name", None)
-
-stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-iop_stack_version = format_stack_version(stack_version_unformatted)
-
-# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
-version = default("/commandParams/version", None)
-
-titan_user = config['configurations']['titan-env']['titan_user']
-user_group = config['configurations']['cluster-env']['user_group']
-titan_bin_dir = '/usr/iop/current/titan-client/bin'
-
-
-smokeuser = config['configurations']['cluster-env']['smokeuser']
-smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
-
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-
-# titan configurations
-titan_conf_dir = "/usr/iop/current/titan-client/conf"
-titan_hbase_solr_props = config['configurations']['titan-hbase-solr']['content']
-titan_env_props = config['configurations']['titan-env']['content']
-log4j_console_props = config['configurations']['titan-log4j']['content']
-
-# not supporting 32 bit jdk.
-java64_home = config['hostLevelParams']['java_home']
-hadoop_config_dir = '/etc/hadoop/conf'
-hbase_config_dir = '/etc/hbase/conf'
-
-# Titan required 'storage.hostname' which is hbase cluster in IOP 4.2.
-# The host name should be zooKeeper quorum
-storage_hosts = config['clusterHostInfo']['zookeeper_hosts']
-storage_host_list = []
-for hostname in storage_hosts:
- storage_host_list.append(hostname)
-storage_host = ",".join(storage_host_list)
-hbase_zookeeper_parent = config['configurations']['hbase-site']['zookeeper.znode.parent']
-
-# Solr cloud host
-solr_hosts = config['clusterHostInfo']['solr_hosts']
-solr_host_list = []
-for hostname in solr_hosts:
- solr_host_list.append(hostname)
-solr_host = ",".join(solr_host_list)
-solr_server_host = solr_hosts[0]
-
-# Titan client, it does not work right now, there is no 'titan_host' in 'clusterHostInfo'
-# It will return "Configuration parameter 'titan_host' was not found in configurations dictionary!"
-# So here is a known issue as task 118900, will install titan and solr on same node right now.
-# titan_host = config['clusterHostInfo']['titan_host']
-titan_host = solr_server_host
-
-# Conf directory and jar should be copy to solr site
-titan_dir = format('/usr/iop/current/titan-client')
-titan_ext_dir = format('/usr/iop/current/titan-client/ext')
-titan_solr_conf_dir = format('/usr/iop/current/titan-client/conf/solr')
-titan_solr_jar_file = format('/usr/iop/current/titan-client/lib/jts-1.13.jar')
-
-titan_solr_hdfs_dir = "/apps/titan"
-titan_solr_hdfs_conf_dir = "/apps/titan/conf"
-titan_solr_hdfs_jar = "/apps/titan/jts-1.13.jar"
-titan_tmp_dir = format('{tmp_dir}/titan')
-titan_solr_dir = format('{titan_tmp_dir}/solr_installed')
-configuration_tags = config['configurationTags']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-titan_hdfs_mode = 0775
-
-#for create_hdfs_directory
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-kinit_path_local = get_kinit_path()
-hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
-hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-hdfs_site = config['configurations']['hdfs-site']
-hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', 'missing_principal').replace("_HOST", hostname)
-default_fs = config['configurations']['core-site']['fs.defaultFS']
-
-import functools
-#to create hdfs directory we need to call params.HdfsResource in code
-HdfsResource = functools.partial(
- HdfsResource,
- user = hdfs_user,
- security_enabled = security_enabled,
- keytab = hdfs_user_keytab,
- kinit_path_local = kinit_path_local,
- hadoop_bin_dir = hadoop_bin_dir,
- hadoop_conf_dir = hadoop_conf_dir,
- principal_name = hdfs_principal_name,
- hdfs_site = hdfs_site,
- default_fs = default_fs
-)
-
http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/service_check.py
deleted file mode 100755
index 3c011a1..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/service_check.py
+++ /dev/null
@@ -1,64 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import os
-from resource_management import *
-from resource_management.core.resources.system import Execute, File
-from resource_management.core.source import InlineTemplate, StaticFile
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.version import compare_versions
-from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions.validate import call_and_match_output
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-
-class TitanServiceCheck(Script):
- pass
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class TitanServiceCheckLinux(TitanServiceCheck):
- def service_check(self, env):
- import params
- env.set_params(params)
-
- File( format("{tmp_dir}/titanSmoke.groovy"),
- content = StaticFile("titanSmoke.groovy"),
- mode = 0755
- )
-
- if params.iop_stack_version != "" and compare_versions(params.iop_stack_version, '4.2') >= 0:
- if params.security_enabled:
- kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
- Execute(kinit_cmd,
- user=params.smokeuser
- )
-
- Execute(format("gremlin {tmp_dir}/titanSmoke.groovy"),
- tries = 3,
- try_sleep = 5,
- path = format('{titan_bin_dir}:/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'),
- user = params.smokeuser,
- logoutput = True
- )
-
-if __name__ == "__main__":
- # print "Track service check status"
- TitanServiceCheckLinux().execute()
http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/titan.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/titan.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/titan.py
deleted file mode 100755
index fd94c82..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/titan.py
+++ /dev/null
@@ -1,70 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import os
-from resource_management import *
-from resource_management.core.source import InlineTemplate
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def titan():
- import params
-
- Directory(params.titan_conf_dir,
- create_parents = True,
- owner=params.titan_user,
- group=params.user_group
- )
-
- File(format("{params.titan_conf_dir}/titan-env.sh"),
- mode=0644,
- group=params.user_group,
- owner=params.titan_user,
- content=InlineTemplate(params.titan_env_props)
- )
-
- # titan-hbase-solr_properties is always set to a default even if it's not in the payload
- File(format("{params.titan_conf_dir}/titan-hbase-solr.properties"),
- mode=0644,
- group=params.user_group,
- owner=params.titan_user,
- content=InlineTemplate(params.titan_hbase_solr_props)
- )
-
- if (params.log4j_console_props != None):
- File(format("{params.titan_conf_dir}/log4j-console.properties"),
- mode=0644,
- group=params.user_group,
- owner=params.titan_user,
- content=InlineTemplate(params.log4j_console_props)
- )
- elif (os.path.exists(format("{params.titan_conf_dir}/log4j-console.properties"))):
- File(format("{params.titan_conf_dir}/log4j-console.properties"),
- mode=0644,
- group=params.user_group,
- owner=params.titan_user
- )
- # Change titan ext directory for multiple user access
- Directory(params.titan_ext_dir,
- create_parents = True,
- owner=params.titan_user,
- group=params.user_group,
- mode=0775
- )
http://git-wip-us.apache.org/repos/asf/ambari/blob/08f48c1e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/titan_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/titan_client.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/titan_client.py
deleted file mode 100755
index d54ccee..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/titan_client.py
+++ /dev/null
@@ -1,58 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-import os
-from resource_management import *
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from titan import titan
-
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-
-class TitanClient(Script):
- def configure(self, env):
- import params
- env.set_params(params)
- titan()
-
- def status(self, env):
- raise ClientComponentHasNoStatus()
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class TitanClientLinux(TitanClient):
- def get_component_name(self):
- return "titan-client"
-
- def pre_rolling_restart(self, env):
- import params
- env.set_params(params)
-
- if params.version and compare_versions(format_stack_version(params.version), '4.2.0.0') >= 0:
- conf_select.select(params.stack_name, "titan", params.version)
- stack_select.select("titan-client", params.version)
-
- def install(self, env):
- self.install_packages(env)
- self.configure(env)
-
-if __name__ == "__main__":
- TitanClient().execute()
[13/13] ambari git commit: Merge branch 'branch-2.5' into
branch-feature-AMBARI-21348
Posted by jo...@apache.org.
Merge branch 'branch-2.5' into branch-feature-AMBARI-21348
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/267cd8b0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/267cd8b0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/267cd8b0
Branch: refs/heads/branch-feature-AMBARI-21348
Commit: 267cd8b0cee17dc84be2b075b7168cd2518f02b6
Parents: d852928 a6ac40b
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Wed Jul 12 14:46:40 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Wed Jul 12 14:46:40 2017 -0400
----------------------------------------------------------------------
.../libraries/functions/stack_features.py | 28 ++--
.../actionmanager/ExecutionCommandWrapper.java | 3 +-
.../ambari/server/agent/ExecutionCommand.java | 14 +-
.../controller/ActionExecutionContext.java | 30 +++--
.../controller/AmbariActionExecutionHelper.java | 8 +-
.../ClusterStackVersionResourceProvider.java | 2 +-
.../internal/UpgradeResourceProvider.java | 107 +++++++++-------
.../upgrades/FinalizeUpgradeAction.java | 18 +--
.../upgrades/UpgradeUserKerberosDescriptor.java | 41 ++----
.../ambari/server/state/UpgradeContext.java | 31 +++--
.../ambari/server/topology/AmbariContext.java | 30 ++++-
.../HBASE/0.96.0.2.0/package/scripts/hbase.py | 12 +-
.../0.96.0.2.0/package/scripts/params_linux.py | 3 +
.../HIVE/0.12.0.2.0/package/scripts/hive.py | 3 +-
.../0.12.0.2.0/package/scripts/params_linux.py | 2 +
.../SPARK/1.2.1/package/scripts/params.py | 11 +-
.../SPARK/1.2.1/package/scripts/setup_spark.py | 6 +-
.../1.2.1/package/scripts/spark_service.py | 6 +-
.../BigInsights/4.2.5/role_command_order.json | 12 +-
.../BigInsights/4.2.5/services/stack_advisor.py | 53 --------
.../BigInsights/4.2/role_command_order.json | 3 +-
.../4.2/services/SYSTEMML/metainfo.xml | 77 -----------
.../SYSTEMML/package/scripts/__init__.py | 19 ---
.../services/SYSTEMML/package/scripts/params.py | 40 ------
.../SYSTEMML/package/scripts/service_check.py | 43 -------
.../SYSTEMML/package/scripts/systemml_client.py | 49 -------
.../services/TITAN/configuration/titan-env.xml | 48 -------
.../TITAN/configuration/titan-hbase-solr.xml | 67 ----------
.../TITAN/configuration/titan-log4j.xml | 66 ----------
.../4.2/services/TITAN/kerberos.json | 17 ---
.../BigInsights/4.2/services/TITAN/metainfo.xml | 88 -------------
.../TITAN/package/files/titanSmoke.groovy | 20 ---
.../services/TITAN/package/scripts/params.py | 128 -------------------
.../TITAN/package/scripts/service_check.py | 64 ----------
.../4.2/services/TITAN/package/scripts/titan.py | 70 ----------
.../TITAN/package/scripts/titan_client.py | 58 ---------
.../resources/stacks/HDP/2.6/repos/repoinfo.xml | 6 +-
.../services/HIVE/configuration/hive-site.xml | 35 +++++
.../ComponentVersionCheckActionTest.java | 1 -
.../upgrades/UpgradeActionTest.java | 2 -
.../UpgradeUserKerberosDescriptorTest.java | 19 +--
.../src/test/python/TestStackFeature.py | 44 +++++--
.../python/custom_actions/test_ru_set_all.py | 6 +-
.../stacks/2.0.6/HIVE/test_hive_server.py | 2 +
.../test/python/stacks/2.0.6/configs/nn_eu.json | 2 +-
.../stacks/2.0.6/configs/nn_eu_standby.json | 2 +-
.../2.1/configs/hive-metastore-upgrade.json | 2 +-
.../python/stacks/2.2/configs/knox_upgrade.json | 2 +-
ambari-web/app/controllers/main/service.js | 13 +-
ambari-web/app/utils/ajax/ajax.js | 22 ++++
.../test/controllers/main/service_test.js | 4 +-
51 files changed, 321 insertions(+), 1118 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/267cd8b0/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
----------------------------------------------------------------------
[09/13] ambari git commit: Merge branch 'branch-feature-AMBARI-21348'
into branch-2.5
Posted by jo...@apache.org.
Merge branch 'branch-feature-AMBARI-21348' into branch-2.5
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/621f3801
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/621f3801
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/621f3801
Branch: refs/heads/branch-feature-AMBARI-21348
Commit: 621f38019a81c4183b0e001efe00e89c6583d85a
Parents: 88d93b9 cb86bf0
Author: Siddharth Wagle <sw...@hortonworks.com>
Authored: Wed Jul 12 10:11:56 2017 -0700
Committer: Siddharth Wagle <sw...@hortonworks.com>
Committed: Wed Jul 12 10:11:56 2017 -0700
----------------------------------------------------------------------
.../0.96.0.2.0/package/scripts/hbase_service.py | 17 +++++++++++++++++
.../0.96.0.2.0/package/scripts/params_linux.py | 9 +++++++++
.../BigInsights/4.2.5/upgrades/config-upgrade.xml | 11 +++++++++++
.../upgrades/nonrolling-upgrade-to-hdp-2.6.xml | 5 +++++
.../BigInsights/4.2/upgrades/config-upgrade.xml | 11 +++++++++++
.../4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml | 7 ++++++-
ambari-web/app/messages.js | 2 ++
ambari-web/app/utils/ajax/ajax.js | 2 +-
.../main/admin/stack_upgrade/versions_view.js | 14 ++++++++++++++
.../main/admin/stack_upgrade/version_view_test.js | 4 ++++
10 files changed, 80 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/621f3801/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/621f3801/ambari-web/app/utils/ajax/ajax.js
----------------------------------------------------------------------
[12/13] ambari git commit: AMBARI-21451 - Expected Values Like
original_stack Are Missing On Downgrades (jonathanhurley)
Posted by jo...@apache.org.
AMBARI-21451 - Expected Values Like original_stack Are Missing On Downgrades (jonathanhurley)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a6ac40bc
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a6ac40bc
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a6ac40bc
Branch: refs/heads/branch-feature-AMBARI-21348
Commit: a6ac40bc07e4c0bec207d0786e4e870a6fe194f2
Parents: 08f48c1
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Wed Jul 12 13:30:16 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Wed Jul 12 14:43:15 2017 -0400
----------------------------------------------------------------------
.../libraries/functions/stack_features.py | 28 +++--
.../actionmanager/ExecutionCommandWrapper.java | 3 +-
.../ambari/server/agent/ExecutionCommand.java | 14 +--
.../controller/ActionExecutionContext.java | 30 +++---
.../controller/AmbariActionExecutionHelper.java | 8 +-
.../ClusterStackVersionResourceProvider.java | 2 +-
.../internal/UpgradeResourceProvider.java | 107 +++++++++++--------
.../upgrades/FinalizeUpgradeAction.java | 18 +---
.../upgrades/UpgradeUserKerberosDescriptor.java | 41 +++----
.../ambari/server/state/UpgradeContext.java | 31 +++---
.../SPARK/1.2.1/package/scripts/params.py | 11 +-
.../SPARK/1.2.1/package/scripts/setup_spark.py | 6 +-
.../1.2.1/package/scripts/spark_service.py | 6 +-
.../ComponentVersionCheckActionTest.java | 1 -
.../upgrades/UpgradeActionTest.java | 2 -
.../UpgradeUserKerberosDescriptorTest.java | 19 ++--
.../src/test/python/TestStackFeature.py | 44 ++++++--
.../python/custom_actions/test_ru_set_all.py | 6 +-
.../test/python/stacks/2.0.6/configs/nn_eu.json | 2 +-
.../stacks/2.0.6/configs/nn_eu_standby.json | 2 +-
.../2.1/configs/hive-metastore-upgrade.json | 2 +-
.../python/stacks/2.2/configs/knox_upgrade.json | 2 +-
22 files changed, 203 insertions(+), 182 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
index 7811e26..2c66728 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
@@ -104,7 +104,10 @@ def get_stack_feature_version(config):
# something like 2.4.0.0-1234; represents the version for the command
# (or None if this is a cluster install and it hasn't been calculated yet)
- version = default("/commandParams/version", None)
+ # this is always guaranteed to be the correct version for the command, even in
+ # upgrade and downgrade scenarios
+ command_version = default("/commandParams/version", None)
+ command_stack = default("/commandParams/target_stack", None)
# something like 2.4.0.0-1234
# (or None if this is a cluster install and it hasn't been calculated yet)
@@ -114,13 +117,13 @@ def get_stack_feature_version(config):
upgrade_direction = default("/commandParams/upgrade_direction", None)
# start out with the value that's right 99% of the time
- version_for_stack_feature_checks = version if version is not None else stack_version
+ version_for_stack_feature_checks = command_version if command_version is not None else stack_version
# if this is not an upgrade, then we take the simple path
if upgrade_direction is None:
Logger.info(
- "Stack Feature Version Info: stack_version={0}, version={1}, current_cluster_version={2} -> {3}".format(
- stack_version, version, current_cluster_version, version_for_stack_feature_checks))
+ "Stack Feature Version Info: Cluster Stack={0}, Cluster Current Version={1}, Command Stack={2}, Command Version={3}-> {4}".format(
+ stack_version, current_cluster_version, command_stack, command_version, version_for_stack_feature_checks))
return version_for_stack_feature_checks
@@ -129,15 +132,12 @@ def get_stack_feature_version(config):
is_stop_command = _is_stop_command(config)
if not is_stop_command:
Logger.info(
- "Stack Feature Version Info: stack_version={0}, version={1}, current_cluster_version={2}, upgrade_direction={3} -> {4}".format(
- stack_version, version, current_cluster_version, upgrade_direction,
+ "Stack Feature Version Info: Cluster Stack={0}, Cluster Current Version={1}, Command Stack={2}, Command Version={3}, Upgrade Direction={4} -> {5}".format(
+ stack_version, current_cluster_version, command_stack, command_version, upgrade_direction,
version_for_stack_feature_checks))
return version_for_stack_feature_checks
- original_stack = default("/commandParams/original_stack", None)
- target_stack = default("/commandParams/target_stack", None)
-
# something like 2.5.0.0-5678 (or None)
downgrade_from_version = default("/commandParams/downgrade_from_version", None)
@@ -153,15 +153,13 @@ def get_stack_feature_version(config):
# UPGRADE
if current_cluster_version is not None:
version_for_stack_feature_checks = current_cluster_version
- elif original_stack is not None:
- version_for_stack_feature_checks = original_stack
else:
- version_for_stack_feature_checks = version if version is not None else stack_version
+ version_for_stack_feature_checks = command_version if command_version is not None else stack_version
Logger.info(
- "Stack Feature Version Info: stack_version={0}, version={1}, current_cluster_version={2}, upgrade_direction={3}, original_stack={4}, target_stack={5}, downgrade_from_version={6}, stop_command={7} -> {8}".format(
- stack_version, version, current_cluster_version, upgrade_direction, original_stack,
- target_stack, downgrade_from_version, is_stop_command, version_for_stack_feature_checks))
+ "Stack Feature Version Info: Cluster Stack={0}, Cluster Current Version={1}, Command Stack={2}, Command Version={3}, Upgrade Direction={4}, stop_command={5} -> {6}".format(
+ stack_version, current_cluster_version, command_stack, command_version, upgrade_direction,
+ is_stop_command, version_for_stack_feature_checks))
return version_for_stack_feature_checks
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
index 28946e7..8875314 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
@@ -198,8 +198,9 @@ public class ExecutionCommandWrapper {
Map<String,String> commandParams = executionCommand.getCommandParams();
+ // set the version for the command if it's not already set
ClusterVersionEntity effectiveClusterVersion = cluster.getEffectiveClusterVersion();
- if (null != effectiveClusterVersion) {
+ if (null != effectiveClusterVersion && !commandParams.containsKey(KeyNames.VERSION)) {
commandParams.put(KeyNames.VERSION,
effectiveClusterVersion.getRepositoryVersion().getVersion());
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
index 32fb37b..7948d30 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
@@ -67,7 +67,7 @@ public class ExecutionCommand extends AgentCommand {
private String role;
@SerializedName("hostLevelParams")
- private Map<String, String> hostLevelParams = new HashMap<String, String>();
+ private Map<String, String> hostLevelParams = new HashMap<>();
@SerializedName("roleParams")
private Map<String, String> roleParams = null;
@@ -77,7 +77,7 @@ public class ExecutionCommand extends AgentCommand {
@SerializedName("clusterHostInfo")
private Map<String, Set<String>> clusterHostInfo =
- new HashMap<String, Set<String>>();
+ new HashMap<>();
@SerializedName("configurations")
private Map<String, Map<String, String>> configurations;
@@ -92,7 +92,7 @@ public class ExecutionCommand extends AgentCommand {
private boolean forceRefreshConfigTagsBeforeExecution = false;
@SerializedName("commandParams")
- private Map<String, String> commandParams = new HashMap<String, String>();
+ private Map<String, String> commandParams = new HashMap<>();
@SerializedName("serviceName")
private String serviceName;
@@ -104,10 +104,10 @@ public class ExecutionCommand extends AgentCommand {
private String componentName;
@SerializedName("kerberosCommandParams")
- private List<Map<String, String>> kerberosCommandParams = new ArrayList<Map<String, String>>();
+ private List<Map<String, String>> kerberosCommandParams = new ArrayList<>();
@SerializedName("localComponents")
- private Set<String> localComponents = new HashSet<String>();
+ private Set<String> localComponents = new HashSet<>();
@SerializedName("availableServices")
private Map<String, String> availableServices = new HashMap<>();
@@ -149,7 +149,7 @@ public class ExecutionCommand extends AgentCommand {
}
public Map<String, Map<String, String>> getConfigurationCredentials() {
- return this.configurationCredentials;
+ return configurationCredentials;
}
public String getCommandId() {
@@ -434,6 +434,8 @@ public class ExecutionCommand extends AgentCommand {
String USER_GROUPS = "user_groups";
String NOT_MANAGED_HDFS_PATH_LIST = "not_managed_hdfs_path_list";
String VERSION = "version";
+ String SOURCE_STACK = "source_stack";
+ String TARGET_STACK = "target_stack";
String REFRESH_TOPOLOGY = "refresh_topology";
String HOST_SYS_PREPPED = "host_sys_prepped";
String MAX_DURATION_OF_RETRIES = "max_duration_for_retries";
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
index c361094..af506f2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
@@ -25,7 +25,7 @@ import java.util.Map;
import org.apache.ambari.server.actionmanager.TargetHostType;
import org.apache.ambari.server.controller.internal.RequestOperationLevel;
import org.apache.ambari.server.controller.internal.RequestResourceFilter;
-import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
/**
* The context required to create tasks and stages for a custom action
@@ -42,7 +42,7 @@ public class ActionExecutionContext {
private String expectedComponentName;
private boolean hostsInMaintenanceModeExcluded = true;
private boolean allowRetry = false;
- private StackId stackId;
+ private RepositoryVersionEntity repositoryVersion;
/**
* {@code true} if slave/client component failures should be automatically
@@ -171,27 +171,29 @@ public class ActionExecutionContext {
}
/**
- * Gets the stack to use for generating stack-associated values for a command.
- * In some cases the cluster's stack is not the correct one to use, such as
- * when distributing a repository.
+ * Gets the stack/version to use for generating stack-associated values for a
+ * command. In some cases the cluster's stack is not the correct one to use,
+ * such as when distributing a repository.
*
- * @return the stackId the stack to use when generating stack-specific content
- * for the command.
+ * @return the repository for the stack/version to use when generating
+ * stack-specific content for the command.
+ *
+ * @return
*/
- public StackId getStackId() {
- return stackId;
+ public RepositoryVersionEntity getRepositoryVersion() {
+ return repositoryVersion;
}
/**
- * Sets the stack to use for generating stack-associated values for a command.
- * In some cases the cluster's stack is not the correct one to use, such as
- * when distributing a repository.
+ * Sets the stack/version to use for generating stack-associated values for a
+ * command. In some cases the cluster's stack is not the correct one to use,
+ * such as when distributing a repository.
*
* @param stackId
* the stackId to use for stack-based properties on the command.
*/
- public void setStackId(StackId stackId) {
- this.stackId = stackId;
+ public void setRepositoryVersion(RepositoryVersionEntity repositoryVersion) {
+ this.repositoryVersion = repositoryVersion;
}
@Override
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
index f75fb41..0638910 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
@@ -553,10 +553,12 @@ public class AmbariActionExecutionHelper {
// set the host level params if not already set by whoever is creating this command
if (!hostLevelParams.containsKey(STACK_NAME) || !hostLevelParams.containsKey(STACK_VERSION)) {
- // see if the action context has a stack ID set to use, otherwise use the
+ // see if the action context has a repository set to use for the command, otherwise use the
// cluster's current stack ID
- StackId stackId = actionContext.getStackId() != null ? actionContext.getStackId()
- : cluster.getCurrentStackVersion();
+ StackId stackId = cluster.getCurrentStackVersion();
+ if (null != actionContext.getRepositoryVersion()) {
+ stackId = actionContext.getRepositoryVersion().getStackId();
+ }
hostLevelParams.put(STACK_NAME, stackId.getStackName());
hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
index 9ea6083..633fe8c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
@@ -713,7 +713,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
INSTALL_PACKAGES_ACTION, Collections.singletonList(filter), params);
- actionContext.setStackId(stackId);
+ actionContext.setRepositoryVersion(repoVersion);
actionContext.setTimeout(Short.valueOf(configuration.getDefaultAgentTaskTimeout(true)));
return actionContext;
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
index 1130026..858b7cf 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
@@ -883,9 +883,9 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
// keep track of which stack to use when building commands - an express
// upgrade switches the stack half way through while other types move it in
// the beginning
- StackId effectiveStackId = upgradeContext.getTargetStackId();
+ RepositoryVersionEntity effectiveRepositoryVersion = upgradeContext.getTargetRepositoryVersion();
if(upgradeContext.getType() == UpgradeType.NON_ROLLING ) {
- effectiveStackId = upgradeContext.getSourceStackId();
+ effectiveRepositoryVersion = upgradeContext.getSourceRepositoryVersion();
}
for (UpgradeGroupHolder group : groups) {
@@ -895,7 +895,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
if (upgradeContext.getType() == UpgradeType.NON_ROLLING
&& UpdateStackGrouping.class.equals(group.groupClass)) {
- effectiveStackId = upgradeContext.getTargetStackId();
+ effectiveRepositoryVersion = upgradeContext.getTargetRepositoryVersion();
}
List<UpgradeItemEntity> itemEntities = new ArrayList<>();
@@ -919,7 +919,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
itemEntities.add(itemEntity);
injectVariables(configHelper, cluster, itemEntity);
- makeServerSideStage(upgradeContext, req, effectiveStackId, itemEntity,
+ makeServerSideStage(upgradeContext, req, effectiveRepositoryVersion, itemEntity,
(ServerSideActionTask) task, skippable, supportsAutoSkipOnFailure, allowRetry,
pack, configUpgradePack);
}
@@ -934,7 +934,8 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
injectVariables(configHelper, cluster, itemEntity);
// upgrade items match a stage
- createStage(upgradeContext, req, effectiveStackId, itemEntity, wrapper, skippable,
+ createStage(upgradeContext, req, effectiveRepositoryVersion, itemEntity, wrapper,
+ skippable,
supportsAutoSkipOnFailure, allowRetry);
}
}
@@ -1275,10 +1276,10 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
* the upgrade context (not {@code null}).
* @param request
* the request to add the new stage to (not {@code null}).
- * @param effectiveStackId
- * the stack ID to use when building the command. This will determine
- * things like stack tools and version information added to the
- * command (not {@code null}).
+ * @param effectiveRepositoryVersion
+ * the stack/version to use when building the command. This will
+ * determine things like stack tools and version information added to
+ * the command (not {@code null}).
* @param entity
* the upgrade entity to add the new items to (not {@code null}).
* @param wrapper
@@ -1292,25 +1293,27 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
* @throws AmbariException
*/
private void createStage(UpgradeContext context, RequestStageContainer request,
- StackId effectiveStackId, UpgradeItemEntity entity, StageWrapper wrapper, boolean skippable,
- boolean supportsAutoSkipOnFailure, boolean allowRetry) throws AmbariException {
+ RepositoryVersionEntity effectiveRepositoryVersion, UpgradeItemEntity entity,
+ StageWrapper wrapper, boolean skippable, boolean supportsAutoSkipOnFailure,
+ boolean allowRetry) throws AmbariException {
switch (wrapper.getType()) {
case CONFIGURE:
case START:
case STOP:
case RESTART:
- makeCommandStage(context, request, effectiveStackId, entity, wrapper, skippable,
+ makeCommandStage(context, request, effectiveRepositoryVersion, entity, wrapper, skippable,
supportsAutoSkipOnFailure,
allowRetry);
break;
case RU_TASKS:
- makeActionStage(context, request, effectiveStackId, entity, wrapper, skippable,
+ makeActionStage(context, request, effectiveRepositoryVersion, entity, wrapper, skippable,
supportsAutoSkipOnFailure,
allowRetry);
break;
case SERVICE_CHECK:
- makeServiceCheckStage(context, request, effectiveStackId, entity, wrapper, skippable,
+ makeServiceCheckStage(context, request, effectiveRepositoryVersion, entity, wrapper,
+ skippable,
supportsAutoSkipOnFailure, allowRetry);
break;
default:
@@ -1341,9 +1344,9 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
* the upgrade context.
* @param request
* the request object to add the stage to.
- * @param effectiveStackId
- * the stack ID to use when generating content for the command. On
- * some upgrade types, this may change during the course of the
+ * @param effectiveRepositoryVersion
+ * the stack/version to use when generating content for the command.
+ * On some upgrade types, this may change during the course of the
* upgrade orchestration. An express upgrade changes this after
* stopping all services.
* @param entity
@@ -1360,8 +1363,9 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
* @throws AmbariException
*/
private void makeActionStage(UpgradeContext context, RequestStageContainer request,
- StackId effectiveStackId, UpgradeItemEntity entity, StageWrapper wrapper, boolean skippable,
- boolean supportsAutoSkipOnFailure, boolean allowRetry) throws AmbariException {
+ RepositoryVersionEntity effectiveRepositoryVersion, UpgradeItemEntity entity,
+ StageWrapper wrapper, boolean skippable, boolean supportsAutoSkipOnFailure,
+ boolean allowRetry) throws AmbariException {
if (0 == wrapper.getHosts().size()) {
throw new AmbariException(
@@ -1369,13 +1373,14 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
}
Cluster cluster = context.getCluster();
+ StackId effectiveStackId = effectiveRepositoryVersion.getStackId();
// add each host to this stage
RequestResourceFilter filter = new RequestResourceFilter("", "",
new ArrayList<>(wrapper.getHosts()));
LOG.debug("Analyzing upgrade item {} with tasks: {}.", entity.getText(), entity.getTasks());
- Map<String, String> params = getNewParameterMap(request, context);
+ Map<String, String> params = getNewParameterMap(request, context, effectiveRepositoryVersion);
params.put(UpgradeContext.COMMAND_PARAM_TASKS, entity.getTasks());
// Apply additional parameters to the command that come from the stage.
@@ -1403,7 +1408,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
EXECUTE_TASK_ROLE, Collections.singletonList(filter), params);
- actionContext.setStackId(effectiveStackId);
+ actionContext.setRepositoryVersion(effectiveRepositoryVersion);
// hosts in maintenance mode are excluded from the upgrade
actionContext.setMaintenanceModeHostExcluded(true);
@@ -1449,10 +1454,10 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
* Upgrade Context
* @param request
* Container for stage
- * @param effectiveStackId
- * the stack ID to use when building the command. This will determine
- * things like stack tools and version information added to the
- * command (not {@code null}).
+ * @param effectiveRepositoryVersion
+ * the stack/version to use when building the command. This will
+ * determine things like stack tools and version information added to
+ * the command (not {@code null}).
* @param entity
* Upgrade Item
* @param wrapper
@@ -1464,10 +1469,12 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
* @throws AmbariException
*/
private void makeCommandStage(UpgradeContext context, RequestStageContainer request,
- StackId effectiveStackId, UpgradeItemEntity entity, StageWrapper wrapper, boolean skippable,
- boolean supportsAutoSkipOnFailure, boolean allowRetry) throws AmbariException {
+ RepositoryVersionEntity effectiveRepositoryVersion, UpgradeItemEntity entity,
+ StageWrapper wrapper, boolean skippable, boolean supportsAutoSkipOnFailure,
+ boolean allowRetry) throws AmbariException {
Cluster cluster = context.getCluster();
+ StackId effectiveStackId = effectiveRepositoryVersion.getStackId();
List<RequestResourceFilter> filters = new ArrayList<>();
@@ -1490,7 +1497,8 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
break;
}
- Map<String, String> commandParams = getNewParameterMap(request, context);
+ Map<String, String> commandParams = getNewParameterMap(request, context,
+ effectiveRepositoryVersion);
// Apply additional parameters to the command that come from the stage.
applyAdditionalParameters(wrapper, commandParams);
@@ -1498,7 +1506,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
function, filters, commandParams);
- actionContext.setStackId(effectiveStackId);
+ actionContext.setRepositoryVersion(effectiveRepositoryVersion);
actionContext.setTimeout(wrapper.getMaxTimeout(s_configuration));
actionContext.setRetryAllowed(allowRetry);
@@ -1540,7 +1548,8 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
}
private void makeServiceCheckStage(UpgradeContext context, RequestStageContainer request,
- StackId effectiveStackId, UpgradeItemEntity entity, StageWrapper wrapper, boolean skippable,
+ RepositoryVersionEntity effectiveRepositoryVersion, UpgradeItemEntity entity,
+ StageWrapper wrapper, boolean skippable,
boolean supportsAutoSkipOnFailure, boolean allowRetry) throws AmbariException {
List<RequestResourceFilter> filters = new ArrayList<>();
@@ -1550,8 +1559,10 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
}
Cluster cluster = context.getCluster();
+ StackId effectiveStackId = effectiveRepositoryVersion.getStackId();
- Map<String, String> commandParams = getNewParameterMap(request, context);
+ Map<String, String> commandParams = getNewParameterMap(request, context,
+ effectiveRepositoryVersion);
// Apply additional parameters to the command that come from the stage.
applyAdditionalParameters(wrapper, commandParams);
@@ -1559,7 +1570,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
"SERVICE_CHECK", filters, commandParams);
- actionContext.setStackId(effectiveStackId);
+ actionContext.setRepositoryVersion(effectiveRepositoryVersion);
actionContext.setTimeout(wrapper.getMaxTimeout(s_configuration));
actionContext.setRetryAllowed(allowRetry);
actionContext.setAutoSkipFailures(context.isServiceCheckFailureAutoSkipped());
@@ -1586,7 +1597,9 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
stage.setStageId(stageId);
entity.setStageId(Long.valueOf(stageId));
- Map<String, String> requestParams = getNewParameterMap(request, context);
+ Map<String, String> requestParams = getNewParameterMap(request, context,
+ effectiveRepositoryVersion);
+
s_commandExecutionHelper.get().addExecutionCommandsToStage(actionContext, stage, requestParams, jsons);
request.addStages(Collections.singletonList(stage));
@@ -1599,10 +1612,10 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
* upgrade context
* @param request
* upgrade request
- * @param effectiveStackId
- * the stack ID to use when building the command. This will determine
- * things like stack tools and version information added to the
- * command (not {@code null}).
+ * @param effectiveRepositoryVersion
+ * the stack/version to use when building the command. This will
+ * determine things like stack tools and version information added to
+ * the command (not {@code null}).
* @param entity
* a single of upgrade
* @param task
@@ -1617,14 +1630,18 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
* @throws AmbariException
*/
private void makeServerSideStage(UpgradeContext context, RequestStageContainer request,
- StackId effectiveStackId, UpgradeItemEntity entity, ServerSideActionTask task,
+ RepositoryVersionEntity effectiveRepositoryVersion, UpgradeItemEntity entity,
+ ServerSideActionTask task,
boolean skippable, boolean supportsAutoSkipOnFailure, boolean allowRetry,
UpgradePack upgradePack, ConfigUpgradePack configUpgradePack)
throws AmbariException {
Cluster cluster = context.getCluster();
+ StackId effectiveStackId = effectiveRepositoryVersion.getStackId();
+
+ Map<String, String> commandParams = getNewParameterMap(request, context,
+ effectiveRepositoryVersion);
- Map<String, String> commandParams = getNewParameterMap(request, context);
commandParams.put(UpgradeContext.COMMAND_PARAM_UPGRADE_PACK, upgradePack.getName());
// Notice that this does not apply any params because the input does not specify a stage.
@@ -1702,7 +1719,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
Role.AMBARI_SERVER_ACTION.toString(), Collections.<RequestResourceFilter> emptyList(),
commandParams);
- actionContext.setStackId(effectiveStackId);
+ actionContext.setRepositoryVersion(effectiveRepositoryVersion);
actionContext.setTimeout(Short.valueOf((short) -1));
actionContext.setRetryAllowed(allowRetry);
actionContext.setAutoSkipFailures(context.isComponentFailureAutoSkipped());
@@ -1743,11 +1760,10 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
* following properties are already set:
* <ul>
* <li>{@link UpgradeContext#COMMAND_PARAM_CLUSTER_NAME}
+ * <li>{@link UpgradeContext#COMMAND_PARAM_SOURCE_STACK}
+ * <li>{@link UpgradeContext#COMMAND_PARAM_TARGET_STACK}
* <li>{@link UpgradeContext#COMMAND_PARAM_VERSION}
* <li>{@link UpgradeContext#COMMAND_PARAM_DIRECTION}
- * <li>{@link UpgradeContext#COMMAND_PARAM_ORIGINAL_STACK}
- * <li>{@link UpgradeContext#COMMAND_PARAM_TARGET_STACK}
- * <li>{@link UpgradeContext#COMMAND_DOWNGRADE_FROM_VERSION}
* <li>{@link UpgradeContext#COMMAND_PARAM_UPGRADE_TYPE}
* <li>{@link KeyNames#REFRESH_CONFIG_TAGS_BEFORE_EXECUTION} - necessary in
* order to have the commands contain the correct configurations. Otherwise,
@@ -1761,10 +1777,13 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
* @return the initialized parameter map.
*/
private Map<String, String> getNewParameterMap(RequestStageContainer requestStageContainer,
- UpgradeContext context) {
+ UpgradeContext context, RepositoryVersionEntity effectiveRepositoryVersion) {
Map<String, String> parameters = context.getInitializedCommandParameters();
+
parameters.put(UpgradeContext.COMMAND_PARAM_REQUEST_ID,
String.valueOf(requestStageContainer.getId()));
+
+ parameters.put(UpgradeContext.COMMAND_PARAM_VERSION, effectiveRepositoryVersion.getVersion());
return parameters;
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java
index 0e6f0c4..d531460 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java
@@ -30,6 +30,7 @@ import java.util.concurrent.ConcurrentMap;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.actionmanager.HostRoleStatus;
import org.apache.ambari.server.agent.CommandReport;
+import org.apache.ambari.server.agent.ExecutionCommand.KeyNames;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.events.StackUpgradeFinishEvent;
import org.apache.ambari.server.events.publishers.VersionEventPublisher;
@@ -73,26 +74,13 @@ public class FinalizeUpgradeAction extends AbstractServerAction {
public static final String CLUSTER_NAME_KEY = "cluster_name";
public static final String UPGRADE_DIRECTION_KEY = "upgrade_direction";
- public static final String VERSION_KEY = "version";
+ public static final String VERSION_KEY = KeyNames.VERSION;
+ public static final String TARGET_STACK_KEY = KeyNames.TARGET_STACK;
public static final String REQUEST_ID = "request_id";
public static final String PREVIOUS_UPGRADE_NOT_COMPLETED_MSG = "It is possible that a previous upgrade was not finalized. " +
"For this reason, Ambari will not remove any configs. Please ensure that all database records are correct.";
/**
- * The original "current" stack of the cluster before the upgrade started.
- * This is the same regardless of whether the current direction is
- * {@link Direction#UPGRADE} or {@link Direction#DOWNGRADE}.
- */
- public static final String ORIGINAL_STACK_KEY = "original_stack";
-
- /**
- * The target upgrade stack before the upgrade started. This is the same
- * regardless of whether the current direction is {@link Direction#UPGRADE} or
- * {@link Direction#DOWNGRADE}.
- */
- public static final String TARGET_STACK_KEY = "target_stack";
-
- /**
* The Cluster that this ServerAction implementation is executing on
*/
@Inject
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptor.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptor.java
index 60d02a3..842da95 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptor.java
@@ -17,11 +17,16 @@
*/
package org.apache.ambari.server.serveraction.upgrades;
-import com.google.inject.Inject;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.TreeMap;
+import java.util.concurrent.ConcurrentMap;
+
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.actionmanager.HostRoleCommand;
import org.apache.ambari.server.actionmanager.HostRoleStatus;
import org.apache.ambari.server.agent.CommandReport;
+import org.apache.ambari.server.agent.ExecutionCommand.KeyNames;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.orm.dao.ArtifactDAO;
import org.apache.ambari.server.orm.entities.ArtifactEntity;
@@ -37,10 +42,7 @@ import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.TreeMap;
-import java.util.concurrent.ConcurrentMap;
+import com.google.inject.Inject;
/**
* Update the user-defined Kerberos Descriptor to work with the current stack.
@@ -56,25 +58,6 @@ public class UpgradeUserKerberosDescriptor extends AbstractServerAction {
* @see Direction
*/
private static final String UPGRADE_DIRECTION_KEY = "upgrade_direction";
-
- /**
- * The original "current" stack of the cluster before the upgrade started.
- * This is the same regardless of whether the current direction is
- * {@link Direction#UPGRADE} or {@link Direction#DOWNGRADE}.
- *
- * @see Direction
- */
- private static final String ORIGINAL_STACK_KEY = "original_stack";
-
- /**
- * The target upgrade stack before the upgrade started. This is the same
- * regardless of whether the current direction is {@link Direction#UPGRADE} or
- * {@link Direction#DOWNGRADE}.
- *
- * @see Direction
- */
- private static final String TARGET_STACK_KEY = "target_stack";
-
private final static String KERBEROS_DESCRIPTOR_NAME = "kerberos_descriptor";
private final static String KERBEROS_DESCRIPTOR_BACKUP_NAME = "kerberos_descriptor_backup";
@@ -104,21 +87,21 @@ public class UpgradeUserKerberosDescriptor extends AbstractServerAction {
HostRoleCommand hostRoleCommand = getHostRoleCommand();
String clusterName = hostRoleCommand.getExecutionCommandWrapper().getExecutionCommand().getClusterName();
Cluster cluster = clusters.getCluster(clusterName);
- List<String> messages = new ArrayList<String>();
- List<String> errorMessages = new ArrayList<String>();
+ List<String> messages = new ArrayList<>();
+ List<String> errorMessages = new ArrayList<>();
if (cluster != null) {
logMessage(messages, "Obtaining the user-defined Kerberos descriptor");
- TreeMap<String, String> foreignKeys = new TreeMap<String, String>();
+ TreeMap<String, String> foreignKeys = new TreeMap<>();
foreignKeys.put("cluster", String.valueOf(cluster.getClusterId()));
ArtifactEntity entity = artifactDAO.findByNameAndForeignKeys("kerberos_descriptor", foreignKeys);
KerberosDescriptor userDescriptor = (entity == null) ? null : kerberosDescriptorFactory.createInstance(entity.getArtifactData());
if (userDescriptor != null) {
- StackId originalStackId = getStackIdFromCommandParams(ORIGINAL_STACK_KEY);
- StackId targetStackId = getStackIdFromCommandParams(TARGET_STACK_KEY);
+ StackId originalStackId = cluster.getCurrentStackVersion();
+ StackId targetStackId = getStackIdFromCommandParams(KeyNames.TARGET_STACK);
if (isDowngrade()) {
restoreDescriptor(foreignKeys, messages, errorMessages);
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
index b97dc80..93e6393 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
@@ -62,21 +62,15 @@ public class UpgradeContext {
public static final String COMMAND_PARAM_UPGRADE_TYPE = "upgrade_type";
public static final String COMMAND_PARAM_TASKS = "tasks";
public static final String COMMAND_PARAM_STRUCT_OUT = "structured_out";
- public static final String COMMAND_DOWNGRADE_FROM_VERSION = "downgrade_from_version";
- /**
- * The original "current" stack of the cluster before the upgrade started.
- * This is the same regardless of whether the current direction is
- * {@link Direction#UPGRADE} or {@link Direction#DOWNGRADE}.
- */
- public static final String COMMAND_PARAM_ORIGINAL_STACK = "original_stack";
+ public static final String COMMAND_PARAM_SOURCE_STACK = KeyNames.SOURCE_STACK;
+ public static final String COMMAND_PARAM_TARGET_STACK = KeyNames.TARGET_STACK;
- /**
- * The target upgrade stack before the upgrade started. This is the same
- * regardless of whether the current direction is {@link Direction#UPGRADE} or
- * {@link Direction#DOWNGRADE}.
- */
- public static final String COMMAND_PARAM_TARGET_STACK = "target_stack";
+ @Deprecated
+ @Experimental(
+ feature = ExperimentalFeature.STACK_UPGRADES_BETWEEN_VENDORS,
+ comment = "This isn't needed anymore, but many python classes still use it")
+ public static final String COMMAND_PARAM_DOWNGRADE_FROM_VERSION = "downgrade_from_version";
/**
* The cluster that the upgrade is for.
@@ -528,6 +522,7 @@ public class UpgradeContext {
* <ul>
* <li>{@link #COMMAND_PARAM_CLUSTER_NAME}
* <li>{@link #COMMAND_PARAM_DIRECTION}
+ * <li>{@link #COMMAND_PARAM_DOWNGRADE_FROM_VERSION}
* <li>{@link #COMMAND_PARAM_UPGRADE_TYPE}
* <li>{@link KeyNames#REFRESH_CONFIG_TAGS_BEFORE_EXECUTION} - necessary in
* order to have the commands contain the correct configurations. Otherwise,
@@ -542,8 +537,16 @@ public class UpgradeContext {
public Map<String, String> getInitializedCommandParameters() {
Map<String, String> parameters = new HashMap<>();
+ Direction direction = getDirection();
parameters.put(COMMAND_PARAM_CLUSTER_NAME, m_cluster.getClusterName());
- parameters.put(COMMAND_PARAM_DIRECTION, getDirection().name().toLowerCase());
+ parameters.put(COMMAND_PARAM_DIRECTION, direction.name().toLowerCase());
+
+ parameters.put(COMMAND_PARAM_SOURCE_STACK, m_fromRepositoryVersion.getStackId().getStackId());
+ parameters.put(COMMAND_PARAM_TARGET_STACK, m_toRepositoryVersion.getStackId().getStackId());
+
+ if (direction == Direction.DOWNGRADE) {
+ parameters.put(COMMAND_PARAM_DOWNGRADE_FROM_VERSION, m_fromRepositoryVersion.getVersion());
+ }
if (null != getType()) {
// use the serialized attributes of the enum to convert it to a string,
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py
index b54b565..5dbbaed 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py
@@ -21,6 +21,7 @@ limitations under the License.
import socket
import status_params
from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.stack_features import get_stack_feature_version
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions import Direction
from setup_spark import *
@@ -57,10 +58,8 @@ upgrade_direction = default("/commandParams/upgrade_direction", None)
java_home = config['hostLevelParams']['java_home']
stack_name = status_params.stack_name
stack_root = Script.get_stack_root()
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-if upgrade_direction == Direction.DOWNGRADE:
- stack_version_unformatted = config['commandParams']['original_stack'].split("-")[1]
-stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+version_for_stack_feature_checks = get_stack_feature_version(config)
sysprep_skip_copy_tarballs_hdfs = get_sysprep_skip_copy_tarballs_hdfs()
@@ -71,7 +70,7 @@ spark_conf = '/etc/spark/conf'
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
-if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
+if check_stack_feature(StackFeature.ROLLING_UPGRADE, version_for_stack_feature_checks):
hadoop_home = stack_select.get_hadoop_dir("home")
spark_conf = format("{stack_root}/current/{component_directory}/conf")
spark_log_dir = config['configurations']['spark-env']['spark_log_dir']
@@ -212,7 +211,7 @@ dfs_type = default("/commandParams/dfs_type", "")
# livy is only supported from HDP 2.5
has_livyserver = False
-if stack_version_formatted and check_stack_feature(StackFeature.SPARK_LIVY, stack_version_formatted) and "livy-env" in config['configurations']:
+if check_stack_feature(StackFeature.SPARK_LIVY, version_for_stack_feature_checks) and "livy-env" in config['configurations']:
livy_component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "LIVY_SERVER")
livy_conf = format("{stack_root}/current/{livy_component_directory}/conf")
livy_log_dir = config['configurations']['livy-env']['livy_log_dir']
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/setup_spark.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/setup_spark.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/setup_spark.py
index 4034532..bc86a67 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/setup_spark.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/setup_spark.py
@@ -114,11 +114,11 @@ def setup_spark(env, type, upgrade_type=None, action=None, config_dir=None):
mode=0644
)
- effective_version = params.version if upgrade_type is not None else params.stack_version_formatted
+ effective_version = params.version if upgrade_type is not None else params.version_for_stack_feature_checks
if effective_version:
effective_version = format_stack_version(effective_version)
- if effective_version and check_stack_feature(StackFeature.SPARK_JAVA_OPTS_SUPPORT, effective_version):
+ if check_stack_feature(StackFeature.SPARK_JAVA_OPTS_SUPPORT, effective_version):
File(os.path.join(params.spark_conf, 'java-opts'),
owner=params.spark_user,
group=params.spark_group,
@@ -130,7 +130,7 @@ def setup_spark(env, type, upgrade_type=None, action=None, config_dir=None):
action="delete"
)
- if params.spark_thrift_fairscheduler_content and effective_version and check_stack_feature(StackFeature.SPARK_16PLUS, effective_version):
+ if params.spark_thrift_fairscheduler_content and check_stack_feature(StackFeature.SPARK_16PLUS, effective_version):
# create spark-thrift-fairscheduler.xml
File(os.path.join(config_dir,"spark-thrift-fairscheduler.xml"),
owner=params.spark_user,
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/spark_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/spark_service.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/spark_service.py
index 31a296a..2838186 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/spark_service.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/spark_service.py
@@ -34,11 +34,11 @@ def spark_service(name, upgrade_type=None, action=None):
if action == 'start':
- effective_version = params.version if upgrade_type is not None else params.stack_version_formatted
+ effective_version = params.version if upgrade_type is not None else params.version_for_stack_feature_checks
if effective_version:
effective_version = format_stack_version(effective_version)
- if name == 'jobhistoryserver' and effective_version and check_stack_feature(StackFeature.SPARK_16PLUS, effective_version):
+ if name == 'jobhistoryserver' and check_stack_feature(StackFeature.SPARK_16PLUS, effective_version):
# copy spark-hdp-assembly.jar to hdfs
copy_to_hdfs("spark", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
# create spark history directory
@@ -58,7 +58,7 @@ def spark_service(name, upgrade_type=None, action=None):
# Spark 1.3.1.2.3, and higher, which was included in HDP 2.3, does not have a dependency on Tez, so it does not
# need to copy the tarball, otherwise, copy it.
- if params.stack_version_formatted and check_stack_feature(StackFeature.TEZ_FOR_SPARK, params.stack_version_formatted):
+ if check_stack_feature(StackFeature.TEZ_FOR_SPARK, params.version_for_stack_feature_checks):
resource_created = copy_to_hdfs("tez", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
if resource_created:
params.HdfsResource(None, action="execute")
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
index 47d2a81..6675c58 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
@@ -409,7 +409,6 @@ public class ComponentVersionCheckActionTest {
Map<String, String> commandParams = new HashMap<>();
commandParams.put(FinalizeUpgradeAction.UPGRADE_DIRECTION_KEY, "upgrade");
commandParams.put(FinalizeUpgradeAction.VERSION_KEY, targetRepo);
- commandParams.put(FinalizeUpgradeAction.ORIGINAL_STACK_KEY, sourceStack.getStackId());
commandParams.put(FinalizeUpgradeAction.TARGET_STACK_KEY, targetStack.getStackId());
ExecutionCommand executionCommand = new ExecutionCommand();
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
index c9c0dd0..037e47b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
@@ -677,7 +677,6 @@ public class UpgradeActionTest {
Map<String, String> commandParams = new HashMap<>();
commandParams.put(FinalizeUpgradeAction.UPGRADE_DIRECTION_KEY, "upgrade");
commandParams.put(FinalizeUpgradeAction.VERSION_KEY, targetRepo);
- commandParams.put(FinalizeUpgradeAction.ORIGINAL_STACK_KEY, sourceStack.getStackId());
commandParams.put(FinalizeUpgradeAction.TARGET_STACK_KEY, targetStack.getStackId());
ExecutionCommand executionCommand = new ExecutionCommand();
@@ -860,7 +859,6 @@ public class UpgradeActionTest {
Map<String, String> commandParams = new HashMap<>();
commandParams.put(FinalizeUpgradeAction.UPGRADE_DIRECTION_KEY, "upgrade");
commandParams.put(FinalizeUpgradeAction.VERSION_KEY, targetRepo);
- commandParams.put(FinalizeUpgradeAction.ORIGINAL_STACK_KEY, sourceStack.getStackId());
commandParams.put(FinalizeUpgradeAction.TARGET_STACK_KEY, targetStack.getStackId());
ExecutionCommand executionCommand = new ExecutionCommand();
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptorTest.java
index 6b80623..0da9088 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptorTest.java
@@ -25,6 +25,11 @@ import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;
import static org.junit.Assert.assertEquals;
+import java.lang.reflect.Field;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.TreeMap;
+
import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper;
import org.apache.ambari.server.actionmanager.HostRoleCommand;
import org.apache.ambari.server.agent.ExecutionCommand;
@@ -33,6 +38,7 @@ import org.apache.ambari.server.orm.dao.ArtifactDAO;
import org.apache.ambari.server.orm.entities.ArtifactEntity;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
import org.apache.ambari.server.state.kerberos.KerberosDescriptorUpdateHelper;
@@ -45,11 +51,6 @@ import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
-import java.lang.reflect.Field;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.TreeMap;
-
/**
* Tests OozieConfigCalculation logic
*/
@@ -63,6 +64,7 @@ public class UpgradeUserKerberosDescriptorTest {
private ArtifactDAO artifactDAO;
private TreeMap<String, Field> fields = new TreeMap<>();
+ private StackId HDP_24 = new StackId("HDP", "2.4");
@Before
public void setup() throws Exception {
@@ -74,6 +76,7 @@ public class UpgradeUserKerberosDescriptorTest {
expect(clusters.getCluster((String) anyObject())).andReturn(cluster).anyTimes();
expect(cluster.getClusterId()).andReturn(1l).atLeastOnce();
+ expect(cluster.getCurrentStackVersion()).andReturn(HDP_24).atLeastOnce();
replay(clusters, cluster);
prepareFields();
@@ -83,10 +86,9 @@ public class UpgradeUserKerberosDescriptorTest {
@Test
public void testUpgrade() throws Exception {
- Map<String, String> commandParams = new HashMap<String, String>();
+ Map<String, String> commandParams = new HashMap<>();
commandParams.put("clusterName", "c1");
commandParams.put("upgrade_direction", "UPGRADE");
- commandParams.put("original_stack", "HDP-2.4");
commandParams.put("target_stack", "HDP-2.5");
ExecutionCommand executionCommand = new ExecutionCommand();
@@ -141,10 +143,9 @@ public class UpgradeUserKerberosDescriptorTest {
@Test
public void testDowngrade() throws Exception {
- Map<String, String> commandParams = new HashMap<String, String>();
+ Map<String, String> commandParams = new HashMap<>();
commandParams.put("clusterName", "c1");
commandParams.put("upgrade_direction", "DOWNGRADE");
- commandParams.put("original_stack", "HDP-2.4");
commandParams.put("target_stack", "HDP-2.5");
ExecutionCommand executionCommand = new ExecutionCommand();
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/test/python/TestStackFeature.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/TestStackFeature.py b/ambari-server/src/test/python/TestStackFeature.py
index 0116a7a..230734c 100644
--- a/ambari-server/src/test/python/TestStackFeature.py
+++ b/ambari-server/src/test/python/TestStackFeature.py
@@ -28,6 +28,32 @@ from unittest import TestCase
Logger.initialize_logger()
class TestStackFeature(TestCase):
+ """
+ EU Upgrade (HDP 2.5 to HDP 2.6)
+ - STOP
+ hostLevelParams/stack_name = HDP
+ hostLevelParams/stack_version = 2.5
+ hostLevelParams/current_version = 2.5.0.0-1237
+ commandParams/version = 2.5.0.0-1237
+ - START
+ hostLevelParams/stack_name = HDP
+ hostLevelParams/stack_version = 2.6
+ hostLevelParams/current_version = 2.5.0.0-1237
+ commandParams/version = 2.6.0.0-334
+
+ EU Downgrade (HDP 2.6 to HDP 2.5)
+ - STOP
+ hostLevelParams/stack_name = HDP
+ hostLevelParams/stack_version = 2.6
+ hostLevelParams/current_version = 2.5.0.0-1237
+ commandParams/version = 2.6.0.0-334
+ - START
+ hostLevelParams/stack_name = HDP
+ hostLevelParams/stack_version = 2.5
+ hostLevelParams/current_version = 2.5.0.0-1237
+ commandParams/version = 2.5.0.0-1237
+ """
+
def test_get_stack_feature_version_missing_params(self):
try:
stack_feature_version = get_stack_feature_version({})
@@ -122,7 +148,7 @@ class TestStackFeature(TestCase):
"current_version": "2.4.0.0-1234"
},
"commandParams": {
- "original_stack": "2.4",
+ "source_stack": "2.4",
"target_stack": "2.5",
"upgrade_direction": "upgrade",
"version": "2.5.9.9-9999"
@@ -143,8 +169,8 @@ class TestStackFeature(TestCase):
"current_version":"2.4.0.0-1234"
},
"commandParams":{
- "original_stack":"2.4",
- "target_stack":"2.5",
+ "source_stack":"2.5",
+ "target_stack":"2.4",
"upgrade_direction":"downgrade",
"version":"2.4.0.0-1234",
"downgrade_from_version": "2.5.9.9-9999"
@@ -166,10 +192,10 @@ class TestStackFeature(TestCase):
"current_version":"2.4.0.0-1234"
},
"commandParams":{
- "original_stack":"2.4",
- "target_stack":"2.5",
+ "source_stack":"2.5",
+ "target_stack":"2.4",
"upgrade_direction":"downgrade",
- "version":"2.4.0.0-1234",
+ "version":"2.5.9.9-9999",
"downgrade_from_version":"2.5.9.9-9999"
}
}
@@ -189,10 +215,10 @@ class TestStackFeature(TestCase):
"custom_command":"STOP"
},
"commandParams":{
- "original_stack":"2.4",
- "target_stack":"2.5",
+ "source_stack":"2.5",
+ "target_stack":"2.4",
"upgrade_direction":"downgrade",
- "version":"2.4.0.0-1234",
+ "version":"2.5.9.9-9999",
"downgrade_from_version":"2.5.9.9-9999"
}
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/test/python/custom_actions/test_ru_set_all.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/custom_actions/test_ru_set_all.py b/ambari-server/src/test/python/custom_actions/test_ru_set_all.py
index e1a89a8..8e03b7f 100644
--- a/ambari-server/src/test/python/custom_actions/test_ru_set_all.py
+++ b/ambari-server/src/test/python/custom_actions/test_ru_set_all.py
@@ -225,7 +225,7 @@ class TestRUSetAll(RMFTestCase):
# alter JSON for a downgrade from 2.3 to 2.2
json_payload['commandParams']['version'] = "2.2.0.0-1234"
json_payload['commandParams']['downgrade_from_version'] = "2.3.0.0-1234"
- json_payload['commandParams']['original_stack'] = "HDP-2.2"
+ json_payload['commandParams']['source_stack'] = "HDP-2.2"
json_payload['commandParams']['target_stack'] = "HDP-2.3"
json_payload['commandParams']['upgrade_direction'] = "downgrade"
json_payload['hostLevelParams']['stack_version'] = "2.2"
@@ -263,7 +263,7 @@ class TestRUSetAll(RMFTestCase):
json_payload['commandParams']['version'] = "2.3.0.0-1234"
json_payload['commandParams']['downgrade_from_version'] = "2.3.0.0-5678"
- json_payload['commandParams']['original_stack'] = "HDP-2.3"
+ json_payload['commandParams']['source_stack'] = "HDP-2.3"
json_payload['commandParams']['target_stack'] = "HDP-2.3"
json_payload['commandParams']['upgrade_direction'] = "downgrade"
json_payload['hostLevelParams']['stack_version'] = "2.3"
@@ -291,7 +291,7 @@ class TestRUSetAll(RMFTestCase):
# alter JSON for a downgrade from 2.2 to 2.2
json_payload['commandParams']['version'] = "2.2.0.0-1234"
json_payload['commandParams']['downgrade_from_version'] = "2.2.0.0-5678"
- json_payload['commandParams']['original_stack'] = "HDP-2.2"
+ json_payload['commandParams']['source_stack'] = "HDP-2.2"
json_payload['commandParams']['target_stack'] = "HDP-2.2"
json_payload['commandParams']['upgrade_direction'] = "downgrade"
json_payload['hostLevelParams']['stack_version'] = "2.2"
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json
index 7f77d83..3aadf2c 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json
@@ -25,7 +25,7 @@
"upgrade_type": "nonrolling_upgrade",
"version": "2.3.2.0-2844",
"forceRefreshConfigTagsBeforeExecution": "*",
- "original_stack": "HDP-2.2",
+ "source_stack": "HDP-2.2",
"command_timeout": "1200",
"target_stack": "HDP-2.3",
"desired_namenode_role": "standby",
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json
index 87b18af..2d48ff6 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json
@@ -25,7 +25,7 @@
"upgrade_type": "nonrolling_upgrade",
"version": "2.3.2.0-2844",
"forceRefreshConfigTagsBeforeExecution": "*",
- "original_stack": "HDP-2.2",
+ "source_stack": "HDP-2.2",
"command_timeout": "1200",
"target_stack": "HDP-2.3",
"desired_namenode_role": "standby",
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json b/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json
index 99fcba0..021695b 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json
@@ -13,7 +13,7 @@
"upgrade_type": "nonrolling_upgrade",
"version": "2.3.2.0-2950",
"forceRefreshConfigTagsBeforeExecution": "*",
- "original_stack": "HDP-2.3",
+ "source_stack": "HDP-2.3",
"command_timeout": "1200",
"target_stack": "HDP-2.3",
"script_type": "PYTHON"
http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ac40bc/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json b/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json
index a9db11c..1805c3b 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json
@@ -22,7 +22,7 @@
"upgrade_type": "rolling_upgrade",
"command_retry_max_attempt_count": "3",
"version": "2.3.0.0-2096",
- "original_stack": "HDP-2.3",
+ "source_stack": "HDP-2.3",
"command_retry_enabled": "false",
"command_timeout": "1200",
"target_stack": "HDP-2.3",
[03/13] ambari git commit: Revert "AMBARI-21427. Assigning hosts
concurrently to same config group may fail with
"org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException:
Config group already exist". (stoader)"
Posted by jo...@apache.org.
Revert "AMBARI-21427. Assigning hosts concurrently to same config group may fail with "org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException: Config group already exist". (stoader)"
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e92b5035
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e92b5035
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e92b5035
Branch: refs/heads/branch-feature-AMBARI-21348
Commit: e92b50359653636de49718df8cf2eab36b186b37
Parents: a21fa12
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Mon Jul 10 23:11:38 2017 +0300
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Mon Jul 10 23:11:38 2017 +0300
----------------------------------------------------------------------
.../ambari/server/topology/AmbariContext.java | 81 +++++---------------
1 file changed, 19 insertions(+), 62 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/e92b5035/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
index dee0e6c..106d7c8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
@@ -1,4 +1,4 @@
-/*
+/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -30,7 +30,6 @@ import java.util.SortedSet;
import java.util.TreeSet;
import java.util.concurrent.Callable;
import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.Lock;
import javax.annotation.Nullable;
import javax.inject.Inject;
@@ -70,11 +69,9 @@ import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.Config;
import org.apache.ambari.server.state.ConfigFactory;
-import org.apache.ambari.server.state.ConfigHelper;
import org.apache.ambari.server.state.DesiredConfig;
import org.apache.ambari.server.state.Host;
import org.apache.ambari.server.state.SecurityType;
-import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.configgroup.ConfigGroup;
import org.apache.ambari.server.utils.RetryHelper;
import org.slf4j.Logger;
@@ -82,8 +79,6 @@ import org.slf4j.LoggerFactory;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
-import com.google.common.util.concurrent.Striped;
-import com.google.inject.Provider;
/**
@@ -104,12 +99,6 @@ public class AmbariContext {
@Inject
ConfigFactory configFactory;
- /**
- * Used for getting configuration property values from stack and services.
- */
- @Inject
- private Provider<ConfigHelper> configHelper;
-
private static AmbariManagementController controller;
private static ClusterController clusterController;
//todo: task id's. Use existing mechanism for getting next task id sequence
@@ -123,16 +112,6 @@ public class AmbariContext {
private final static Logger LOG = LoggerFactory.getLogger(AmbariContext.class);
-
- /**
- * When config groups are created using Blueprints these are created when
- * hosts join a hostgroup and are added to the corresponding config group.
- * Since hosts join in parallel there might be a race condition in creating
- * the config group a host is to be added to. Thus we need to synchronize
- * the creation of config groups with the same name.
- */
- private Striped<Lock> configGroupCreateLock = Striped.lazyWeakLock(1);
-
public boolean isClusterKerberosEnabled(long clusterId) {
Cluster cluster;
try {
@@ -188,10 +167,9 @@ public class AmbariContext {
public void createAmbariResources(ClusterTopology topology, String clusterName, SecurityType securityType, String repoVersion) {
Stack stack = topology.getBlueprint().getStack();
- StackId stackId = new StackId(stack.getName(), stack.getVersion());
createAmbariClusterResource(clusterName, stack.getName(), stack.getVersion(), securityType, repoVersion);
- createAmbariServiceAndComponentResources(topology, clusterName, stackId, repoVersion);
+ createAmbariServiceAndComponentResources(topology, clusterName);
}
public void createAmbariClusterResource(String clusterName, String stackName, String stackVersion, SecurityType securityType, String repoVersion) {
@@ -218,8 +196,7 @@ public class AmbariContext {
}
}
- public void createAmbariServiceAndComponentResources(ClusterTopology topology, String clusterName,
- StackId stackId, String repositoryVersion) {
+ public void createAmbariServiceAndComponentResources(ClusterTopology topology, String clusterName) {
Collection<String> services = topology.getBlueprint().getServices();
try {
@@ -228,13 +205,11 @@ public class AmbariContext {
} catch (AmbariException e) {
throw new RuntimeException("Failed to persist service and component resources: " + e, e);
}
- Set<ServiceRequest> serviceRequests = new HashSet<>();
- Set<ServiceComponentRequest> componentRequests = new HashSet<>();
+ Set<ServiceRequest> serviceRequests = new HashSet<ServiceRequest>();
+ Set<ServiceComponentRequest> componentRequests = new HashSet<ServiceComponentRequest>();
for (String service : services) {
String credentialStoreEnabled = topology.getBlueprint().getCredentialStoreEnabled(service);
- serviceRequests.add(new ServiceRequest(clusterName, service, stackId.getStackId(),
- repositoryVersion, null, credentialStoreEnabled));
-
+ serviceRequests.add(new ServiceRequest(clusterName, service, null, credentialStoreEnabled));
for (String component : topology.getBlueprint().getComponents(service)) {
String recoveryEnabled = topology.getBlueprint().getRecoveryEnabled(service, component);
componentRequests.add(new ServiceComponentRequest(clusterName, service, component, null, recoveryEnabled));
@@ -248,14 +223,14 @@ public class AmbariContext {
}
// set all services state to INSTALLED->STARTED
// this is required so the user can start failed services at the service level
- Map<String, Object> installProps = new HashMap<>();
+ Map<String, Object> installProps = new HashMap<String, Object>();
installProps.put(ServiceResourceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID, "INSTALLED");
installProps.put(ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
- Map<String, Object> startProps = new HashMap<>();
+ Map<String, Object> startProps = new HashMap<String, Object>();
startProps.put(ServiceResourceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID, "STARTED");
startProps.put(ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
- Predicate predicate = new EqualsPredicate<>(
- ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
+ Predicate predicate = new EqualsPredicate<String>(
+ ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
try {
getServiceResourceProvider().updateResources(
new RequestImpl(null, Collections.singleton(installProps), null, null), predicate);
@@ -287,9 +262,9 @@ public class AmbariContext {
}
String clusterName = cluster.getClusterName();
- Map<String, Object> properties = new HashMap<>();
+ Map<String, Object> properties = new HashMap<String, Object>();
properties.put(HostResourceProvider.HOST_CLUSTER_NAME_PROPERTY_ID, clusterName);
- properties.put(HostResourceProvider.HOST_HOST_NAME_PROPERTY_ID, hostName);
+ properties.put(HostResourceProvider.HOST_NAME_PROPERTY_ID, hostName);
properties.put(HostResourceProvider.HOST_RACK_INFO_PROPERTY_ID, host.getRackInfo());
try {
@@ -300,7 +275,7 @@ public class AmbariContext {
hostName, e.toString()), e);
}
- final Set<ServiceComponentHostRequest> requests = new HashSet<>();
+ final Set<ServiceComponentHostRequest> requests = new HashSet<ServiceComponentHostRequest>();
for (Map.Entry<String, Collection<String>> entry : components.entrySet()) {
String service = entry.getKey();
@@ -353,17 +328,11 @@ public class AmbariContext {
}
public void registerHostWithConfigGroup(final String hostName, final ClusterTopology topology, final String groupName) {
- String qualifiedGroupName = getConfigurationGroupName(topology.getBlueprint().getName(), groupName);
-
- Lock configGroupLock = configGroupCreateLock.get(qualifiedGroupName);
-
try {
- configGroupLock.lock();
-
boolean hostAdded = RetryHelper.executeWithRetry(new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
- return addHostToExistingConfigGroups(hostName, topology, qualifiedGroupName);
+ return addHostToExistingConfigGroups(hostName, topology, groupName);
}
});
if (!hostAdded) {
@@ -373,9 +342,6 @@ public class AmbariContext {
LOG.error("Unable to register config group for host: ", e);
throw new RuntimeException("Unable to register config group for host: " + hostName);
}
- finally {
- configGroupLock.unlock();
- }
}
public RequestStatusResponse installHost(String hostName, String clusterName, Collection<String> skipInstallForComponents, Collection<String> dontSkipInstallForComponents, boolean skipFailure) {
@@ -583,7 +549,7 @@ public class AmbariContext {
/**
* Add the new host to an existing config group.
*/
- private boolean addHostToExistingConfigGroups(String hostName, ClusterTopology topology, String configGroupName) {
+ private boolean addHostToExistingConfigGroups(String hostName, ClusterTopology topology, String groupName) {
boolean addedHost = false;
Clusters clusters;
Cluster cluster;
@@ -597,8 +563,9 @@ public class AmbariContext {
// I don't know of a method to get config group by name
//todo: add a method to get config group by name
Map<Long, ConfigGroup> configGroups = cluster.getConfigGroups();
+ String qualifiedGroupName = getConfigurationGroupName(topology.getBlueprint().getName(), groupName);
for (ConfigGroup group : configGroups.values()) {
- if (group.getName().equals(configGroupName)) {
+ if (group.getName().equals(qualifiedGroupName)) {
try {
Host host = clusters.getHost(hostName);
addedHost = true;
@@ -622,7 +589,7 @@ public class AmbariContext {
* and the hosts associated with the host group are assigned to the config group.
*/
private void createConfigGroupsAndRegisterHost(ClusterTopology topology, String groupName) throws AmbariException {
- Map<String, Map<String, Config>> groupConfigs = new HashMap<>();
+ Map<String, Map<String, Config>> groupConfigs = new HashMap<String, Map<String, Config>>();
Stack stack = topology.getBlueprint().getStack();
// get the host-group config with cluster creation template overrides
@@ -641,7 +608,7 @@ public class AmbariContext {
//todo: attributes
Map<String, Config> serviceConfigs = groupConfigs.get(service);
if (serviceConfigs == null) {
- serviceConfigs = new HashMap<>();
+ serviceConfigs = new HashMap<String, Config>();
groupConfigs.put(service, serviceConfigs);
}
serviceConfigs.put(type, config);
@@ -702,16 +669,6 @@ public class AmbariContext {
return String.format("%s:%s", bpName, hostGroupName);
}
- /**
- * Gets an instance of {@link ConfigHelper} for classes which are not
- * dependency injected.
- *
- * @return a {@link ConfigHelper} instance.
- */
- public ConfigHelper getConfigHelper() {
- return configHelper.get();
- }
-
private synchronized HostResourceProvider getHostResourceProvider() {
if (hostResourceProvider == null) {
hostResourceProvider = (HostResourceProvider)