Posted to commits@ambari.apache.org by jo...@apache.org on 2017/09/29 15:17:57 UTC
[3/3] ambari git commit: AMBARI-22083 - Wrong Hadoop Home Directory Is Being Picked Up on MAINT/PATCH Upgraded Clusters (jonathanhurley)
AMBARI-22083 - Wrong Hadoop Home Directory Is Being Picked Up on MAINT/PATCH Upgraded Clusters (jonathanhurley)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/5433e479
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/5433e479
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/5433e479
Branch: refs/heads/branch-2.6
Commit: 5433e479260dc9e13aacae1ea9edb9c29d6b96cb
Parents: 6de11b8
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Wed Sep 27 11:52:11 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Fri Sep 29 11:09:10 2017 -0400
----------------------------------------------------------------------
.../libraries/functions/component_version.py | 26 +--
.../libraries/functions/conf_select.py | 79 ++--------
.../libraries/functions/stack_select.py | 69 ++++----
.../ambari/server/agent/HeartbeatMonitor.java | 6 +-
.../ambari/server/agent/StatusCommand.java | 27 +++-
.../package/scripts/hive_server_upgrade.py | 5 -
.../0.12.0.2.0/package/scripts/params_linux.py | 5 +-
.../0.12.0.2.0/package/scripts/status_params.py | 8 +-
.../HIVE/0.12.0.2.0/package/scripts/webhcat.py | 2 +-
.../4.0/hooks/after-INSTALL/scripts/params.py | 2 +-
.../4.0/hooks/before-ANY/scripts/params.py | 6 +-
.../4.0/hooks/before-START/scripts/params.py | 2 +-
.../4.2.5/hooks/after-INSTALL/scripts/params.py | 2 +-
.../4.2.5/hooks/before-ANY/scripts/params.py | 6 +-
.../4.2.5/hooks/before-START/scripts/params.py | 2 +-
.../2.0.6/hooks/after-INSTALL/scripts/params.py | 11 +-
.../2.0.6/hooks/before-ANY/scripts/params.py | 53 +++----
.../before-ANY/scripts/shared_initialization.py | 8 -
.../2.0.6/hooks/before-START/scripts/params.py | 17 +-
.../services/ECS/package/scripts/params.py | 2 +-
.../stacks/2.0.6/HBASE/test_hbase_client.py | 1 -
.../stacks/2.0.6/HBASE/test_hbase_master.py | 6 +-
.../2.0.6/HBASE/test_phoenix_queryserver.py | 7 +
.../python/stacks/2.0.6/HDFS/test_datanode.py | 38 ++---
.../python/stacks/2.0.6/HDFS/test_namenode.py | 5 +-
.../stacks/2.0.6/HIVE/test_hive_metastore.py | 42 +++--
.../stacks/2.0.6/HIVE/test_hive_server.py | 158 ++++++++++---------
.../2.0.6/HIVE/test_hive_service_check.py | 4 +-
.../stacks/2.0.6/HIVE/test_webhcat_server.py | 41 +++--
.../stacks/2.0.6/OOZIE/test_oozie_server.py | 9 ++
.../2.0.6/OOZIE/test_oozie_service_check.py | 5 +-
.../stacks/2.0.6/YARN/test_historyserver.py | 5 +-
.../stacks/2.0.6/YARN/test_mapreduce2_client.py | 1 +
.../hooks/after-INSTALL/test_after_install.py | 12 +-
.../2.0.6/hooks/before-ANY/test_before_any.py | 9 --
.../stacks/2.1/FALCON/test_falcon_server.py | 26 +--
.../stacks/2.1/HIVE/test_hive_metastore.py | 54 ++++---
.../stacks/2.2/PIG/test_pig_service_check.py | 13 ++
.../stacks/2.2/SPARK/test_job_history_server.py | 18 +--
.../stacks/2.3/MAHOUT/test_mahout_client.py | 2 +-
.../2.3/MAHOUT/test_mahout_service_check.py | 28 ++--
.../2.3/SPARK/test_spark_thrift_server.py | 8 +-
.../stacks/2.5/RANGER_KMS/test_kms_server.py | 24 +--
.../python/stacks/2.5/SPARK/test_spark_livy.py | 16 +-
.../stacks/2.5/configs/ranger-kms-secured.json | 6 +-
.../test/python/stacks/2.6/DRUID/test_druid.py | 20 +--
.../stacks/2.6/SPARK2/test_spark_livy2.py | 16 +-
47 files changed, 459 insertions(+), 453 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/5433e479/ambari-common/src/main/python/resource_management/libraries/functions/component_version.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/component_version.py b/ambari-common/src/main/python/resource_management/libraries/functions/component_version.py
index a1fd6b2..169b339 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/component_version.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/component_version.py
@@ -20,7 +20,7 @@ limitations under the License.
from resource_management.libraries.script.script import Script
-def get_component_repository_version(service_name, component_name = None):
+def get_component_repository_version(service_name = None, component_name = None):
"""
Gets the version associated with the specified component from the structure in the command.
Every command should contain a mapping of service/component to the desired repository it's set
@@ -29,11 +29,16 @@ def get_component_repository_version(service_name, component_name = None):
:service_name: the name of the service
:component_name: the name of the component
"""
- versions = _get_component_repositories()
+ config = Script.get_config()
+
+ versions = _get_component_repositories(config)
if versions is None:
return None
- if service_name not in versions:
+ if service_name is None:
+ service_name = config['serviceName'] if config is not None and 'serviceName' in config else None
+
+ if service_name is None or service_name not in versions:
return None
component_versions = versions[service_name]
@@ -41,22 +46,23 @@ def get_component_repository_version(service_name, component_name = None):
return None
if component_name is None:
- for component in component_versions:
- return component_versions[component]
+ component_name = config["role"] if config is not None and "role" in config else None
- if not component_name in component_versions:
- return None
+ # return a direct match of component name
+ if component_name is not None and component_name in component_versions:
+ return component_versions[component_name]
- return component_versions[component_name]
+ # fall back to the first one for the service
+ return component_versions.values()[0]
-def _get_component_repositories():
+def _get_component_repositories(config):
"""
Gets an initialized dictionary from the value in componentVersionMap. This structure is
sent on every command by Ambari and should contain each service & component's desired repository.
+ :config: the configuration dictionary
:return:
"""
- config = Script.get_config()
if "componentVersionMap" not in config or config["componentVersionMap"] is "":
return None
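
For reference, the hunk above changes the lookup order so a caller no longer has to pass the service/component explicitly: missing arguments are filled from the command's own "serviceName" and "role", and when no exact component match exists the first version mapped for the service is used. The following is a simplified, standalone sketch of that order (the real function reads the same fields from Script.get_config(); all sample values below are hypothetical):

    # Simplified sketch of the new resolution order in get_component_repository_version().
    def get_component_repository_version(config, service_name=None, component_name=None):
        versions = config.get("componentVersionMap") or None
        if versions is None:
            return None

        # fall back to the command's own serviceName when no explicit argument is given
        if service_name is None:
            service_name = config.get("serviceName")
        if service_name is None or service_name not in versions:
            return None

        component_versions = versions[service_name]
        if not component_versions:
            return None

        # fall back to the command's role, then to any version mapped for the service
        if component_name is None:
            component_name = config.get("role")
        if component_name is not None and component_name in component_versions:
            return component_versions[component_name]
        return list(component_versions.values())[0]

    # hypothetical command payload
    command = {
        "serviceName": "HIVE",
        "role": "HIVE_SERVER",
        "componentVersionMap": {"HIVE": {"HIVE_SERVER": "2.6.3.0-235"}},
    }
    print(get_component_repository_version(command))  # -> 2.6.3.0-235
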
http://git-wip-us.apache.org/repos/asf/ambari/blob/5433e479/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py b/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
index ffcaad5..86821bf 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
@@ -34,9 +34,11 @@ from resource_management.core.logger import Logger
from resource_management.core.resources.system import Directory
from resource_management.core.resources.system import Execute
from resource_management.core.resources.system import Link
+from resource_management.libraries.functions import component_version
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions import stack_tools
from resource_management.core.exceptions import Fail
+from resource_management.core import sudo
from resource_management.core.shell import as_sudo
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions import StackFeature
@@ -215,79 +217,28 @@ def select(stack_name, package, version, try_create=True, ignore_errors=False):
-def get_hadoop_conf_dir(force_latest_on_upgrade=False):
+def get_hadoop_conf_dir():
"""
- Gets the shared hadoop conf directory using:
- 1. Start with /etc/hadoop/conf
- 2. When the stack is greater than HDP-2.2, use <stack-root>/current/hadoop-client/conf
- 3. Only when doing a RU and HDP-2.3 or higher, use the value as computed
- by <conf-selector-tool>. This is in the form <stack-root>/VERSION/hadoop/conf to make sure
- the configs are written in the correct place. However, if the component itself has
- not yet been upgraded, it should use the hadoop configs from the prior version.
- This will perform an <stack-selector-tool> status to determine which version to use.
- :param force_latest_on_upgrade: if True, then force the returned path to always
- be that of the upgrade target version, even if <stack-selector-tool> has not been called. This
- is primarily used by hooks like before-ANY to ensure that hadoop environment
- configurations are written to the correct location since they are written out
- before the <stack-selector-tool>/<conf-selector-tool> would have been called.
+ Return the hadoop shared conf directory which should be used for the command's component. The
+ directory including the component's version is tried first, but if that doesn't exist,
+ this will fallback to using "current".
"""
- hadoop_conf_dir = "/etc/hadoop/conf"
- stack_name = None
stack_root = Script.get_stack_root()
stack_version = Script.get_stack_version()
- version = None
- if not Script.in_stack_upgrade():
- # During normal operation, the HDP stack must be 2.3 or higher
- if stack_version and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version):
- hadoop_conf_dir = os.path.join(stack_root, "current", "hadoop-client", "conf")
-
- if stack_version and check_stack_feature(StackFeature.CONFIG_VERSIONING, stack_version):
- hadoop_conf_dir = os.path.join(stack_root, "current", "hadoop-client", "conf")
- stack_name = default("/hostLevelParams/stack_name", None)
+ hadoop_conf_dir = os.path.join(os.path.sep, "etc", "hadoop", "conf")
+ if check_stack_feature(StackFeature.CONFIG_VERSIONING, stack_version):
+ # read the desired version from the component map and use that for building the hadoop home
+ version = component_version.get_component_repository_version()
+ if version is None:
version = default("/commandParams/version", None)
- if not os.path.islink(hadoop_conf_dir) and stack_name and version:
- version = str(version)
- else:
- # The "stack_version" is the desired stack, e.g., 2.2 or 2.3
- # In an RU, it is always the desired stack, and doesn't change even during the Downgrade!
- # In an RU Downgrade from HDP 2.3 to 2.2, the first thing we do is
- # rm /etc/[component]/conf and then mv /etc/[component]/conf.backup /etc/[component]/conf
- if stack_version and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version):
+ hadoop_conf_dir = os.path.join(stack_root, str(version), "hadoop", "conf")
+ if version is None or sudo.path_isdir(hadoop_conf_dir) is False:
hadoop_conf_dir = os.path.join(stack_root, "current", "hadoop-client", "conf")
- # This contains the "version", including the build number, that is actually used during a stack upgrade and
- # is the version upgrading/downgrading to.
- stack_info = stack_select._get_upgrade_stack()
-
- if stack_info is None:
- raise Fail("Unable to retrieve the upgrade/downgrade stack information from the request")
-
- stack_name = stack_info[0]
- version = stack_info[1]
-
- Logger.info(
- "An upgrade/downgrade for {0}-{1} is in progress, determining which hadoop conf dir to use.".format(
- stack_name, version))
-
- # This is the version either upgrading or downgrading to.
- if version and check_stack_feature(StackFeature.CONFIG_VERSIONING, version):
- # Determine if <stack-selector-tool> has been run and if not, then use the current
- # hdp version until this component is upgraded.
- if not force_latest_on_upgrade:
- current_stack_version = stack_select.get_role_component_current_stack_version()
- if current_stack_version is not None and version != current_stack_version:
- version = current_stack_version
- stack_selector_name = stack_tools.get_stack_tool_name(stack_tools.STACK_SELECTOR_NAME)
- Logger.info("{0} has not yet been called to update the symlink for this component, "
- "keep using version {1}".format(stack_selector_name, current_stack_version))
-
- # Only change the hadoop_conf_dir path, don't <conf-selector-tool> this older version
- hadoop_conf_dir = os.path.join(stack_root, version, "hadoop", "conf")
- Logger.info("Hadoop conf dir: {0}".format(hadoop_conf_dir))
-
- Logger.info("Using hadoop conf dir: {0}".format(hadoop_conf_dir))
+ Logger.info("Using hadoop conf dir: {0}".format(hadoop_conf_dir))
+
return hadoop_conf_dir
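
The rewritten get_hadoop_conf_dir() above boils down to a single preference order: on stacks with config versioning, try the versioned path <stack-root>/<version>/hadoop/conf and fall back to <stack-root>/current/hadoop-client/conf when that directory does not exist; other stacks keep /etc/hadoop/conf. A minimal standalone sketch of that decision (the real code also falls back to /commandParams/version when the component map has no entry; paths below are hypothetical):

    import os

    def pick_hadoop_conf_dir(stack_root, version, supports_config_versioning, isdir=os.path.isdir):
        # default for stacks without config versioning
        conf_dir = os.path.join(os.path.sep, "etc", "hadoop", "conf")
        if supports_config_versioning:
            if version is not None:
                conf_dir = os.path.join(stack_root, str(version), "hadoop", "conf")
            # fall back to "current" when the versioned directory is absent
            if version is None or not isdir(conf_dir):
                conf_dir = os.path.join(stack_root, "current", "hadoop-client", "conf")
        return conf_dir

    # hypothetical host where 2.6.3.0-235 has already been distributed
    print(pick_hadoop_conf_dir("/usr/hdp", "2.6.3.0-235", True, isdir=lambda p: True))
    # -> /usr/hdp/2.6.3.0-235/hadoop/conf
    print(pick_hadoop_conf_dir("/usr/hdp", None, True))
    # -> /usr/hdp/current/hadoop-client/conf
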
http://git-wip-us.apache.org/repos/asf/ambari/blob/5433e479/ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py
index f5068e4..d8f3d37 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py
@@ -28,16 +28,18 @@ import ambari_simplejson as json
from resource_management.core.logger import Logger
from resource_management.core.exceptions import Fail
from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions import component_version
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.get_stack_version import get_stack_version
from resource_management.libraries.functions.format import format
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions import stack_tools
from resource_management.core import shell
+from resource_management.core import sudo
from resource_management.core.shell import call
from resource_management.libraries.functions.version import format_stack_version
from resource_management.libraries.functions.version_select_util import get_versions_from_stack_root
-from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import stack_features
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions import upgrade_summary
@@ -352,17 +354,13 @@ def get_role_component_current_stack_version():
return current_stack_version
-def get_hadoop_dir(target, force_latest_on_upgrade=False):
+def get_hadoop_dir(target):
"""
- Return the hadoop shared directory in the following override order
- 1. Use default for 2.1 and lower
- 2. If 2.2 and higher, use <stack-root>/current/hadoop-client/{target}
- 3. If 2.2 and higher AND for an upgrade, use <stack-root>/<version>/hadoop/{target}.
- However, if the upgrade has not yet invoked <stack-selector-tool>, return the current
- version of the component.
+ Return the hadoop shared directory which should be used for the command's component. The
+ directory including the component's version is tried first, but if that doesn't exist,
+ this will fallback to using "current".
+
:target: the target directory
- :force_latest_on_upgrade: if True, then this will return the "current" directory
- without the stack version built into the path, such as <stack-root>/current/hadoop-client
"""
stack_root = Script.get_stack_root()
stack_version = Script.get_stack_version()
@@ -373,35 +371,26 @@ def get_hadoop_dir(target, force_latest_on_upgrade=False):
hadoop_dir = HADOOP_DIR_DEFAULTS[target]
formatted_stack_version = format_stack_version(stack_version)
- if formatted_stack_version and check_stack_feature(StackFeature.ROLLING_UPGRADE, formatted_stack_version):
+
+ if stack_features.check_stack_feature(StackFeature.ROLLING_UPGRADE, formatted_stack_version):
+ # read the desired version from the component map and use that for building the hadoop home
+ version = component_version.get_component_repository_version()
+ if version is None:
+ version = default("/commandParams/version", None)
+
# home uses a different template
if target == "home":
- hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_root, "current", "hadoop-client")
+ hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_root, version, "hadoop")
+ if version is None or sudo.path_isdir(hadoop_dir) is False:
+ hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_root, "current", "hadoop-client")
else:
- hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_root, "current", "hadoop-client", target)
-
- # if we are not forcing "current" for HDP 2.2, then attempt to determine
- # if the exact version needs to be returned in the directory
- if not force_latest_on_upgrade:
- stack_info = _get_upgrade_stack()
-
- if stack_info is not None:
- stack_version = stack_info[1]
-
- # determine if <stack-selector-tool> has been run and if not, then use the current
- # hdp version until this component is upgraded
- current_stack_version = get_role_component_current_stack_version()
- if current_stack_version is not None and stack_version != current_stack_version:
- stack_version = current_stack_version
-
- if target == "home":
- # home uses a different template
- hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_root, stack_version, "hadoop")
- else:
- hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_root, stack_version, "hadoop", target)
+ hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_root, version, "hadoop", target)
+ if version is None or sudo.path_isdir(hadoop_dir) is False:
+ hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_root, "current", "hadoop-client", target)
return hadoop_dir
+
def get_hadoop_dir_for_stack_version(target, stack_version):
"""
Return the hadoop shared directory for the provided stack version. This is necessary
@@ -414,15 +403,11 @@ def get_hadoop_dir_for_stack_version(target, stack_version):
if not target in HADOOP_DIR_DEFAULTS:
raise Fail("Target {0} not defined".format(target))
- hadoop_dir = HADOOP_DIR_DEFAULTS[target]
-
- formatted_stack_version = format_stack_version(stack_version)
- if formatted_stack_version and check_stack_feature(StackFeature.ROLLING_UPGRADE, formatted_stack_version):
- # home uses a different template
- if target == "home":
- hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_root, stack_version, "hadoop")
- else:
- hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_root, stack_version, "hadoop", target)
+ # home uses a different template
+ if target == "home":
+ hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_root, stack_version, "hadoop")
+ else:
+ hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_root, stack_version, "hadoop", target)
return hadoop_dir
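
get_hadoop_dir() now follows the same version-first pattern on 2.2+ stacks: build the versioned path, then fall back to the "current" symlink path when it is missing (older stacks keep the HADOOP_DIR_DEFAULTS values). A simplified sketch, assuming the templates expand the same way as HADOOP_HOME_DIR_TEMPLATE / HADOOP_DIR_TEMPLATE in stack_select.py and using hypothetical paths:

    import os

    def pick_hadoop_dir(target, stack_root, version, isdir=os.path.isdir):
        if target == "home":
            # home uses a different template: <root>/<version>/hadoop
            hadoop_dir = "{0}/{1}/{2}".format(stack_root, version, "hadoop")
            fallback = "{0}/{1}/{2}".format(stack_root, "current", "hadoop-client")
        else:
            hadoop_dir = "{0}/{1}/{2}/{3}".format(stack_root, version, "hadoop", target)
            fallback = "{0}/{1}/{2}/{3}".format(stack_root, "current", "hadoop-client", target)

        # prefer the versioned directory, but only if it actually exists on disk
        if version is None or not isdir(hadoop_dir):
            hadoop_dir = fallback
        return hadoop_dir

    # hypothetical example: libexec dir for a version that has not been laid down yet
    print(pick_hadoop_dir("libexec", "/usr/hdp", "2.6.3.0-235", isdir=lambda p: False))
    # -> /usr/hdp/current/hadoop-client/libexec
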
http://git-wip-us.apache.org/repos/asf/ambari/blob/5433e479/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
index 0042f53..a77ed75 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
@@ -210,7 +210,7 @@ public class HeartbeatMonitor implements Runnable {
* @return list of commands to get status of service components on a concrete host
*/
public List<StatusCommand> generateStatusCommands(String hostname) throws AmbariException {
- List<StatusCommand> cmds = new ArrayList<StatusCommand>();
+ List<StatusCommand> cmds = new ArrayList<>();
for (Cluster cl : clusters.getClustersForHost(hostname)) {
Map<String, DesiredConfig> desiredConfigs = cl.getDesiredConfigs();
@@ -249,8 +249,8 @@ public class HeartbeatMonitor implements Runnable {
StackInfo stackInfo = ambariMetaInfo.getStack(stackId.getStackName(),
stackId.getStackVersion());
- Map<String, Map<String, String>> configurations = new TreeMap<String, Map<String, String>>();
- Map<String, Map<String, Map<String, String>>> configurationAttributes = new TreeMap<String, Map<String, Map<String, String>>>();
+ Map<String, Map<String, String>> configurations = new TreeMap<>();
+ Map<String, Map<String, Map<String, String>>> configurationAttributes = new TreeMap<>();
// get the cluster config for type '*-env'
// apply config group overrides
http://git-wip-us.apache.org/repos/asf/ambari/blob/5433e479/ambari-server/src/main/java/org/apache/ambari/server/agent/StatusCommand.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/StatusCommand.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/StatusCommand.java
index 5dec53c..133da0b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/StatusCommand.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/StatusCommand.java
@@ -17,12 +17,13 @@
*/
package org.apache.ambari.server.agent;
-import com.google.gson.annotations.SerializedName;
-import org.apache.ambari.server.state.State;
-
import java.util.HashMap;
import java.util.Map;
+import org.apache.ambari.server.state.State;
+
+import com.google.gson.annotations.SerializedName;
+
/**
* Command to report the status of a list of services in roles.
*/
@@ -38,6 +39,9 @@ public class StatusCommand extends AgentCommand {
@SerializedName("serviceName")
private String serviceName;
+ @SerializedName("role")
+ private String role;
+
@SerializedName("componentName")
private String componentName;
@@ -48,10 +52,10 @@ public class StatusCommand extends AgentCommand {
private Map<String, Map<String, Map<String, String>>> configurationAttributes;
@SerializedName("commandParams")
- private Map<String, String> commandParams = new HashMap<String, String>();
+ private Map<String, String> commandParams = new HashMap<>();
@SerializedName("hostLevelParams")
- private Map<String, String> hostLevelParams = new HashMap<String, String>();
+ private Map<String, String> hostLevelParams = new HashMap<>();
@SerializedName("hostname")
private String hostname = null;
@@ -120,8 +124,17 @@ public class StatusCommand extends AgentCommand {
return componentName;
}
+ /**
+ * Sets both the {@code componentName} and the {@code role}. Status commands
+ * use the {@code componentName}, while execution commands use the
+ * {@code role}. It's simpler for the Python to just worry about {@code role},
+ * so this ensures that both are set.
+ *
+ * @param componentName
+ */
public void setComponentName(String componentName) {
this.componentName = componentName;
+ role = componentName;
}
public Map<String, Map<String, String>> getConfigurations() {
@@ -164,6 +177,10 @@ public class StatusCommand extends AgentCommand {
return hostname;
}
+ public String getRole() {
+ return role;
+ }
+
public enum StatusCommandPayload {
// The minimal payload for status, agent adds necessary details
MINIMAL,
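
The server-side change above mirrors the component name into a "role" field on status commands, so the agent-side Python can rely on config["role"] regardless of command type. A hypothetical status-command payload as the agent would now see it (values invented for illustration):

    # setComponentName() on the server copies componentName into "role" as well,
    # so the same config["role"] lookup works for both status and execution commands.
    status_command = {
        "serviceName": "HDFS",
        "componentName": "DATANODE",
        "role": "DATANODE",  # newly mirrored from componentName
        "componentVersionMap": {"HDFS": {"DATANODE": "2.6.3.0-235"}},
    }
    assert status_command["role"] == status_command["componentName"]
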
http://git-wip-us.apache.org/repos/asf/ambari/blob/5433e479/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_upgrade.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_upgrade.py
index 12c9e1c..1cb95ff 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_upgrade.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_upgrade.py
@@ -58,11 +58,6 @@ def deregister():
if current_hiveserver_version is None:
raise Fail('Unable to determine the current HiveServer2 version to deregister.')
- # fallback when upgrading because <stack-root>/current/hive-server2/conf/conf.server may not exist
- hive_server_conf_dir = params.hive_server_conf_dir
- if not os.path.exists(hive_server_conf_dir):
- hive_server_conf_dir = "/etc/hive/conf.server"
-
# deregister
hive_execute_path = params.execute_path
# If upgrading, the upgrade-target hive binary should be used to call the --deregister command.
http://git-wip-us.apache.org/repos/asf/ambari/blob/5433e479/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
index 77e1bed..bcc1826 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
@@ -17,7 +17,6 @@ See the License for the specific language governing permissions and
limitations under the License.
"""
-
import status_params
import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
import os
@@ -36,6 +35,7 @@ from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.stack_features import get_stack_feature_version
from resource_management.libraries.functions.get_port_from_url import get_port_from_url
@@ -109,7 +109,8 @@ stack_supports_hive_interactive_ga = check_stack_feature(StackFeature.HIVE_INTER
component_directory = status_params.component_directory
component_directory_interactive = status_params.component_directory_interactive
-hadoop_home = format('{stack_root}/current/hadoop-client')
+hadoop_home = stack_select.get_hadoop_dir("home")
+
hive_bin = format('{stack_root}/current/{component_directory}/bin')
hive_schematool_ver_bin = format('{stack_root}/{version}/hive/bin')
hive_schematool_bin = format('{stack_root}/current/{component_directory}/bin')
http://git-wip-us.apache.org/repos/asf/ambari/blob/5433e479/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/status_params.py
index f5b00ac..3a3e3f0 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/status_params.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/status_params.py
@@ -25,7 +25,7 @@ from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import format
from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import stack_features
from resource_management.libraries.functions.version import format_stack_version
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions import get_kinit_path
@@ -103,17 +103,17 @@ else:
hive_conf_dir = format("{stack_root}/current/{component_directory}/conf")
hive_client_conf_dir = format("{stack_root}/current/{component_directory}/conf")
- if check_stack_feature(StackFeature.CONFIG_VERSIONING, stack_version_formatted_major):
+ if stack_features.check_stack_feature(StackFeature.CONFIG_VERSIONING, stack_version_formatted_major):
hive_server_conf_dir = format("{stack_root}/current/{component_directory}/conf/conf.server")
hive_conf_dir = hive_server_conf_dir
- if check_stack_feature(StackFeature.HIVE_WEBHCAT_SPECIFIC_CONFIGS, stack_version_formatted_major):
+ if stack_features.check_stack_feature(StackFeature.HIVE_WEBHCAT_SPECIFIC_CONFIGS, stack_version_formatted_major):
# this is NOT a typo. Configs for hcatalog/webhcat point to a
# specific directory which is NOT called 'conf'
webhcat_conf_dir = format("{stack_root}/current/hive-webhcat/etc/webhcat")
# if stack version supports hive serve interactive
- if check_stack_feature(StackFeature.HIVE_SERVER_INTERACTIVE, stack_version_formatted_major):
+ if stack_features.check_stack_feature(StackFeature.HIVE_SERVER_INTERACTIVE, stack_version_formatted_major):
hive_server_interactive_conf_dir = format("{stack_root}/current/{component_directory_interactive}/conf/conf.server")
hive_config_dir = hive_client_conf_dir
http://git-wip-us.apache.org/repos/asf/ambari/blob/5433e479/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat.py
index 7f5eff6..e2036e5 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat.py
@@ -82,7 +82,7 @@ def webhcat():
)
# if we're in an upgrade of a secure cluster, make sure hive-site and yarn-site are created
- if params.stack_version_formatted_major and check_stack_feature(StackFeature.CONFIG_VERSIONING, params.stack_version_formatted_major) and \
+ if check_stack_feature(StackFeature.CONFIG_VERSIONING, params.stack_version_formatted_major) and \
params.version and params.stack_root:
XmlConfig("hive-site.xml",
conf_dir = format("{stack_root}/{version}/hive/conf"),
http://git-wip-us.apache.org/repos/asf/ambari/blob/5433e479/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/params.py
index d3332db..f5c716b 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/params.py
@@ -85,4 +85,4 @@ namenode_host = default("/clusterHostInfo/namenode_host", [])
has_namenode = not len(namenode_host) == 0
if has_namenode:
- hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
+ hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
http://git-wip-us.apache.org/repos/asf/ambari/blob/5433e479/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/params.py
index 5ffd28c..91212bd 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/params.py
@@ -95,8 +95,8 @@ mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
# upgrades would cause these directories to have a version instead of "current"
# which would cause a lot of problems when writing out hadoop-env.sh; instead
# force the use of "current" in the hook
-hadoop_home = stack_select.get_hadoop_dir("home", force_latest_on_upgrade=True)
-hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec", force_latest_on_upgrade=True)
+hadoop_home = stack_select.get_hadoop_dir("home")
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
hadoop_secure_dn_user = hdfs_user
@@ -186,7 +186,7 @@ has_ranger_admin = not len(ranger_admin_hosts) == 0
if has_namenode:
- hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
+ hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hbase_tmp_dir = "/tmp/hbase-hbase"
http://git-wip-us.apache.org/repos/asf/ambari/blob/5433e479/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/params.py
index 5c84a05..d72868a 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/params.py
@@ -120,7 +120,7 @@ metrics_collection_period = default("/configurations/ams-site/timeline.metrics.s
if has_namenode:
hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
- hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
+ hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
http://git-wip-us.apache.org/repos/asf/ambari/blob/5433e479/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/after-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/after-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/after-INSTALL/scripts/params.py
index c497054..0de8fe7 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/after-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/after-INSTALL/scripts/params.py
@@ -90,7 +90,7 @@ namenode_host = default("/clusterHostInfo/namenode_host", [])
has_namenode = not len(namenode_host) == 0
if has_namenode or dfs_type == 'HCFS':
- hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
+ hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
link_configs_lock_file = os.path.join(tmp_dir, "link_configs_lock_file")
stack_select_lock_file = os.path.join(tmp_dir, "stack_select_lock_file")
http://git-wip-us.apache.org/repos/asf/ambari/blob/5433e479/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-ANY/scripts/params.py
index b0467a9..1ed6d4d 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-ANY/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-ANY/scripts/params.py
@@ -98,8 +98,8 @@ mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
# upgrades would cause these directories to have a version instead of "current"
# which would cause a lot of problems when writing out hadoop-env.sh; instead
# force the use of "current" in the hook
-hadoop_home = stack_select.get_hadoop_dir("home", force_latest_on_upgrade=True)
-hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec", force_latest_on_upgrade=True)
+hadoop_home = stack_select.get_hadoop_dir("home")
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
hadoop_secure_dn_user = hdfs_user
@@ -187,7 +187,7 @@ has_falcon_server_hosts = not len(falcon_server_hosts) == 0
has_ranger_admin = not len(ranger_admin_hosts) == 0
if has_namenode or dfs_type == 'HCFS':
- hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
+ hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hbase_tmp_dir = "/tmp/hbase-hbase"
http://git-wip-us.apache.org/repos/asf/ambari/blob/5433e479/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/scripts/params.py
index be9db58..615fcff 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/scripts/params.py
@@ -139,7 +139,7 @@ metrics_collection_period = default("/configurations/ams-site/timeline.metrics.s
if has_namenode or dfs_type == 'HCFS':
hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
- hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
+ hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
http://git-wip-us.apache.org/repos/asf/ambari/blob/5433e479/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
index 4d7eaee..29a74e7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
@@ -43,16 +43,9 @@ stack_version_formatted = format_stack_version(stack_version_unformatted)
major_stack_version = get_major_version(stack_version_formatted)
# default hadoop params
-mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
-hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
-# HDP 2.2+ params
-if Script.is_stack_greater_or_equal("2.2"):
- mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
-
- # not supported in HDP 2.2+
- hadoop_conf_empty_dir = None
+mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
versioned_stack_root = '/usr/hdp/current'
@@ -93,7 +86,7 @@ namenode_host = default("/clusterHostInfo/namenode_host", [])
has_namenode = not len(namenode_host) == 0
if has_namenode or dfs_type == 'HCFS':
- hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
+ hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
link_configs_lock_file = get_config_lock_file()
stack_select_lock_file = os.path.join(tmp_dir, "stack_select_lock_file")
http://git-wip-us.apache.org/repos/asf/ambari/blob/5433e479/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
index e085225..8ad0d51 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
@@ -101,49 +101,38 @@ def is_secure_port(port):
else:
return False
-# hadoop default params
-mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-
# upgrades would cause these directories to have a version instead of "current"
# which would cause a lot of problems when writing out hadoop-env.sh; instead
# force the use of "current" in the hook
hdfs_user_nofile_limit = default("/configurations/hadoop-env/hdfs_user_nofile_limit", "128000")
-hadoop_home = stack_select.get_hadoop_dir("home", force_latest_on_upgrade=True)
-hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec", force_latest_on_upgrade=True)
+hadoop_home = stack_select.get_hadoop_dir("home")
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
hadoop_lib_home = stack_select.get_hadoop_dir("lib")
-hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
-hadoop_secure_dn_user = hdfs_user
hadoop_dir = "/etc/hadoop"
-versioned_stack_root = '/usr/hdp/current'
hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
datanode_max_locked_memory = config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory']
is_datanode_max_locked_memory_set = not is_empty(config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory'])
-# HDP 2.2+ params
-if Script.is_stack_greater_or_equal("2.2"):
- mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
-
- # not supported in HDP 2.2+
- hadoop_conf_empty_dir = None
-
- if not security_enabled:
- hadoop_secure_dn_user = '""'
+mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
+
+if not security_enabled:
+ hadoop_secure_dn_user = '""'
+else:
+ dfs_dn_port = get_port(dfs_dn_addr)
+ dfs_dn_http_port = get_port(dfs_dn_http_addr)
+ dfs_dn_https_port = get_port(dfs_dn_https_addr)
+ # We try to avoid inability to start datanode as a plain user due to usage of root-owned ports
+ if dfs_http_policy == "HTTPS_ONLY":
+ secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_https_port)
+ elif dfs_http_policy == "HTTP_AND_HTTPS":
+ secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port) or is_secure_port(dfs_dn_https_port)
+ else: # params.dfs_http_policy == "HTTP_ONLY" or not defined:
+ secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port)
+ if secure_dn_ports_are_in_use:
+ hadoop_secure_dn_user = hdfs_user
else:
- dfs_dn_port = get_port(dfs_dn_addr)
- dfs_dn_http_port = get_port(dfs_dn_http_addr)
- dfs_dn_https_port = get_port(dfs_dn_https_addr)
- # We try to avoid inability to start datanode as a plain user due to usage of root-owned ports
- if dfs_http_policy == "HTTPS_ONLY":
- secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_https_port)
- elif dfs_http_policy == "HTTP_AND_HTTPS":
- secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port) or is_secure_port(dfs_dn_https_port)
- else: # params.dfs_http_policy == "HTTP_ONLY" or not defined:
- secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port)
- if secure_dn_ports_are_in_use:
- hadoop_secure_dn_user = hdfs_user
- else:
- hadoop_secure_dn_user = '""'
+ hadoop_secure_dn_user = '""'
#hadoop params
hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
@@ -219,7 +208,7 @@ if dfs_ha_namenode_ids:
dfs_ha_enabled = True
if has_namenode or dfs_type == 'HCFS':
- hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
+ hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure")
hbase_tmp_dir = "/tmp/hbase-hbase"
http://git-wip-us.apache.org/repos/asf/ambari/blob/5433e479/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
index 3997117..3dfffdd 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
@@ -189,14 +189,6 @@ def setup_hadoop_env():
# create /etc/hadoop
Directory(params.hadoop_dir, mode=0755)
- # HDP < 2.2 used a conf -> conf.empty symlink for /etc/hadoop/
- if Script.is_stack_less_than("2.2"):
- Directory(params.hadoop_conf_empty_dir, create_parents = True, owner="root",
- group=params.user_group )
-
- Link(params.hadoop_conf_dir, to=params.hadoop_conf_empty_dir,
- not_if=format("ls {hadoop_conf_dir}"))
-
# write out hadoop-env.sh, but only if the directory exists
if os.path.exists(params.hadoop_conf_dir):
File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'), owner=tc_owner,
http://git-wip-us.apache.org/repos/asf/ambari/blob/5433e479/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
index 5ca2d94..55a6093 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
@@ -68,20 +68,13 @@ hadoop_metrics2_properties_content = None
if 'hadoop-metrics2.properties' in config['configurations']:
hadoop_metrics2_properties_content = config['configurations']['hadoop-metrics2.properties']['content']
-# hadoop default params
-mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-
hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
hadoop_lib_home = stack_select.get_hadoop_dir("lib")
hadoop_bin = stack_select.get_hadoop_dir("sbin")
-hadoop_home = '/usr'
-create_lib_snappy_symlinks = True
-
-# HDP 2.2+ params
-if Script.is_stack_greater_or_equal("2.2"):
- mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
- hadoop_home = stack_select.get_hadoop_dir("home")
- create_lib_snappy_symlinks = False
+
+mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
+hadoop_home = stack_select.get_hadoop_dir("home")
+create_lib_snappy_symlinks = False
current_service = config['serviceName']
@@ -189,7 +182,7 @@ if has_zk_host:
if has_namenode or dfs_type == 'HCFS':
hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
- hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
+ hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
http://git-wip-us.apache.org/repos/asf/ambari/blob/5433e479/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/params.py
index c304a93..652c23e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/params.py
@@ -49,7 +49,7 @@ smoke_hdfs_user_mode = 0770
java64_home = config['hostLevelParams']['java_home']
java_version = int(config['hostLevelParams']['java_version'])
-hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
security_enabled = config['configurations']['cluster-env']['security_enabled']
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
hadoop_dir = "/etc/hadoop"
http://git-wip-us.apache.org/repos/asf/ambari/blob/5433e479/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
index 135b239..1cde55a 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
@@ -223,7 +223,6 @@ class TestHBaseClient(RMFTestCase):
self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hbase-client', '2.2.1.0-2067'), sudo=True)
self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'phoenix-client', '2.2.1.0-2067'), sudo=True)
self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
- self.assertEquals(1, mocks_dict['call'].call_count)
@patch("resource_management.core.shell.call")
http://git-wip-us.apache.org/repos/asf/ambari/blob/5433e479/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
index 4ade11a..370b776 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
@@ -678,7 +678,7 @@ class TestHBaseMaster(RMFTestCase):
user = 'hdfs',
dfs_type = '',
owner = 'hbase',
- hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
+ hadoop_conf_dir = '/etc/hadoop/conf',
type = 'directory',
action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
)
@@ -694,7 +694,7 @@ class TestHBaseMaster(RMFTestCase):
user = 'hdfs',
dfs_type = '',
owner = 'hbase',
- hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
+ hadoop_conf_dir = '/etc/hadoop/conf',
type = 'directory',
action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
mode = 0711,
@@ -711,7 +711,7 @@ class TestHBaseMaster(RMFTestCase):
user = 'hdfs',
dfs_type = '',
action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
- hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
+ hadoop_conf_dir = '/etc/hadoop/conf',
)
self.assertResourceCalled('Execute', '/usr/hdp/current/hbase-master/bin/hbase-daemon.sh --config /usr/hdp/current/hbase-master/conf start master',
http://git-wip-us.apache.org/repos/asf/ambari/blob/5433e479/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
index 972aa61..f27a3b9 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
@@ -34,6 +34,7 @@ class TestPhoenixQueryServer(RMFTestCase):
CONFIG_OVERRIDES = {"serviceName":"HBASE", "role":"PHOENIX_QUERY_SERVER"}
+ @patch("resource_management.core.sudo.path_isdir", new = MagicMock(return_value = True))
def test_configure_default(self):
self.executeScript(
self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/phoenix_queryserver.py",
@@ -48,6 +49,7 @@ class TestPhoenixQueryServer(RMFTestCase):
self.assert_configure_default()
self.assertNoMoreResources()
+ @patch("resource_management.core.sudo.path_isdir", new = MagicMock(return_value = True))
def test_start_default(self):
self.executeScript(
self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/phoenix_queryserver.py",
@@ -67,6 +69,7 @@ class TestPhoenixQueryServer(RMFTestCase):
)
self.assertNoMoreResources()
+ @patch("resource_management.core.sudo.path_isdir", new = MagicMock(return_value = True))
def test_stop_default(self):
self.executeScript(
self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/phoenix_queryserver.py",
@@ -90,6 +93,7 @@ class TestPhoenixQueryServer(RMFTestCase):
)
self.assertNoMoreResources()
+ @patch("resource_management.core.sudo.path_isdir", new = MagicMock(return_value = True))
def test_configure_secured(self):
self.executeScript(
self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/phoenix_queryserver.py",
@@ -104,6 +108,7 @@ class TestPhoenixQueryServer(RMFTestCase):
self.assert_configure_secured()
self.assertNoMoreResources()
+ @patch("resource_management.core.sudo.path_isdir", new = MagicMock(return_value = True))
def test_start_secured(self):
self.executeScript(
self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/phoenix_queryserver.py",
@@ -123,6 +128,7 @@ class TestPhoenixQueryServer(RMFTestCase):
)
self.assertNoMoreResources()
+ @patch("resource_management.core.sudo.path_isdir", new = MagicMock(return_value = True))
def test_stop_secured(self):
self.executeScript(
self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/phoenix_queryserver.py",
@@ -422,6 +428,7 @@ class TestPhoenixQueryServer(RMFTestCase):
content = InlineTemplate('log4jproperties\nline2')
)
+ @patch("resource_management.core.sudo.path_isdir", new = MagicMock(return_value = True))
def test_upgrade_restart(self):
config_file = self.get_src_folder()+"/test/python/stacks/2.3/configs/hbase_default.json"
with open(config_file, "r") as f:
http://git-wip-us.apache.org/repos/asf/ambari/blob/5433e479/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
index 24b0347..b1a4154 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
@@ -25,9 +25,11 @@ from resource_management.core import shell
import itertools
from resource_management.core.exceptions import Fail
import resource_management.libraries.functions.mounted_dirs_helper
+from resource_management.libraries.functions import conf_select
@patch.object(resource_management.libraries.functions, 'check_process_status', new = MagicMock())
@patch.object(Script, 'format_package_name', new = MagicMock())
+@patch.object(conf_select, "get_hadoop_conf_dir", new=MagicMock(return_value="/usr/hdp/current/hadoop-client/conf"))
class TestDatanode(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = "HDFS/2.1.0.2.0/package"
STACK_VERSION = "2.0.6"
@@ -73,7 +75,7 @@ class TestDatanode(RMFTestCase):
action = ['delete'],
not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid",
)
- self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ; /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode'",
+ self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ; /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /usr/hdp/current/hadoop-client/conf start datanode'",
environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid",
)
@@ -96,7 +98,7 @@ class TestDatanode(RMFTestCase):
checked_call_mocks = side_effect,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
- self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ; /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode'",
+ self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ; /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /usr/hdp/current/hadoop-client/conf stop datanode'",
environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
only_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid")
@@ -143,7 +145,7 @@ class TestDatanode(RMFTestCase):
action = ['delete'],
not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid",
)
- self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode',
+ self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /usr/hdp/current/hadoop-client/conf start datanode',
environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid",
)
@@ -163,7 +165,7 @@ class TestDatanode(RMFTestCase):
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
- self.assert_configure_secured("2.2", snappy_enabled=False)
+ self.assert_configure_secured("2.3", snappy_enabled=False)
self.assertResourceCalled('Directory', '/var/run/hadoop',
owner = 'hdfs',
group = 'hadoop',
@@ -183,8 +185,8 @@ class TestDatanode(RMFTestCase):
action = ['delete'],
not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid",
)
- self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /usr/hdp/current/hadoop-client/conf start datanode',
- environment = {'HADOOP_LIBEXEC_DIR': '/usr/hdp/current/hadoop-client/libexec'},
+ self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E /usr/hdp/2.1.0.0-1234/hadoop/sbin/hadoop-daemon.sh --config /usr/hdp/current/hadoop-client/conf start datanode',
+ environment = {'HADOOP_LIBEXEC_DIR': '/usr/hdp/2.1.0.0-1234/hadoop/libexec'},
not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid",
)
self.assertNoMoreResources()
@@ -206,7 +208,7 @@ class TestDatanode(RMFTestCase):
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
- self.assert_configure_secured("2.2", snappy_enabled=False)
+ self.assert_configure_secured("2.3", snappy_enabled=False)
self.assertResourceCalled('Directory', '/var/run/hadoop',
owner = 'hdfs',
group = 'hadoop',
@@ -226,8 +228,8 @@ class TestDatanode(RMFTestCase):
action = ['delete'],
not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid",
)
- self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ; /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /usr/hdp/current/hadoop-client/conf start datanode'",
- environment = {'HADOOP_LIBEXEC_DIR': '/usr/hdp/current/hadoop-client/libexec'},
+ self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ; /usr/hdp/2.1.0.0-1234/hadoop/sbin/hadoop-daemon.sh --config /usr/hdp/current/hadoop-client/conf start datanode'",
+ environment = {'HADOOP_LIBEXEC_DIR': '/usr/hdp/2.1.0.0-1234/hadoop/libexec'},
not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid",
)
self.assertNoMoreResources()
@@ -249,7 +251,7 @@ class TestDatanode(RMFTestCase):
checked_call_mocks = side_effect,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
- self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode',
+ self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /usr/hdp/current/hadoop-client/conf stop datanode',
environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
only_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid")
@@ -279,8 +281,8 @@ class TestDatanode(RMFTestCase):
checked_call_mocks = side_effect,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
- self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /usr/hdp/current/hadoop-client/conf stop datanode',
- environment = {'HADOOP_LIBEXEC_DIR': '/usr/hdp/current/hadoop-client/libexec'},
+ self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E /usr/hdp/2.1.0.0-1234/hadoop/sbin/hadoop-daemon.sh --config /usr/hdp/current/hadoop-client/conf stop datanode',
+ environment = {'HADOOP_LIBEXEC_DIR': '/usr/hdp/2.1.0.0-1234/hadoop/libexec'},
only_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid")
self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid', action = ['delete'])
@@ -312,8 +314,8 @@ class TestDatanode(RMFTestCase):
checked_call_mocks = side_effect,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
- self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ; /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /usr/hdp/current/hadoop-client/conf stop datanode'",
- environment = {'HADOOP_LIBEXEC_DIR': '/usr/hdp/current/hadoop-client/libexec'},
+ self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ; /usr/hdp/2.1.0.0-1234/hadoop/sbin/hadoop-daemon.sh --config /usr/hdp/current/hadoop-client/conf stop datanode'",
+ environment = {'HADOOP_LIBEXEC_DIR': '/usr/hdp/2.1.0.0-1234/hadoop/libexec'},
only_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid")
self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid', action = ['delete'])
@@ -346,19 +348,19 @@ class TestDatanode(RMFTestCase):
self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
owner = 'hdfs',
group = 'hadoop',
- conf_dir = '/etc/hadoop/conf',
+ conf_dir = '/usr/hdp/current/hadoop-client/conf',
configurations = self.getConfig()['configurations']['hdfs-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
)
self.assertResourceCalled('XmlConfig', 'core-site.xml',
owner = 'hdfs',
group = 'hadoop',
- conf_dir = '/etc/hadoop/conf',
+ conf_dir = '/usr/hdp/current/hadoop-client/conf',
configurations = self.getConfig()['configurations']['core-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
mode = 0644
)
- self.assertResourceCalled('File', '/etc/hadoop/conf/slaves',
+ self.assertResourceCalled('File', '/usr/hdp/current/hadoop-client/conf/slaves',
content = Template('slaves.j2'),
owner = 'hdfs',
)
@@ -390,7 +392,7 @@ class TestDatanode(RMFTestCase):
)
def assert_configure_secured(self, stackVersion=STACK_VERSION, snappy_enabled=True):
- conf_dir = '/etc/hadoop/conf'
+ conf_dir = '/usr/hdp/current/hadoop-client/conf'
if stackVersion != self.STACK_VERSION:
conf_dir = '/usr/hdp/current/hadoop-client/conf'
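A minimal sketch, not part of the commit, of the class-level patch pattern the datanode tests above now rely on: pinning conf_select.get_hadoop_conf_dir keeps every expected hadoop-daemon.sh command on /usr/hdp/current/hadoop-client/conf regardless of what is installed on the test machine. The test class and method names here are invented for illustration; it assumes mock and resource_management are importable, as in the test suite.

import unittest
from mock.mock import MagicMock, patch
from resource_management.libraries.functions import conf_select

# Class-level patch: applies to every test method, mirroring the decorator
# added at the top of test_datanode.py in the hunk above.
@patch.object(conf_select, "get_hadoop_conf_dir",
              new=MagicMock(return_value="/usr/hdp/current/hadoop-client/conf"))
class ExampleConfDirTest(unittest.TestCase):  # hypothetical test class
    def test_conf_dir_is_pinned(self):
        # The patched function ignores its arguments and always returns the
        # pinned client conf dir.
        self.assertEqual(conf_select.get_hadoop_conf_dir(),
                         "/usr/hdp/current/hadoop-client/conf")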
http://git-wip-us.apache.org/repos/asf/ambari/blob/5433e479/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
index ae51abf..805cd8b 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
@@ -1357,6 +1357,7 @@ class TestNamenode(RMFTestCase):
@patch("hdfs_namenode.is_this_namenode_active")
@patch("resource_management.libraries.functions.setup_ranger_plugin_xml.setup_ranger_plugin")
@patch("utils.get_namenode_states")
+ @patch("resource_management.core.sudo.path_isdir", new = MagicMock(return_value = True))
def test_upgrade_restart_eu_with_ranger(self, get_namenode_states_mock, setup_ranger_plugin_mock, is_active_nn_mock):
is_active_nn_mock.return_value = True
@@ -1613,6 +1614,7 @@ class TestNamenode(RMFTestCase):
self.assertEquals("/usr/lib/hadoop/sbin", sys.modules["params"].hadoop_bin)
@patch.object(shell, "call")
+ @patch("resource_management.core.sudo.path_isdir", new = MagicMock(return_value = True))
def test_pre_upgrade_restart_22_params(self, call_mock):
config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/nn_ru_lzo.json"
with open(config_file, "r") as f:
@@ -1633,7 +1635,7 @@ class TestNamenode(RMFTestCase):
call_mocks = [(0, None), (0, None), (0, None), (0, None), (0, None), (0, None), (0, None)],
mocks_dict = mocks_dict)
import sys
- self.assertEquals("/usr/hdp/current/hadoop-client/conf", sys.modules["params"].hadoop_conf_dir)
+ self.assertEquals("/etc/hadoop/conf", sys.modules["params"].hadoop_conf_dir)
self.assertEquals("/usr/hdp/{0}/hadoop/libexec".format(version), sys.modules["params"].hadoop_libexec_dir)
self.assertEquals("/usr/hdp/{0}/hadoop/bin".format(version), sys.modules["params"].hadoop_bin_dir)
self.assertEquals("/usr/hdp/{0}/hadoop/sbin".format(version), sys.modules["params"].hadoop_bin)
@@ -1669,6 +1671,7 @@ class TestNamenode(RMFTestCase):
@patch("namenode_upgrade.create_upgrade_marker", MagicMock())
+ @patch("resource_management.core.sudo.path_isdir", new = MagicMock(return_value = True))
def test_express_upgrade_skips_safemode_and_directory_creation(self):
"""
Tests that we wait for Safemode to be OFF no matter what except for EU. And, because of that,
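The resource_management.core.sudo.path_isdir patch that the hunks above add to several tests follows the same shape. A minimal, self-contained sketch (test class and body invented for illustration, assuming mock and resource_management are on the path):

import unittest
from mock.mock import MagicMock, patch

# With path_isdir forced to True, code paths that probe directories such as
# /usr/hdp/<version>/hadoop no longer depend on the test machine's filesystem.
@patch("resource_management.core.sudo.path_isdir", new=MagicMock(return_value=True))
class ExamplePathIsDirTest(unittest.TestCase):  # hypothetical test class
    def test_path_isdir_always_true(self):
        from resource_management.core import sudo
        self.assertTrue(sudo.path_isdir("/usr/hdp/current/hadoop-client"))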
http://git-wip-us.apache.org/repos/asf/ambari/blob/5433e479/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_metastore.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_metastore.py
index 0ce6282..452c0b6 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_metastore.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_metastore.py
@@ -22,7 +22,19 @@ import os
from mock.mock import MagicMock, call, patch
from stacks.utils.RMFTestCase import *
+from resource_management.libraries.functions import stack_features
+
+# fakes out stack feature checks when the config files used by the unit tests reference older stacks
+def mock_stack_feature(stack_feature, stack_version):
+ if stack_feature == "rolling_upgrade":
+ return True
+ if stack_feature == "config_versioning":
+ return True
+
+ return False
+
@patch("resource_management.libraries.functions.get_user_call_output.get_user_call_output", new=MagicMock(return_value=(0,'123','')))
+@patch.object(stack_features, "check_stack_feature", new=MagicMock(side_effect=mock_stack_feature))
class TestHiveMetastore(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = "HIVE/0.12.0.2.0/package"
STACK_VERSION = "2.0.6"
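To make the intent of the side_effect hook above concrete, a small standalone sketch (not part of the commit) of how the fake answers check_stack_feature once the class-level patch is active; only the two features named in the helper report as supported:

from mock.mock import MagicMock, patch
from resource_management.libraries.functions import stack_features

def mock_stack_feature(stack_feature, stack_version):
    # mirror of the helper added above: only these two features are "supported"
    return stack_feature in ("rolling_upgrade", "config_versioning")

with patch.object(stack_features, "check_stack_feature",
                  new=MagicMock(side_effect=mock_stack_feature)):
    # side_effect routes every call through the fake, whatever version is passed
    assert stack_features.check_stack_feature("rolling_upgrade", "2.0.6") is True
    assert stack_features.check_stack_feature("some_other_feature", "2.0.6") is False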
@@ -49,13 +61,13 @@ class TestHiveMetastore(RMFTestCase):
self.assert_configure_default()
self.assert_init_schema()
- self.assertResourceCalled('Execute', '/tmp/start_metastore_script /var/log/hive/hive.out /var/log/hive/hive.err /var/run/hive/hive.pid /etc/hive/conf.server /var/log/hive',
+ self.assertResourceCalled('Execute', '/tmp/start_metastore_script /var/log/hive/hive.out /var/log/hive/hive.err /var/run/hive/hive.pid /usr/hdp/current/hive-server2/conf/conf.server /var/log/hive',
environment = {'HADOOP_HOME': '/usr/hdp/current/hadoop-client',
'HIVE_BIN': 'hive',
'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
not_if = "ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps -p 123 >/dev/null 2>&1",
user = 'hive',
- path = ['/bin:/usr/hdp/current/hive-server2/bin:/usr/bin'],
+ path = ['/bin:/usr/hdp/current/hive-server2/bin:/usr/hdp/current/hadoop-client/bin'],
)
self.assertResourceCalled('Execute', '/usr/jdk64/jdk1.7.0_45/bin/java -cp /usr/lib/ambari-agent/DBConnectionVerification.jar:/usr/hdp/current/hive-server2/lib/mysql-connector-java.jar org.apache.ambari.server.DBConnectionVerification \'jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true\' hive \'!`"\'"\'"\' 1\' com.mysql.jdbc.Driver',
path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
@@ -110,13 +122,13 @@ class TestHiveMetastore(RMFTestCase):
)
self.assert_configure_secured()
self.assert_init_schema()
- self.assertResourceCalled('Execute', '/tmp/start_metastore_script /var/log/hive/hive.out /var/log/hive/hive.err /var/run/hive/hive.pid /etc/hive/conf.server /var/log/hive',
- environment = {'HADOOP_HOME': '/usr/hdp/current/hadoop-client',
+ self.assertResourceCalled('Execute', '/tmp/start_metastore_script /var/log/hive/hive.out /var/log/hive/hive.err /var/run/hive/hive.pid /usr/hdp/current/hive-server2/conf/conf.server /var/log/hive',
+ environment = {'HADOOP_HOME': '/usr/hdp/2.1.0.0-1234/hadoop',
'HIVE_BIN': 'hive',
'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
not_if = "ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps -p 123 >/dev/null 2>&1",
user = 'hive',
- path = ['/bin:/usr/hdp/current/hive-server2/bin:/usr/bin'],
+ path = ['/bin:/usr/hdp/current/hive-server2/bin:/usr/hdp/2.1.0.0-1234/hadoop/bin'],
)
self.assertResourceCalled('Execute', '/usr/jdk64/jdk1.7.0_45/bin/java -cp /usr/lib/ambari-agent/DBConnectionVerification.jar:/usr/hdp/current/hive-server2/lib/mysql-connector-java.jar org.apache.ambari.server.DBConnectionVerification \'jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true\' hive \'!`"\'"\'"\' 1\' com.mysql.jdbc.Driver',
path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
@@ -194,7 +206,7 @@ class TestHiveMetastore(RMFTestCase):
)
self.assertResourceCalled('XmlConfig', 'hive-site.xml',
group = 'hadoop',
- conf_dir = '/etc/hive/conf.server',
+ conf_dir = '/usr/hdp/current/hive-server2/conf/conf.server',
mode = 0600,
configuration_attributes = {u'final': {u'hive.optimize.bucketmapjoin.sortedmerge': u'true',
u'javax.jdo.option.ConnectionDriverName': u'true',
@@ -202,7 +214,7 @@ class TestHiveMetastore(RMFTestCase):
owner = 'hive',
configurations = self.getConfig()['configurations']['hive-site'],
)
- self.assertResourceCalled('File', '/etc/hive/conf.server/hive-env.sh',
+ self.assertResourceCalled('File', '/usr/hdp/current/hive-server2/conf/conf.server/hive-env.sh',
content = InlineTemplate(self.getConfig()['configurations']['hive-env']['content']),
owner = 'hive',
group = 'hadoop',
@@ -233,7 +245,7 @@ class TestHiveMetastore(RMFTestCase):
content = DownloadSource('http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar'),
mode = 0644,
)
- self.assertResourceCalled('File', '/etc/hive/conf.server/hadoop-metrics2-hivemetastore.properties',
+ self.assertResourceCalled('File', '/usr/hdp/current/hive-server2/conf/conf.server/hadoop-metrics2-hivemetastore.properties',
owner = 'hive',
group = 'hadoop',
content = Template('hadoop-metrics2-hivemetastore.properties.j2'),
@@ -308,7 +320,7 @@ class TestHiveMetastore(RMFTestCase):
)
self.assertResourceCalled('XmlConfig', 'hive-site.xml',
group = 'hadoop',
- conf_dir = '/etc/hive/conf.server',
+ conf_dir = '/usr/hdp/current/hive-server2/conf/conf.server',
mode = 0600,
configuration_attributes = {u'final': {u'hive.optimize.bucketmapjoin.sortedmerge': u'true',
u'javax.jdo.option.ConnectionDriverName': u'true',
@@ -316,7 +328,7 @@ class TestHiveMetastore(RMFTestCase):
owner = 'hive',
configurations = self.getConfig()['configurations']['hive-site'],
)
- self.assertResourceCalled('File', '/etc/hive/conf.server/hive-env.sh',
+ self.assertResourceCalled('File', '/usr/hdp/current/hive-server2/conf/conf.server/hive-env.sh',
content = InlineTemplate(self.getConfig()['configurations']['hive-env']['content']),
owner = 'hive',
group = 'hadoop',
@@ -333,7 +345,7 @@ class TestHiveMetastore(RMFTestCase):
group = 'root',
mode = 0644,
)
- self.assertResourceCalled('File', '/etc/hive/conf.server/zkmigrator_jaas.conf',
+ self.assertResourceCalled('File', '/usr/hdp/current/hive-server2/conf/conf.server/zkmigrator_jaas.conf',
content = Template('zkmigrator_jaas.conf.j2'),
owner = 'hive',
group = 'hadoop',
@@ -352,7 +364,7 @@ class TestHiveMetastore(RMFTestCase):
content = DownloadSource('http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar'),
mode = 0644,
)
- self.assertResourceCalled('File', '/etc/hive/conf.server/hadoop-metrics2-hivemetastore.properties',
+ self.assertResourceCalled('File', '/usr/hdp/current/hive-server2/conf/conf.server/hadoop-metrics2-hivemetastore.properties',
owner = 'hive',
group = 'hadoop',
content = Template('hadoop-metrics2-hivemetastore.properties.j2'),
@@ -385,8 +397,8 @@ class TestHiveMetastore(RMFTestCase):
)
def assert_init_schema(self):
- self.assertResourceCalled('Execute', 'export HIVE_CONF_DIR=/etc/hive/conf.server ; /usr/hdp/current/hive-server2/bin/schematool -initSchema -dbType mysql -userName hive -passWord \'!`"\'"\'"\' 1\' -verbose',
- not_if = 'ambari-sudo.sh su hive -l -s /bin/bash -c \'[RMF_EXPORT_PLACEHOLDER]export HIVE_CONF_DIR=/etc/hive/conf.server ; /usr/hdp/current/hive-server2/bin/schematool -info -dbType mysql -userName hive -passWord \'"\'"\'!`"\'"\'"\'"\'"\'"\'"\'"\'"\' 1\'"\'"\' -verbose\'',
+ self.assertResourceCalled('Execute', 'export HIVE_CONF_DIR=/usr/hdp/current/hive-server2/conf/conf.server ; /usr/hdp/current/hive-server2/bin/schematool -initSchema -dbType mysql -userName hive -passWord \'!`"\'"\'"\' 1\' -verbose',
+ not_if = 'ambari-sudo.sh su hive -l -s /bin/bash -c \'[RMF_EXPORT_PLACEHOLDER]export HIVE_CONF_DIR=/usr/hdp/current/hive-server2/conf/conf.server ; /usr/hdp/current/hive-server2/bin/schematool -info -dbType mysql -userName hive -passWord \'"\'"\'!`"\'"\'"\'"\'"\'"\'"\'"\'"\' 1\'"\'"\' -verbose\'',
user = 'hive',
)
@@ -540,7 +552,7 @@ class TestHiveMetastore(RMFTestCase):
environment = {'HADOOP_HOME': '/usr/hdp/2.3.0.0-1234/hadoop', 'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45', 'HIVE_BIN': '/usr/hdp/current/hive-server2/bin/hive'},
not_if = None,
user = 'hive',
- path = ['/bin:/usr/hdp/current/hive-server2/bin:/usr/hdp/current/hadoop-client/bin'])
+ path = ['/bin:/usr/hdp/current/hive-server2/bin:/usr/hdp/2.3.0.0-1234/hadoop/bin'])
self.assertResourceCalled('Execute', '/usr/jdk64/jdk1.7.0_45/bin/java -cp /usr/lib/ambari-agent/DBConnectionVerification.jar:/usr/hdp/current/hive-server2/lib/mysql-connector-java.jar org.apache.ambari.server.DBConnectionVerification \'jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true\' hive aaa com.mysql.jdbc.Driver',
path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],