Posted to commits@ambari.apache.org by ad...@apache.org on 2018/08/10 13:52:35 UTC
[ambari] branch trunk updated: AMBARI-24434. Cannot deploy HBase without HDFS (#2021)
This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git
The following commit(s) were added to refs/heads/trunk by this push:
new ec55bee AMBARI-24434. Cannot deploy HBase without HDFS (#2021)
ec55bee is described below
commit ec55beefd4cc16eb0242eb0de1326faac2b39e6d
Author: Doroszlai, Attila <64...@users.noreply.github.com>
AuthorDate: Fri Aug 10 15:52:31 2018 +0200
AMBARI-24434. Cannot deploy HBase without HDFS (#2021)
---
.../controller/internal/BlueprintConfigurationProcessor.java | 2 +-
.../common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py | 10 +++++++++-
.../HBASE/0.96.0.2.0/package/scripts/params_linux.py | 2 +-
3 files changed, 11 insertions(+), 3 deletions(-)
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
index 985d2f0..6e1ec46 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
@@ -2905,7 +2905,7 @@ public class BlueprintConfigurationProcessor {
hdfsSiteMap.put("dfs.namenode.https-address", new SingleHostTopologyUpdater("NAMENODE"));
hdfsSiteMap.put("dfs.namenode.rpc-address", new SingleHostTopologyUpdater("NAMENODE"));
coreSiteMap.put("fs.defaultFS", new SingleHostTopologyUpdater("NAMENODE"));
- hbaseSiteMap.put("hbase.rootdir", new SingleHostTopologyUpdater("NAMENODE"));
+ hbaseSiteMap.put("hbase.rootdir", new OptionalSingleHostTopologyUpdater("NAMENODE"));
accumuloSiteMap.put("instance.volumes", new SingleHostTopologyUpdater("NAMENODE"));
// HDFS shared.edits JournalNode Quorum URL uses semi-colons as separators
multiHdfsSiteMap.put("dfs.namenode.shared.edits.dir", new MultipleHostTopologyUpdater("JOURNALNODE", ';', false, false, true));
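Context for the change above: SingleHostTopologyUpdater expects exactly one NAMENODE host in the blueprint topology, so resolving hbase.rootdir would presumably fail for blueprints that deploy HBase without HDFS; the optional variant tolerates the component being absent. A rough Python sketch of that intent only (simplified assumptions: the real classes are Java members of BlueprintConfigurationProcessor, and the %HOSTGROUP% token handling here is illustrative, not Ambari's actual resolution logic):

    def resolve_single_host(value, topology, component, optional=False):
        # topology maps hostname -> set of components installed on that host
        hosts = [h for h, comps in topology.items() if component in comps]
        if len(hosts) == 1:
            # substitute the resolved hostname for the host-group token
            return value.replace("%HOSTGROUP::host_group_1%", hosts[0])
        if optional:
            return value  # no NAMENODE anywhere: leave the property untouched
        raise ValueError("expected exactly one %s host, found %d"
                         % (component, len(hosts)))

With optional=True, a topology without a NAMENODE simply leaves hbase.rootdir as authored in the blueprint instead of aborting configuration processing.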
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py
index 9bf990d..d086baf 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py
@@ -208,7 +208,7 @@ def hbase(name=None):
group=params.user_group,
owner=params.hbase_user
)
- if name == "master":
+ if name == "master" and params.default_fs:
if not params.hbase_hdfs_root_dir_protocol or params.hbase_hdfs_root_dir_protocol == urlparse(params.default_fs).scheme:
params.HdfsResource(params.hbase_hdfs_root_dir,
type="directory",
@@ -230,6 +230,14 @@ def hbase(name=None):
)
params.HdfsResource(None, action="execute")
+ if name in ('master', 'regionserver') and not params.default_fs:
+ Directory(params.hbase_staging_dir,
+ owner = params.hbase_user,
+ create_parents = True,
+ cd_access = "a",
+ mode = 0711,
+ )
+
if params.phoenix_enabled:
Package(params.phoenix_package,
retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
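Effect of the script change above: the master creates hbase.rootdir via HdfsResource only when fs.defaultFS is configured, and both master and regionserver instead create the staging directory on the local filesystem (the new Directory block) when it is not. A minimal sketch mirroring the patched condition, using a hypothetical helper name:

    try:
        from urlparse import urlparse        # Python 2, as in these scripts
    except ImportError:
        from urllib.parse import urlparse    # Python 3 fallback

    def needs_hdfs_root_dir(default_fs, root_dir_protocol):
        """True when the master should create hbase.rootdir via HdfsResource."""
        if not default_fs:
            return False  # no HDFS deployed: skip HdfsResource entirely
        # create the dir only when its scheme matches the default filesystem
        return (not root_dir_protocol
                or root_dir_protocol == urlparse(default_fs).scheme)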
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
index 0f01ea3..911700b 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
@@ -260,7 +260,7 @@ hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
hdfs_site = config['configurations']['hdfs-site']
-default_fs = config['configurations']['core-site']['fs.defaultFS']
+default_fs = default('configurations/core-site/fs.defaultFS', None)
dfs_type = default("/clusterLevelParams/dfs_type", "")
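The params change above guards against missing configuration: the direct lookup config['configurations']['core-site']['fs.defaultFS'] raises a KeyError when core-site is absent (a cluster with no HDFS), whereas the default() helper returns the given fallback. A rough standalone approximation of that helper (the real one, in resource_management.libraries.functions.default, reads the global script config; this version takes the config dict explicitly):

    def default(path, fallback, config):
        # walk a '/'-separated path into nested dicts, e.g.
        # default('configurations/core-site/fs.defaultFS', None, config)
        node = config
        for key in path.strip('/').split('/'):
            if not isinstance(node, dict) or key not in node:
                return fallback
            node = node[key]
        return node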