You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by in...@apache.org on 2019/06/27 16:58:04 UTC
[hadoop] branch trunk updated: HDFS-14036. RBF: Add
hdfs-rbf-default.xml to HdfsConfiguration by default. Contributed by
Takanobu Asanuma.
This is an automated email from the ASF dual-hosted git repository.
inigoiri pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/trunk by this push:
new 041e0c0 HDFS-14036. RBF: Add hdfs-rbf-default.xml to HdfsConfiguration by default. Contributed by Takanobu Asanuma.
041e0c0 is described below
commit 041e0c056453612c613ee71b59823b67dc3122d1
Author: Inigo Goiri <in...@apache.org>
AuthorDate: Thu Jun 27 09:57:52 2019 -0700
HDFS-14036. RBF: Add hdfs-rbf-default.xml to HdfsConfiguration by default. Contributed by Takanobu Asanuma.
---
.../main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java | 1 +
.../hdfs/server/federation/router/RouterClientProtocol.java | 2 +-
.../apache/hadoop/fs/contract/router/RouterHDFSContract.java | 1 -
.../hadoop/fs/contract/router/web/RouterWebHDFSContract.java | 1 -
.../hadoop/hdfs/server/federation/MiniRouterDFSCluster.java | 11 ++++++++++-
5 files changed, 12 insertions(+), 4 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
index 4d4ade2..ec615ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
@@ -35,6 +35,7 @@ public class HdfsConfiguration extends Configuration {
// adds the default resources
Configuration.addDefaultResource("hdfs-default.xml");
+ Configuration.addDefaultResource("hdfs-rbf-default.xml");
Configuration.addDefaultResource("hdfs-site.xml");
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
index 9d33608..5e4bd2a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
@@ -153,7 +153,7 @@ public class RouterClientProtocol implements ClientProtocol {
this.mountStatusTimeOut = conf.getTimeDuration(
RBFConfigKeys.DFS_ROUTER_CLIENT_MOUNT_TIME_OUT,
RBFConfigKeys.DFS_ROUTER_CLIENT_MOUNT_TIME_OUT_DEFAULT,
- TimeUnit.SECONDS);
+ TimeUnit.MILLISECONDS);
// User and group for reporting
try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/RouterHDFSContract.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/RouterHDFSContract.java
index 572da90..eaf874b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/RouterHDFSContract.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/RouterHDFSContract.java
@@ -58,7 +58,6 @@ public class RouterHDFSContract extends HDFSContract {
// Start NNs and DNs and wait until ready
cluster.startCluster(conf);
- cluster.addRouterOverrides(conf);
// Start routers with only an RPC service
cluster.startRouters();
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/RouterWebHDFSContract.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/RouterWebHDFSContract.java
index 4e205df..1d30807 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/RouterWebHDFSContract.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/RouterWebHDFSContract.java
@@ -68,7 +68,6 @@ public class RouterWebHDFSContract extends HDFSContract {
// Start NNs and DNs and wait until ready
cluster.startCluster(conf);
- cluster.addRouterOverrides(conf);
// Start routers with only an RPC service
cluster.startRouters();
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java
index f0bf271..a770e36 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java
@@ -136,6 +136,8 @@ public class MiniRouterDFSCluster {
/** Cache flush interval in milliseconds. */
private long cacheFlushInterval;
+ /** Initial router configuration values. */
+ private Configuration routerConf;
/** Router configuration overrides. */
private Configuration routerOverrides;
/** Namenode configuration overrides. */
@@ -533,7 +535,12 @@ public class MiniRouterDFSCluster {
*/
public Configuration generateRouterConfiguration(String nsId, String nnId) {
- Configuration conf = new HdfsConfiguration(false);
+ Configuration conf;
+ if (this.routerConf == null) {
+ conf = new Configuration(false);
+ } else {
+ conf = new Configuration(routerConf);
+ }
conf.addResource(generateNamenodeConfiguration(nsId));
conf.setInt(DFS_ROUTER_HANDLER_COUNT_KEY, 10);
@@ -778,6 +785,8 @@ public class MiniRouterDFSCluster {
Configuration nnConf = generateNamenodeConfiguration(ns0);
if (overrideConf != null) {
nnConf.addResource(overrideConf);
+ // Router also uses this configuration as initial values.
+ routerConf = new Configuration(overrideConf);
}
cluster = new MiniDFSCluster.Builder(nnConf)
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org