Posted to common-commits@hadoop.apache.org by in...@apache.org on 2018/09/04 19:19:47 UTC

[1/5] hadoop git commit: HDFS-13857. RBF: Choose to enable the default nameservice to read/write files. Contributed by yanghuafeng.

Repository: hadoop
Updated Branches:
  refs/heads/branch-2 5902c0658 -> 2e744bd31
  refs/heads/branch-2.9 6ed97eba2 -> 2e869cbce
  refs/heads/branch-3.0 5514f02a7 -> 54d3189cf
  refs/heads/branch-3.1 142d878c9 -> a26565960
  refs/heads/trunk 6bbd24901 -> 54f204459


HDFS-13857. RBF: Choose to enable the default nameservice to read/write files. Contributed by yanghuafeng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/54f20445
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/54f20445
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/54f20445

Branch: refs/heads/trunk
Commit: 54f2044595206455484284b43e5976c8a1982aaf
Parents: 6bbd249
Author: Inigo Goiri <in...@apache.org>
Authored: Tue Sep 4 12:17:17 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Tue Sep 4 12:17:17 2018 -0700

----------------------------------------------------------------------
 .../federation/resolver/MountTableResolver.java | 39 +++++++++++++++++++-
 .../server/federation/router/RBFConfigKeys.java |  4 ++
 .../federation/router/RouterRpcServer.java      |  2 +-
 .../src/main/resources/hdfs-rbf-default.xml     |  8 ++++
 .../resolver/TestMountTableResolver.java        | 26 +++++++++++++
 5 files changed, 76 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
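
Before the per-file diffs, a quick illustration of the behavior being added. This is a hedged sketch, not part of the patch: it assumes the single-argument MountTableResolver(Configuration) constructor, and "ns0" and "/data" are placeholder names. With the new key dfs.federation.router.default.nameservice.enable set to false, a path that no mount entry covers is rejected instead of silently falling back to the default nameservice.

  import java.io.IOException;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
  import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;

  public class DefaultNsDisabledSketch {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // Placeholder default nameservice for this sketch.
      conf.set(RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE, "ns0");
      // New key added by this patch; false disallows falling back to ns0.
      conf.setBoolean(RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE, false);

      MountTableResolver resolver = new MountTableResolver(conf);
      try {
        // No mount entry matches /data and the default nameservice is
        // disabled, so lookupLocation() now throws instead of returning
        // a location on ns0.
        resolver.lookupLocation("/data");
      } catch (IOException ioe) {
        // "Cannot find locations for /data, because the default
        // nameservice is disabled to read or write"
        System.out.println(ioe.getMessage());
      }
    }
  }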


http://git-wip-us.apache.org/repos/asf/hadoop/blob/54f20445/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
index d45441f..bdd75c7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.server.federation.resolver;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMESERVICES;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMESERVICE_ID;
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE;
+import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE;
+import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE_DEFAULT;
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE;
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE_DEFAULT;
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_MOUNT_TABLE_CACHE_ENABLE;
@@ -95,6 +97,8 @@ public class MountTableResolver
 
   /** Default nameservice when no mount matches the math. */
   private String defaultNameService = "";
+  /** If use default nameservice to read and write files. */
+  private boolean defaultNSEnable = true;
 
   /** Synchronization for both the tree and the cache. */
   private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
@@ -163,6 +167,10 @@ public class MountTableResolver
         DFS_ROUTER_DEFAULT_NAMESERVICE,
         DFSUtil.getNamenodeNameServiceId(conf));
 
+    this.defaultNSEnable = conf.getBoolean(
+        DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE,
+        DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE_DEFAULT);
+
     if (defaultNameService == null) {
       LOG.warn(
           "{} and {} is not set. Fallback to {} as the default name service.",
@@ -176,9 +184,12 @@ public class MountTableResolver
     }
 
     if (this.defaultNameService.equals("")) {
+      this.defaultNSEnable = false;
       LOG.warn("Default name service is not set.");
     } else {
-      LOG.info("Default name service: {}", this.defaultNameService);
+      String enable = this.defaultNSEnable ? "enabled" : "disabled";
+      LOG.info("Default name service: {}, {} to read or write",
+          this.defaultNameService, enable);
     }
   }
 
@@ -406,13 +417,17 @@ public class MountTableResolver
    * @param path Path to check/insert.
    * @return New remote location.
    */
-  public PathLocation lookupLocation(final String path) {
+  public PathLocation lookupLocation(final String path) throws IOException {
     PathLocation ret = null;
     MountTable entry = findDeepest(path);
     if (entry != null) {
       ret = buildLocation(path, entry);
     } else {
       // Not found, use default location
+      if (!defaultNSEnable) {
+        throw new IOException("Cannot find locations for " + path + ", " +
+            "because the default nameservice is disabled to read or write");
+      }
       RemoteLocation remoteLocation =
           new RemoteLocation(defaultNameService, path, path);
       List<RemoteLocation> locations =
@@ -623,4 +638,24 @@ public class MountTableResolver
     }
     throw new IOException("localCache is null");
   }
+
+  @VisibleForTesting
+  public String getDefaultNameService() {
+    return defaultNameService;
+  }
+
+  @VisibleForTesting
+  public void setDefaultNameService(String defaultNameService) {
+    this.defaultNameService = defaultNameService;
+  }
+
+  @VisibleForTesting
+  public boolean isDefaultNSEnable() {
+    return defaultNSEnable;
+  }
+
+  @VisibleForTesting
+  public void setDefaultNSEnable(boolean defaultNSRWEnable) {
+    this.defaultNSEnable = defaultNSRWEnable;
+  }
 }
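
The complementary case, hedged the same way (this fragment would live in the sketch class above and additionally needs org.apache.hadoop.hdfs.server.federation.resolver.PathLocation): with the key left at its default of true, the pre-patch fall-back behavior is unchanged, which is also what the new test's "3->/unknown" assertion below pins down.

  static void fallbackStillWorks() throws IOException {
    Configuration conf = new Configuration();
    conf.set(RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE, "ns0");
    // The enable key is not set, so it stays at its default of true.
    MountTableResolver resolver = new MountTableResolver(conf);
    // An unmounted path still resolves to the default nameservice; the
    // PathLocation prints in the "nameservice->path" shape the new test
    // asserts, e.g. "ns0->/data".
    PathLocation location = resolver.lookupLocation("/data");
    System.out.println(location);
  }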

http://git-wip-us.apache.org/repos/asf/hadoop/blob/54f20445/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
index 997e1dd..bbd4250 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
@@ -42,6 +42,10 @@ public class RBFConfigKeys extends CommonConfigurationKeysPublic {
       "dfs.federation.router.";
   public static final String DFS_ROUTER_DEFAULT_NAMESERVICE =
       FEDERATION_ROUTER_PREFIX + "default.nameserviceId";
+  public static final String DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE =
+      FEDERATION_ROUTER_PREFIX + "default.nameservice.enable";
+  public static final boolean DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE_DEFAULT =
+      true;
   public static final String DFS_ROUTER_HANDLER_COUNT_KEY =
       FEDERATION_ROUTER_PREFIX + "handler.count";
   public static final int DFS_ROUTER_HANDLER_COUNT_DEFAULT = 10;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/54f20445/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 2deda9f..165b429 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -1339,7 +1339,7 @@ public class RouterRpcServer extends AbstractService
           this.subclusterResolver.getDestinationForPath(path);
       if (location == null) {
         throw new IOException("Cannot find locations for " + path + " in " +
-            this.subclusterResolver);
+            this.subclusterResolver.getClass().getSimpleName());
       }
 
       // We may block some write operations

http://git-wip-us.apache.org/repos/asf/hadoop/blob/54f20445/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml
index ed39d4b..3f56043 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml
@@ -32,6 +32,14 @@
   </property>
 
   <property>
+    <name>dfs.federation.router.default.nameservice.enable</name>
+    <value>true</value>
+    <description>
+      The default subcluster is enabled to read and write files.
+    </description>
+  </property>
+
+  <property>
     <name>dfs.federation.router.rpc.enable</name>
     <value>true</value>
     <description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/54f20445/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
index b19a973..5e3b861 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
@@ -21,6 +21,7 @@ import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDE
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE;
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -175,6 +176,31 @@ public class TestMountTableResolver {
 
   }
 
+  @Test
+  public void testDefaultNameServiceEnable() throws IOException {
+    assertTrue(mountTable.isDefaultNSEnable());
+    mountTable.setDefaultNameService("3");
+    mountTable.removeEntry("/");
+
+    assertEquals("3->/unknown",
+        mountTable.getDestinationForPath("/unknown").toString());
+
+    Map<String, String> map = getMountTableEntry("4", "/unknown");
+    mountTable.addEntry(MountTable.newInstance("/unknown", map));
+    mountTable.setDefaultNSEnable(false);
+    assertFalse(mountTable.isDefaultNSEnable());
+
+    assertEquals("4->/unknown",
+        mountTable.getDestinationForPath("/unknown").toString());
+    try {
+      mountTable.getDestinationForPath("/");
+      fail("The getDestinationForPath call should fail.");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains(
+          "the default nameservice is disabled to read or write", ioe);
+    }
+  }
+
   private void compareLists(List<String> list1, String[] list2) {
     assertEquals(list1.size(), list2.length);
     for (String item : list2) {




[4/5] hadoop git commit: HDFS-13857. RBF: Choose to enable the default nameservice to read/write files. Contributed by yanghuafeng.

Posted by in...@apache.org.
HDFS-13857. RBF: Choose to enable the default nameservice to read/write files. Contributed by yanghuafeng.

(cherry picked from commit 54f2044595206455484284b43e5976c8a1982aaf)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e744bd3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e744bd3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e744bd3

Branch: refs/heads/branch-2
Commit: 2e744bd31d68a8d7aec258a18873f220502d9f54
Parents: 5902c06
Author: Inigo Goiri <in...@apache.org>
Authored: Tue Sep 4 12:17:17 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Tue Sep 4 12:18:38 2018 -0700

----------------------------------------------------------------------
 .../federation/resolver/MountTableResolver.java | 39 +++++++++++++++++++-
 .../server/federation/router/RBFConfigKeys.java |  4 ++
 .../federation/router/RouterRpcServer.java      |  2 +-
 .../src/main/resources/hdfs-rbf-default.xml     |  8 ++++
 .../resolver/TestMountTableResolver.java        | 26 +++++++++++++
 5 files changed, 76 insertions(+), 3 deletions(-)
----------------------------------------------------------------------




[3/5] hadoop git commit: HDFS-13857. RBF: Choose to enable the default nameservice to read/write files. Contributed by yanghuafeng.

Posted by in...@apache.org.
HDFS-13857. RBF: Choose to enable the default nameservice to read/write files. Contributed by yanghuafeng.

(cherry picked from commit 54f2044595206455484284b43e5976c8a1982aaf)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/54d3189c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/54d3189c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/54d3189c

Branch: refs/heads/branch-3.0
Commit: 54d3189cfaf58f366157088a7c6f3d44f026c485
Parents: 5514f02
Author: Inigo Goiri <in...@apache.org>
Authored: Tue Sep 4 12:17:17 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Tue Sep 4 12:18:08 2018 -0700

----------------------------------------------------------------------
 .../federation/resolver/MountTableResolver.java | 39 +++++++++++++++++++-
 .../server/federation/router/RBFConfigKeys.java |  4 ++
 .../federation/router/RouterRpcServer.java      |  2 +-
 .../src/main/resources/hdfs-rbf-default.xml     |  8 ++++
 .../resolver/TestMountTableResolver.java        | 26 +++++++++++++
 5 files changed, 76 insertions(+), 3 deletions(-)
----------------------------------------------------------------------




[2/5] hadoop git commit: HDFS-13857. RBF: Choose to enable the default nameservice to read/write files. Contributed by yanghuafeng.

Posted by in...@apache.org.
HDFS-13857. RBF: Choose to enable the default nameservice to read/write files. Contributed by yanghuafeng.

(cherry picked from commit 54f2044595206455484284b43e5976c8a1982aaf)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a2656596
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a2656596
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a2656596

Branch: refs/heads/branch-3.1
Commit: a26565960a2d3394d4b31528241ee6ae2c85b91a
Parents: 142d878
Author: Inigo Goiri <in...@apache.org>
Authored: Tue Sep 4 12:17:17 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Tue Sep 4 12:17:49 2018 -0700

----------------------------------------------------------------------
 .../federation/resolver/MountTableResolver.java | 39 +++++++++++++++++++-
 .../server/federation/router/RBFConfigKeys.java |  4 ++
 .../federation/router/RouterRpcServer.java      |  2 +-
 .../src/main/resources/hdfs-rbf-default.xml     |  8 ++++
 .../resolver/TestMountTableResolver.java        | 26 +++++++++++++
 5 files changed, 76 insertions(+), 3 deletions(-)
----------------------------------------------------------------------




[5/5] hadoop git commit: HDFS-13857. RBF: Choose to enable the default nameservice to read/write files. Contributed by yanghuafeng.

Posted by in...@apache.org.
HDFS-13857. RBF: Choose to enable the default nameservice to read/write files. Contributed by yanghuafeng.

(cherry picked from commit 54f2044595206455484284b43e5976c8a1982aaf)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e869cbc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e869cbc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e869cbc

Branch: refs/heads/branch-2.9
Commit: 2e869cbce48d41d2e6d4550f0ce238a13f919a0c
Parents: 6ed97eb
Author: Inigo Goiri <in...@apache.org>
Authored: Tue Sep 4 12:17:17 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Tue Sep 4 12:18:49 2018 -0700

----------------------------------------------------------------------
 .../federation/resolver/MountTableResolver.java | 39 +++++++++++++++++++-
 .../server/federation/router/RBFConfigKeys.java |  4 ++
 .../federation/router/RouterRpcServer.java      |  2 +-
 .../src/main/resources/hdfs-rbf-default.xml     |  8 ++++
 .../resolver/TestMountTableResolver.java        | 26 +++++++++++++
 5 files changed, 76 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org