Posted to common-commits@hadoop.apache.org by um...@apache.org on 2020/07/31 08:42:21 UTC

[hadoop] branch branch-3.1 updated: HDFS-15464: ViewFsOverloadScheme should work when -fs option pointing to remote cluster without mount links (#2132). Contributed by Uma Maheswara Rao G.

This is an automated email from the ASF dual-hosted git repository.

umamahesh pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
     new 544602e  HDFS-15464: ViewFsOverloadScheme should work when -fs option pointing to remote cluster without mount links (#2132). Contributed by Uma Maheswara Rao G.
544602e is described below

commit 544602e3d16a9a6e47c8851444f682d1fd4491d9
Author: Uma Maheswara Rao G <um...@apache.org>
AuthorDate: Sat Jul 11 23:50:04 2020 -0700

    HDFS-15464: ViewFsOverloadScheme should work when -fs option pointing to remote cluster without mount links (#2132). Contributed by Uma Maheswara Rao G.
    
    (cherry picked from commit 3e700066394fb9f516e23537d8abb4661409cae1)
---
 .../java/org/apache/hadoop/fs/FsConstants.java     |  2 ++
 .../org/apache/hadoop/fs/viewfs/InodeTree.java     | 22 +++++++++---
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java    | 13 +++++++-
 .../fs/viewfs/ViewFileSystemOverloadScheme.java    | 12 +++++++
 .../java/org/apache/hadoop/fs/viewfs/ViewFs.java   | 16 +++++++--
 .../apache/hadoop/fs/viewfs/TestViewFsConfig.java  |  2 +-
 .../viewfs/TestViewFsOverloadSchemeListStatus.java | 39 ++++++++++++++++------
 .../src/site/markdown/ViewFsOverloadScheme.md      |  3 +-
 ...stViewFileSystemOverloadSchemeWithDFSAdmin.java | 20 +++++++----
 9 files changed, 102 insertions(+), 27 deletions(-)
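
For context, the scenario this change enables can be sketched roughly as follows. This is a minimal illustration, not code from the patch: the cluster name "remotecluster" is hypothetical, and no fs.viewfs.mounttable.remotecluster.* links are configured.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class OverloadSchemeNoMountsExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Route the hdfs scheme through the overload scheme implementation.
        conf.set("fs.hdfs.impl",
            "org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme");
        // Concrete target implementation for hdfs, following the
        // fs.viewfs.overload.scheme.target.%s.impl pattern from FsConstants.
        conf.set("fs.viewfs.overload.scheme.target.hdfs.impl",
            "org.apache.hadoop.hdfs.DistributedFileSystem");
        // No fs.viewfs.mounttable.remotecluster.* links are set. Before this
        // patch, initialization failed with "Empty Mount table in config";
        // with it, the initializing URI itself is treated as the fallback.
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://remotecluster/"),
            conf)) {
          fs.listStatus(new Path("/")); // served by the fallback, i.e. the remote cluster
        }
      }
    }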

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsConstants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsConstants.java
index 07c16b2..344048f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsConstants.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsConstants.java
@@ -44,4 +44,6 @@ public interface FsConstants {
   public static final String VIEWFS_SCHEME = "viewfs";
   String FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN =
       "fs.viewfs.overload.scheme.target.%s.impl";
+  String VIEWFS_TYPE = "viewfs";
+  String VIEWFSOS_TYPE = "viewfsOverloadScheme";
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
index 3d709b1..422e733 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -67,7 +68,7 @@ abstract class InodeTree<T> {
   // the root of the mount table
   private final INode<T> root;
   // the fallback filesystem
-  private final INodeLink<T> rootFallbackLink;
+  private INodeLink<T> rootFallbackLink;
   // the homedir for this mount table
   private final String homedirPrefix;
   private List<MountPoint<T>> mountPoints = new ArrayList<MountPoint<T>>();
@@ -460,7 +461,8 @@ abstract class InodeTree<T> {
    * @throws FileAlreadyExistsException
    * @throws IOException
    */
-  protected InodeTree(final Configuration config, final String viewName)
+  protected InodeTree(final Configuration config, final String viewName,
+      final URI theUri, boolean initingUriAsFallbackOnNoMounts)
       throws UnsupportedFileSystemException, URISyntaxException,
       FileAlreadyExistsException, IOException {
     String mountTableName = viewName;
@@ -596,9 +598,19 @@ abstract class InodeTree<T> {
     }
 
     if (!gotMountTableEntry) {
-      throw new IOException(
-          "ViewFs: Cannot initialize: Empty Mount table in config for " +
-              "viewfs://" + mountTableName + "/");
+      if (!initingUriAsFallbackOnNoMounts) {
+        throw new IOException(
+            "ViewFs: Cannot initialize: Empty Mount table in config for "
+                + "viewfs://" + mountTableName + "/");
+      }
+      StringBuilder msg =
+          new StringBuilder("Empty mount table detected for ").append(theUri)
+              .append(" and considering itself as a linkFallback.");
+      FileSystem.LOG.info(msg.toString());
+      rootFallbackLink =
+          new INodeLink<T>(mountTableName, ugi, getTargetFileSystem(theUri),
+              theUri);
+      getRootDir().addFallbackLink(rootFallbackLink);
     }
   }
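
The hunk above synthesizes a root fallback link whenever the mount table is empty and initingUriAsFallbackOnNoMounts is true. In effect it behaves as if the client had configured a linkFallback entry by hand; a rough sketch of that manual equivalent, using the hypothetical mount table name "remotecluster":

    // Manual equivalent of the synthesized fallback (not part of the patch):
    // point linkFallback for mount table "remotecluster" at the initializing URI.
    Configuration conf = new Configuration();
    conf.set("fs.viewfs.mounttable.remotecluster.linkFallback",
        "hdfs://remotecluster/");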
 
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index 1ca1759..484581c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -257,6 +257,14 @@ public class ViewFileSystem extends FileSystem {
   }
 
   /**
+   * Returns the ViewFileSystem type.
+   * @return <code>viewfs</code>
+   */
+  String getType() {
+    return FsConstants.VIEWFS_TYPE;
+  }
+
+  /**
    * Called after a new FileSystem instance is constructed.
    * @param theUri a uri whose authority section names the host, port, etc. for
    *        this FileSystem
@@ -282,7 +290,10 @@ public class ViewFileSystem extends FileSystem {
     }
     try {
       myUri = new URI(getScheme(), authority, "/", null, null);
-      fsState = new InodeTree<FileSystem>(conf, tableName) {
+      boolean initingUriAsFallbackOnNoMounts =
+          !FsConstants.VIEWFS_TYPE.equals(getType());
+      fsState = new InodeTree<FileSystem>(conf, tableName, theUri,
+          initingUriAsFallbackOnNoMounts) {
         @Override
         protected FileSystem getTargetFileSystem(final URI uri)
           throws URISyntaxException, IOException {
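
Because getType() drives initingUriAsFallbackOnNoMounts here, the new fallback applies only to the overload scheme; plain viewfs keeps its fail-fast behavior on an empty mount table. A small sketch of the contrast, with the hypothetical authority "clusterX":

    // Plain viewfs:// with no mount links still fails fast ...
    try {
      FileSystem.get(URI.create("viewfs://clusterX/"), new Configuration());
    } catch (IOException e) {
      // expected: "ViewFs: Cannot initialize: Empty Mount table in config for
      // viewfs://clusterX/"
    }
    // ... while ViewFileSystemOverloadScheme (see the earlier sketch) now
    // initializes and uses the initializing URI as its fallback target.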
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java
index 2f3359d..2165a3f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java
@@ -95,6 +95,10 @@ import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN
  * be considered as the mount table name. When the passed uri has hostname:port,
  * it will simply ignore the port number and only hostname will be considered as
  * the mount table name.
+ * (3) If there are no mount links configured with the initializing uri's
+ * hostname as the mount table name, then it will automatically consider the
+ * current uri as fallback (ex: fs.viewfs.mounttable.<mycluster>.linkFallback)
+ * target fs uri.
  *****************************************************************************/
 @InterfaceAudience.LimitedPrivate({ "MapReduce", "HBase", "Hive" })
 @InterfaceStability.Evolving
@@ -109,6 +113,14 @@ public class ViewFileSystemOverloadScheme extends ViewFileSystem {
     return myUri.getScheme();
   }
 
+  /**
+   * Returns the ViewFileSystemOverloadScheme type.
+   * @return <code>viewfsOverloadScheme</code>
+   */
+  String getType() {
+    return FsConstants.VIEWFSOS_TYPE;
+  }
+
   @Override
   public void initialize(URI theUri, Configuration conf) throws IOException {
     this.myUri = theUri;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
index 598a66d..4e40bc6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
@@ -196,7 +196,16 @@ public class ViewFs extends AbstractFileSystem {
       return targets;
     }
   }
-  
+
+  /**
+   * Returns the ViewFileSystem type.
+   *
+   * @return <code>viewfs</code>
+   */
+  String getType() {
+    return FsConstants.VIEWFS_TYPE;
+  }
+
   public ViewFs(final Configuration conf) throws IOException,
       URISyntaxException {
     this(FsConstants.VIEWFS_URI, conf);
@@ -222,7 +231,10 @@ public class ViewFs extends AbstractFileSystem {
             CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS_DEFAULT);
     // Now build  client side view (i.e. client side mount table) from config.
     String authority = theUri.getAuthority();
-    fsState = new InodeTree<AbstractFileSystem>(conf, authority) {
+    boolean initingUriAsFallbackOnNoMounts =
+        !FsConstants.VIEWFS_TYPE.equals(getType());
+    fsState = new InodeTree<AbstractFileSystem>(conf, authority, theUri,
+        initingUriAsFallbackOnNoMounts) {
 
       @Override
       protected AbstractFileSystem getTargetFileSystem(final URI uri)
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsConfig.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsConfig.java
index 136837f..56f5b2d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsConfig.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsConfig.java
@@ -39,7 +39,7 @@ public class TestViewFsConfig {
     class Foo {
     }
 
-    new InodeTree<Foo>(conf, null) {
+    new InodeTree<Foo>(conf, null, null, false) {
 
       @Override
       protected Foo getTargetFileSystem(final URI uri) {
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsOverloadSchemeListStatus.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsOverloadSchemeListStatus.java
index 0cf6914..300fdd8 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsOverloadSchemeListStatus.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsOverloadSchemeListStatus.java
@@ -46,9 +46,17 @@ public class TestViewFsOverloadSchemeListStatus {
 
   private static final File TEST_DIR =
       GenericTestUtils.getTestDir(TestViewfsFileStatus.class.getSimpleName());
+  private Configuration conf;
+  private static final String FILE_NAME = "file";
 
   @Before
   public void setUp() {
+    conf = new Configuration();
+    conf.set(String.format("fs.%s.impl", FILE_NAME),
+        ViewFileSystemOverloadScheme.class.getName());
+    conf.set(String
+        .format(FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN,
+            FILE_NAME), LocalFileSystem.class.getName());
     FileUtil.fullyDelete(TEST_DIR);
     assertTrue(TEST_DIR.mkdirs());
   }
@@ -77,15 +85,9 @@ public class TestViewFsOverloadSchemeListStatus {
     File childDir = new File(TEST_DIR, childDirectoryName);
     childDir.mkdirs();
 
-    Configuration conf = new Configuration();
     ConfigUtil.addLink(conf, "/file", infile.toURI());
     ConfigUtil.addLink(conf, "/dir", childDir.toURI());
-    String fileScheme = "file";
-    conf.set(String.format("fs.%s.impl", fileScheme),
-        ViewFileSystemOverloadScheme.class.getName());
-    conf.set(String
-        .format(FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN,
-            fileScheme), LocalFileSystem.class.getName());
+
     String fileUriStr = "file:///";
     try (FileSystem vfs = FileSystem.get(new URI(fileUriStr), conf)) {
       assertEquals(ViewFileSystemOverloadScheme.class, vfs.getClass());
@@ -95,9 +97,8 @@ public class TestViewFsOverloadSchemeListStatus {
           .getRawFileSystem(new Path(fileUriStr), conf);
       FileStatus fileStat = localFs.getFileStatus(new Path(infile.getPath()));
       FileStatus dirStat = localFs.getFileStatus(new Path(childDir.getPath()));
-
       for (FileStatus status : statuses) {
-        if (status.getPath().getName().equals(fileScheme)) {
+        if (status.getPath().getName().equals(FILE_NAME)) {
           assertEquals(fileStat.getPermission(), status.getPermission());
         } else {
           assertEquals(dirStat.getPermission(), status.getPermission());
@@ -111,7 +112,7 @@ public class TestViewFsOverloadSchemeListStatus {
 
       statuses = vfs.listStatus(new Path("/"));
       for (FileStatus status : statuses) {
-        if (status.getPath().getName().equals(fileScheme)) {
+        if (status.getPath().getName().equals(FILE_NAME)) {
           assertEquals(FsPermission.valueOf("-rwxr--r--"),
               status.getPermission());
           assertFalse(status.isDirectory());
@@ -124,6 +125,24 @@ public class TestViewFsOverloadSchemeListStatus {
     }
   }
 
+  /**
+   * Tests that ViewFSOverloadScheme should consider initialized fs as fallback
+   * if there are no mount links configured.
+   */
+  @Test(timeout = 30000)
+  public void testViewFSOverloadSchemeWithoutAnyMountLinks() throws Exception {
+    try (FileSystem fs = FileSystem.get(TEST_DIR.toPath().toUri(), conf)) {
+      ViewFileSystemOverloadScheme vfs = (ViewFileSystemOverloadScheme) fs;
+      assertEquals(0, vfs.getMountPoints().length);
+      Path testFallBack = new Path("test", FILE_NAME);
+      assertTrue(vfs.mkdirs(testFallBack));
+      FileStatus[] status = vfs.listStatus(testFallBack.getParent());
+      assertEquals(FILE_NAME, status[0].getPath().getName());
+      assertEquals(testFallBack.getName(),
+          vfs.getFileLinkStatus(testFallBack).getPath().getName());
+    }
+  }
+
   @AfterClass
   public static void cleanup() throws IOException {
     FileUtil.fullyDelete(TEST_DIR);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md
index 38113cb..564bc03 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md
@@ -33,8 +33,9 @@ Mount link configurations key, value formats are same as in [ViewFS Guide](./Vie
 If a user wants to continue use the same fs.defaultFS and wants to have more mount points, then mount link configurations should have the ViewFileSystemOverloadScheme initialized uri's hostname as the mount table name.
 Example if fs.defaultFS is `hdfs://mycluster`, then the mount link configuration key name should be like in the following format `fs.viewfs.mounttable.*mycluster*.link.<mountLinkPath>`.
 Even if the initialized fs uri has hostname:port, it will simply ignore the port number and only consider the hostname as the mount table name.  We will discuss more example configurations in following sections.
+If there are no mount links configured with the initializing uri's hostname as the mount table name, then it will automatically consider the current uri as the fallback (`fs.viewfs.mounttable.*mycluster*.linkFallback`) target fs uri.
 
-Another important improvement with the ViewFileSystemOverloadScheme is, administrators need not copy the `mount-table.xml` configuration file to 1000s of client nodes. Instead they can keep the mount-table configuration file in a Hadoop compatible file system. So, keeping the configuration file in a central place makes administrators life easier as they can update mount-table in single place.
+Another important improvement with the ViewFileSystemOverloadScheme is, administrators need not copy the `mount-table.xml` configuration file to 1000s of client nodes. Instead, they can keep the mount-table configuration file in a Hadoop compatible file system. So, keeping the configuration file in a central place makes administrators life easier as they can update mount-table in single place.
 
 ### Enabling View File System Overload Scheme
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithDFSAdmin.java
index aea4704..39df141 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithDFSAdmin.java
@@ -228,16 +228,22 @@ public class TestViewFileSystemOverloadSchemeWithDFSAdmin {
   }
 
   /**
-   * Tests safemode with ViewFSOverloadScheme, but without mounttables.
+   * Tests safemode get with ViewFSOverloadScheme, but without any mount links
+   * configured. The ViewFSOverloadScheme should consider initialized fs as
+   * fallback fs automatically.
    */
   @Test
-  public void testSafeModeShouldFailWithoutMountTables() throws Exception {
+  public void testGetSafemodeWithoutMountLinksConfigured() throws Exception {
     final DFSAdmin dfsAdmin = new DFSAdmin(conf);
-    String uri = defaultFSURI.toString();
-    redirectStream();
-    int ret = ToolRunner.run(dfsAdmin,
-        new String[] {"-fs", uri, "-safemode", "enter" });
-    assertEquals(-1, ret);
+    try {
+      redirectStream();
+      int ret = ToolRunner.run(dfsAdmin,
+          new String[] {"-fs", defaultFSURI.toString(), "-safemode", "get"});
+      assertOutMsg("Safe mode is OFF", 0);
+      assertEquals(0, ret);
+    } finally {
+      dfsAdmin.close();
+    }
   }
 
   /**

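For completeness, the dfsadmin flow the rewritten test exercises can be sketched as below; "remotecluster" is hypothetical and the overload-scheme keys from the first sketch are assumed to already be in conf.

    // Assumes conf maps the hdfs scheme to ViewFileSystemOverloadScheme and sets
    // fs.viewfs.overload.scheme.target.hdfs.impl, with no mount links configured.
    DFSAdmin dfsAdmin = new DFSAdmin(conf);
    try {
      int ret = ToolRunner.run(dfsAdmin,
          new String[] {"-fs", "hdfs://remotecluster", "-safemode", "get"});
      // Before this patch the command failed (exit code -1) without mount tables;
      // with the fallback it prints "Safe mode is OFF" and returns 0.
      System.out.println("exit=" + ret);
    } finally {
      dfsAdmin.close();
    }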

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org