You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by ay...@apache.org on 2019/08/24 03:32:35 UTC
[hadoop] branch trunk updated: HDFS-14722. RBF: GetMountPointStatus
should return mountTable information when getFileInfoAll throws IOException.
Contributed by xuzq.
This is an automated email from the ASF dual-hosted git repository.
ayushsaxena pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/trunk by this push:
new d2225c8 HDFS-14722. RBF: GetMountPointStatus should return mountTable information when getFileInfoAll throw IOException. Contributed by xuzq.
d2225c8 is described below
commit d2225c8ca8f9bdc5cef7266697518394d8763c88
Author: Ayush Saxena <ay...@apache.org>
AuthorDate: Sat Aug 24 08:47:43 2019 +0530
HDFS-14722. RBF: GetMountPointStatus should return mountTable information when getFileInfoAll throw IOException. Contributed by xuzq.
---
.../federation/router/RouterClientProtocol.java | 8 +--
.../federation/router/TestRouterMountTable.java | 57 ++++++++++++++++++++++
2 files changed, 61 insertions(+), 4 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
index 7df54c1..9b75b48 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
@@ -1913,6 +1913,10 @@ public class RouterClientProtocol implements ClientProtocol {
MountTableResolver mountTable = (MountTableResolver) subclusterResolver;
MountTable entry = mountTable.getMountPoint(mName);
if (entry != null) {
+ permission = entry.getMode();
+ owner = entry.getOwnerName();
+ group = entry.getGroupName();
+
RemoteMethod method = new RemoteMethod("getFileInfo",
new Class<?>[] {String.class}, new RemoteParam());
HdfsFileStatus fInfo = getFileInfoAll(
@@ -1922,10 +1926,6 @@ public class RouterClientProtocol implements ClientProtocol {
owner = fInfo.getOwner();
group = fInfo.getGroup();
childrenNum = fInfo.getChildrenNum();
- } else {
- permission = entry.getMode();
- owner = entry.getOwnerName();
- group = entry.getGroupName();
}
}
} catch (IOException e) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
index b745ecd..b66ff5e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
@@ -24,6 +24,7 @@ import static org.junit.Assert.fail;
import java.io.FileNotFoundException;
import java.io.IOException;
+import java.net.URISyntaxException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
@@ -53,6 +54,7 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntr
import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.LambdaTestUtils;
import org.apache.hadoop.util.Time;
import org.junit.After;
@@ -255,6 +257,61 @@ public class TestRouterMountTable {
}
/**
+ * Verify that the file/dir status falls back to the mount table entry when an IOException occurs in getMountPointStatus.
+ */
+ @Test
+ public void testGetMountPointStatusWithIOException()
+ throws IOException, InterruptedException {
+ try {
+ // Add mount table entry.
+ MountTable addEntry = MountTable.newInstance("/testA",
+ Collections.singletonMap("ns0", "/testA"));
+ assertTrue(addMountTable(addEntry));
+ addEntry = MountTable.newInstance("/testA/testB",
+ Collections.singletonMap("ns0", "/testA/testB"));
+ assertTrue(addMountTable(addEntry));
+ addEntry = MountTable.newInstance("/testB",
+ Collections.singletonMap("ns0", "/test1/testB"));
+ addEntry.setOwnerName("userB");
+ addEntry.setGroupName("groupB");
+ assertTrue(addMountTable(addEntry));
+
+ assertTrue(nnFs0.mkdirs(new Path("/test1")));
+ nnFs0.setPermission(new Path("/test1"),
+ FsPermission.createImmutable((short) 0700));
+
+ // Use a mock user to invoke getListing through the router.
+ UserGroupInformation user = UserGroupInformation.createUserForTesting(
+ "mock_user", new String[] {"mock_group"});
+ LambdaTestUtils.doAs(user, () -> getListing("/testA"));
+ } finally {
+ nnFs0.delete(new Path("/test1"), true);
+ }
+ }
+
+ /**
+ * GetListing of testPath through router.
+ */
+ private void getListing(String testPath)
+ throws IOException, URISyntaxException {
+ ClientProtocol clientProtocol1 =
+ routerContext.getClient().getNamenode();
+ DirectoryListing listing = clientProtocol1.getListing(testPath,
+ HdfsFileStatus.EMPTY_NAME, false);
+
+ assertEquals(1, listing.getPartialListing().length);
+ HdfsFileStatus fileStatus = listing.getPartialListing()[0];
+ String currentOwner = fileStatus.getOwner();
+ String currentGroup = fileStatus.getGroup();
+ String currentFileName =
+ fileStatus.getFullPath(new Path("/")).getName();
+
+ assertEquals("testB", currentFileName);
+ assertEquals("userB", currentOwner);
+ assertEquals("groupB", currentGroup);
+ }
+
+ /**
* Verify permission for a mount point when the actual destination is not
* present. It returns the permissions of the mount point.
*/
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org