Posted to common-commits@hadoop.apache.org by we...@apache.org on 2021/04/02 09:21:10 UTC

[hadoop] branch branch-3.3 updated (57d2fff -> d973f37)

This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a change to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


    from 57d2fff  HDFS-15246. ArrayIndexOfboundsException in BlockManager CreateLocatedBlock. Contributed by Hemanth Boyina.
     new 327488d  HDFS-15196. RBF: RouterRpcServer getListing cannot list large dirs correctly. Contributed by Fengnan Li.
     new 5bfc3a4  HDFS-15198. RBF: Add test for MountTableRefresherService failed to refresh other router MountTableEntries in secure mode. Contributed by zhengchenyu.
     new a549b4a  HDFS-15300. RBF: updateActiveNamenode() is invalid when RPC address is IP. Contributed by xuzq.
     new f5cc154  HDFS-15510. RBF: Quota and Content Summary was not correct in Multiple Destinations. Contributed by Hemanth Boyina.
     new 2da3356  HDFS-15591. RBF: Fix webHdfs file display error. Contributed by wangzhaohui.
     new 14fddba  HDFS-15252. HttpFS: setWorkingDirectory should not accept invalid paths. Contributed by hemanthboyina.
     new 02d37c7  HDFS-15316. Deletion failure should not remove directory from snapshottables. Contributed by hemanthboyina
     new 887948d  HDFS-15351. Blocks scheduled count was wrong on truncate. Contributed by hemanthboyina.
     new d973f37  HDFS-15362. FileWithSnapshotFeature#updateQuotaAndCollectBlocks should collect all distinct blocks. Contributed by hemanthboyina.

The 9 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../main/java/org/apache/hadoop/net/NetUtils.java  |  16 ++
 .../hadoop/fs/http/client/HttpFSFileSystem.java    |   6 +
 .../hadoop/fs/http/client/BaseTestHttpFSWith.java  |  12 +-
 .../resolver/MembershipNamenodeResolver.java       |   4 +-
 .../federation/router/RouterClientProtocol.java    |  79 +++++++-
 .../server/federation/FederationTestUtils.java     |  13 +-
 .../hdfs/server/federation/MockResolver.java       |  32 +++-
 .../federation/resolver/TestNamenodeResolver.java  |  17 ++
 .../federation/router/TestRouterMountTable.java    |  31 +++
 ...=> TestRouterMountTableCacheRefreshSecure.java} | 208 ++++++++-------------
 ...erRPCMultipleDestinationMountTableResolver.java |  57 ++++++
 .../server/federation/router/TestRouterRpc.java    |  59 ++++++
 .../hdfs/server/blockmanagement/BlockManager.java  |   9 +-
 .../hadoop/hdfs/server/namenode/FSDirDeleteOp.java |   4 +-
 .../namenode/snapshot/FileWithSnapshotFeature.java |   6 +-
 .../hadoop/hdfs/TestBlocksScheduledCounter.java    |  56 ++++++
 .../hdfs/server/namenode/TestDeleteRace.java       |  46 +++++
 .../snapshot/TestFileWithSnapshotFeature.java      |  73 ++++++++
 18 files changed, 569 insertions(+), 159 deletions(-)
 copy hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/{TestRouterMountTableCacheRefresh.java => TestRouterMountTableCacheRefreshSecure.java} (65%)

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[hadoop] 09/09: HDFS-15362. FileWithSnapshotFeature#updateQuotaAndCollectBlocks should collect all distinct blocks. Contributed by hemanthboyina.

Posted by we...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit d973f370547a29f575540595bf79d66057b9a3a5
Author: Inigo Goiri <in...@apache.org>
AuthorDate: Wed May 27 11:06:13 2020 -0700

    HDFS-15362. FileWithSnapshotFeature#updateQuotaAndCollectBlocks should collect all distinct blocks. Contributed by hemanthboyina.
    
    (cherry picked from commit 2148a8fe645333444c4e8110bb56acf0fb8e41b4)
---
 .../namenode/snapshot/FileWithSnapshotFeature.java |  6 +-
 .../snapshot/TestFileWithSnapshotFeature.java      | 73 ++++++++++++++++++++++
 2 files changed, 77 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
index 44c258c..5263ef3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
@@ -17,9 +17,10 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
-import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Set;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.StorageType;
@@ -156,7 +157,8 @@ public class FileWithSnapshotFeature implements INode.Feature {
     QuotaCounts oldCounts;
     if (removed.snapshotINode != null) {
       oldCounts = new QuotaCounts.Builder().build();
-      List<BlockInfo> allBlocks = new ArrayList<BlockInfo>();
+      // collect all distinct blocks
+      Set<BlockInfo> allBlocks = new HashSet<BlockInfo>();
       if (file.getBlocks() != null) {
         allBlocks.addAll(Arrays.asList(file.getBlocks()));
       }
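
The hunk above replaces the ArrayList with a HashSet so that a block referenced both by the live file and by a snapshot diff is counted only once when the quota is recomputed. A minimal standalone sketch of that effect (plain Java; long block IDs and the sample 1024-byte block size stand in for the real BlockInfo objects and are purely illustrative):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    public class DistinctBlocksSketch {
      public static void main(String[] args) {
        final long blockSize = 1024L;
        List<Long> fileBlocks = Arrays.asList(1L, 2L); // blocks of the current file
        List<Long> diffBlocks = Arrays.asList(2L, 3L); // blocks kept by a snapshot diff

        // Pre-patch behaviour: a List keeps the shared block 2 twice and over-counts.
        List<Long> allAsList = new ArrayList<>(fileBlocks);
        allAsList.addAll(diffBlocks);
        System.out.println("list-based space = " + allAsList.size() * blockSize); // 4096

        // Patched behaviour: a Set collects each distinct block exactly once.
        Set<Long> allAsSet = new HashSet<>(fileBlocks);
        allAsSet.addAll(diffBlocks);
        System.out.println("set-based space  = " + allAsSet.size() * blockSize);  // 3072
      }
    }
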
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java
index d91fea9..dd1830f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java
@@ -30,11 +30,13 @@ import org.apache.hadoop.hdfs.server.namenode.QuotaCounts;
 import org.apache.hadoop.test.Whitebox;
 import org.junit.Assert;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 import java.util.ArrayList;
 
 import static org.apache.hadoop.fs.StorageType.DISK;
 import static org.apache.hadoop.fs.StorageType.SSD;
+import static org.junit.Assert.assertEquals;
 import static org.mockito.Mockito.anyByte;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
@@ -94,4 +96,75 @@ public class TestFileWithSnapshotFeature {
     Assert.assertEquals(-BLOCK_SIZE, counts.getTypeSpaces().get(SSD));
   }
 
+  /**
+   * Test update quota with same blocks.
+   */
+  @Test
+  public void testUpdateQuotaDistinctBlocks() {
+    BlockStoragePolicySuite bsps = mock(BlockStoragePolicySuite.class);
+    BlockStoragePolicy bsp = mock(BlockStoragePolicy.class);
+    BlockInfo[] blocks = new BlockInfo[] {
+        new BlockInfoContiguous(new Block(1, BLOCK_SIZE, 1), REPL_3) };
+
+    INodeFile file = mock(INodeFile.class);
+    when(file.getBlocks()).thenReturn(blocks);
+    when(file.getStoragePolicyID()).thenReturn((byte) 1);
+    when(file.getPreferredBlockReplication()).thenReturn((short) 3);
+
+    when(bsps.getPolicy(anyByte())).thenReturn(bsp);
+    INode.BlocksMapUpdateInfo collectedBlocks =
+        mock(INode.BlocksMapUpdateInfo.class);
+    ArrayList<INode> removedINodes = new ArrayList<>();
+    INode.ReclaimContext ctx =
+        new INode.ReclaimContext(bsps, collectedBlocks, removedINodes, null);
+    QuotaCounts counts = ctx.quotaDelta().getCountsCopy();
+    INodeFile snapshotINode = mock(INodeFile.class);
+
+    // add same blocks in file diff
+    FileDiff diff1 = new FileDiff(0, snapshotINode, null, 0);
+    FileDiff diff = Mockito.spy(diff1);
+    Mockito.doReturn(blocks).when(diff).getBlocks();
+
+    // removed file diff
+    FileDiff removed = new FileDiff(0, snapshotINode, null, 0);
+
+    // remaining file diffs
+    FileDiffList diffs = new FileDiffList();
+    diffs.addFirst(diff);
+    FileWithSnapshotFeature sf = new FileWithSnapshotFeature(diffs);
+
+    // update quota and collect same blocks in file and file diff
+    when(file.getFileWithSnapshotFeature()).thenReturn(sf);
+    sf.updateQuotaAndCollectBlocks(ctx, file, removed);
+    counts = ctx.quotaDelta().getCountsCopy();
+    assertEquals(0, counts.getStorageSpace());
+
+    // different blocks in file and file's diff and in removed diff
+    BlockInfo[] blocks1 = new BlockInfo[] {
+        new BlockInfoContiguous(new Block(2, BLOCK_SIZE, 1), REPL_3) };
+    Mockito.doReturn(blocks1).when(diff).getBlocks();
+    // remaining file diffs
+    FileDiffList diffs1 = new FileDiffList();
+    diffs1.addFirst(diff);
+    FileWithSnapshotFeature sf1 = new FileWithSnapshotFeature(diffs1);
+    when(file.getFileWithSnapshotFeature()).thenReturn(sf1);
+    BlockInfo[] removedBlocks = new BlockInfo[] {
+        new BlockInfoContiguous(new Block(3, BLOCK_SIZE, 1), REPL_3) };
+    FileDiff removed1 = new FileDiff(0, snapshotINode, null, 1024);
+    removed1.setBlocks(removedBlocks);
+    INode.ReclaimContext ctx1 =
+        new INode.ReclaimContext(bsps, collectedBlocks, removedINodes, null);
+    sf1.updateQuotaAndCollectBlocks(ctx1, file, removed1);
+    counts = ctx1.quotaDelta().getCountsCopy();
+    assertEquals(3072, counts.getStorageSpace());
+
+    // same blocks in file and removed diff
+    removed1 = new FileDiff(0, snapshotINode, null, 1024);
+    removed1.setBlocks(blocks);
+    INode.ReclaimContext ctx2 =
+        new INode.ReclaimContext(bsps, collectedBlocks, removedINodes, null);
+    sf1.updateQuotaAndCollectBlocks(ctx2, file, removed1);
+    counts = ctx2.quotaDelta().getCountsCopy();
+    assertEquals(0, counts.getStorageSpace());
+  }
 }

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[hadoop] 04/09: HDFS-15510. RBF: Quota and Content Summary was not correct in Multiple Destinations. Contributed by Hemanth Boyina.

Posted by we...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit f5cc1540b4510b6da72b46d075b4463e352d4848
Author: Takanobu Asanuma <ta...@apache.org>
AuthorDate: Thu Aug 27 12:10:39 2020 +0900

    HDFS-15510. RBF: Quota and Content Summary was not correct in Multiple Destinations. Contributed by Hemanth Boyina.
    
    (cherry picked from commit ca8e7a77256003e11ab7e3d079ee4cf9f50080dd)
---
 .../federation/router/RouterClientProtocol.java    |  6 ++-
 ...erRPCMultipleDestinationMountTableResolver.java | 57 ++++++++++++++++++++++
 2 files changed, 61 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
index ec61258..2a0fe7c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
@@ -1857,6 +1857,8 @@ public class RouterClientProtocol implements ClientProtocol {
 
   /**
    * Aggregate content summaries for each subcluster.
+   * If the mount point has multiple destinations
+   * add the quota set value only once.
    *
    * @param summaries Collection of individual summaries.
    * @return Aggregated content summary.
@@ -1879,9 +1881,9 @@ public class RouterClientProtocol implements ClientProtocol {
       length += summary.getLength();
       fileCount += summary.getFileCount();
       directoryCount += summary.getDirectoryCount();
-      quota += summary.getQuota();
+      quota = summary.getQuota();
       spaceConsumed += summary.getSpaceConsumed();
-      spaceQuota += summary.getSpaceQuota();
+      spaceQuota = summary.getSpaceQuota();
       // We return from the first response as we assume that the EC policy
       // of each sub-cluster is same.
       if (ecPolicy.isEmpty()) {
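
In the hunk above, quota and spaceQuota switch from being summed to being taken once: with a multi-destination mount point every subcluster reports the same quota that was set on the mount entry, so adding the values per destination inflated the aggregate, while consumed space remains additive. A small self-contained sketch of that rule (plain Java; the aggregation loop and the sample numbers are hypothetical, not Router code):

    public class QuotaAggregationSketch {
      public static void main(String[] args) {
        // Two destinations of one mount point, each reporting the quota set on the mount.
        long[] quotas = {5, 5};            // name quota reported by ns0 and ns1
        long[] spaceConsumed = {100, 200}; // space actually used in each subcluster

        long quota = 0;
        long consumed = 0;
        for (int i = 0; i < quotas.length; i++) {
          quota = quotas[i];            // take the (identical) quota once, do not sum it
          consumed += spaceConsumed[i]; // usage is additive across subclusters
        }
        System.out.println("quota=" + quota + " consumed=" + consumed); // quota=5 consumed=300
      }
    }
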
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java
index bcab7bb..e644dec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java
@@ -550,6 +550,63 @@ public class TestRouterRPCMultipleDestinationMountTableResolver {
     assertEquals(-1, cs1.getSpaceQuota());
   }
 
+  @Test
+  public void testContentSummaryWithMultipleDest() throws Exception {
+    MountTable addEntry;
+    long nsQuota = 5;
+    long ssQuota = 100;
+    Path path = new Path("/testContentSummaryWithMultipleDest");
+    Map<String, String> destMap = new HashMap<>();
+    destMap.put("ns0", "/testContentSummaryWithMultipleDest");
+    destMap.put("ns1", "/testContentSummaryWithMultipleDest");
+    nnFs0.mkdirs(path);
+    nnFs1.mkdirs(path);
+    addEntry =
+        MountTable.newInstance("/testContentSummaryWithMultipleDest", destMap);
+    addEntry.setQuota(
+        new RouterQuotaUsage.Builder().quota(nsQuota).spaceQuota(ssQuota)
+            .build());
+    assertTrue(addMountTable(addEntry));
+    RouterQuotaUpdateService updateService =
+        routerContext.getRouter().getQuotaCacheUpdateService();
+    updateService.periodicInvoke();
+    ContentSummary cs = routerFs.getContentSummary(path);
+    assertEquals(nsQuota, cs.getQuota());
+    assertEquals(ssQuota, cs.getSpaceQuota());
+    ContentSummary ns0Cs = nnFs0.getContentSummary(path);
+    assertEquals(nsQuota, ns0Cs.getQuota());
+    assertEquals(ssQuota, ns0Cs.getSpaceQuota());
+    ContentSummary ns1Cs = nnFs1.getContentSummary(path);
+    assertEquals(nsQuota, ns1Cs.getQuota());
+    assertEquals(ssQuota, ns1Cs.getSpaceQuota());
+  }
+
+  @Test
+  public void testContentSummaryMultipleDestWithMaxValue()
+      throws Exception {
+    MountTable addEntry;
+    long nsQuota = Long.MAX_VALUE - 2;
+    long ssQuota = Long.MAX_VALUE - 2;
+    Path path = new Path("/testContentSummaryMultipleDestWithMaxValue");
+    Map<String, String> destMap = new HashMap<>();
+    destMap.put("ns0", "/testContentSummaryMultipleDestWithMaxValue");
+    destMap.put("ns1", "/testContentSummaryMultipleDestWithMaxValue");
+    nnFs0.mkdirs(path);
+    nnFs1.mkdirs(path);
+    addEntry = MountTable
+        .newInstance("/testContentSummaryMultipleDestWithMaxValue", destMap);
+    addEntry.setQuota(
+        new RouterQuotaUsage.Builder().quota(nsQuota).spaceQuota(ssQuota)
+            .build());
+    assertTrue(addMountTable(addEntry));
+    RouterQuotaUpdateService updateService =
+        routerContext.getRouter().getQuotaCacheUpdateService();
+    updateService.periodicInvoke();
+    ContentSummary cs = routerFs.getContentSummary(path);
+    assertEquals(nsQuota, cs.getQuota());
+    assertEquals(ssQuota, cs.getSpaceQuota());
+  }
+
   /**
    * Test to verify rename operation on directories in case of multiple
    * destinations.

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[hadoop] 08/09: HDFS-15351. Blocks scheduled count was wrong on truncate. Contributed by hemanthboyina.

Posted by we...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 887948d12715a32228a8196db4b32a0a2394a0a1
Author: Inigo Goiri <in...@apache.org>
AuthorDate: Sat Jun 13 09:35:05 2020 -0700

    HDFS-15351. Blocks scheduled count was wrong on truncate. Contributed by hemanthboyina.
    
    (cherry picked from commit 719b53a79dc169a8c52229831dcb011935a8a151)
---
 .../hdfs/server/blockmanagement/BlockManager.java  |  9 +++-
 .../hadoop/hdfs/TestBlocksScheduledCounter.java    | 56 ++++++++++++++++++++++
 2 files changed, 64 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 7461d46..6fd2343 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1280,7 +1280,14 @@ public class BlockManager implements BlockStatsMXBean {
     neededReconstruction.remove(lastBlock, replicas.liveReplicas(),
         replicas.readOnlyReplicas(),
         replicas.outOfServiceReplicas(), getExpectedRedundancyNum(lastBlock));
-    pendingReconstruction.remove(lastBlock);
+    PendingBlockInfo remove = pendingReconstruction.remove(lastBlock);
+    if (remove != null) {
+      List<DatanodeStorageInfo> locations = remove.getTargets();
+      DatanodeStorageInfo[] removedBlockTargets =
+          new DatanodeStorageInfo[locations.size()];
+      locations.toArray(removedBlockTargets);
+      DatanodeStorageInfo.decrementBlocksScheduled(removedBlockTargets);
+    }
 
     // remove this block from the list of pending blocks to be deleted. 
     for (DatanodeStorageInfo storage : targets) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
index 95f4e2c..95d6825 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
@@ -202,4 +202,60 @@ public class TestBlocksScheduledCounter {
     }
   }
 
+  /**
+   * Test Block Scheduled counter on truncating a file.
+   * @throws Exception
+   */
+  @Test
+  public void testBlocksScheduledCounterOnTruncate() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+    cluster.waitActive();
+    BlockManager bm = cluster.getNamesystem().getBlockManager();
+    try {
+      DistributedFileSystem dfs = cluster.getFileSystem();
+      // 1. stop a datanode
+      cluster.stopDataNode(0);
+
+      // 2. create a file
+      Path filePath = new Path("/tmp");
+      DFSTestUtil.createFile(dfs, filePath, 1024, (short) 3, 0L);
+
+      DatanodeManager datanodeManager =
+          cluster.getNamesystem().getBlockManager().getDatanodeManager();
+      ArrayList<DatanodeDescriptor> dnList =
+          new ArrayList<DatanodeDescriptor>();
+      datanodeManager.fetchDatanodes(dnList, dnList, false);
+
+      // 3. restart the stopped datanode
+      cluster.restartDataNode(0);
+
+      // 4. disable the heartbeats
+      for (DataNode dn : cluster.getDataNodes()) {
+        DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
+      }
+
+      cluster.getNamesystem().writeLock();
+      try {
+        BlockManagerTestUtil.computeAllPendingWork(bm);
+        BlockManagerTestUtil.updateState(bm);
+        assertEquals(1L, bm.getPendingReconstructionBlocksCount());
+      } finally {
+        cluster.getNamesystem().writeUnlock();
+      }
+
+      // 5.truncate the file whose block exists in pending reconstruction
+      dfs.truncate(filePath, 10);
+      int blocksScheduled = 0;
+      for (DatanodeDescriptor descriptor : dnList) {
+        if (descriptor.getBlocksScheduled() != 0) {
+          blocksScheduled += descriptor.getBlocksScheduled();
+        }
+      }
+      assertEquals(0, blocksScheduled);
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }
\ No newline at end of file

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[hadoop] 01/09: HDFS-15196. RBF: RouterRpcServer getListing cannot list large dirs correctly. Contributed by Fengnan Li.

Posted by we...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 327488d688da3ad2f2c4ab74c1e7dbfaac8d3d36
Author: Inigo Goiri <in...@apache.org>
AuthorDate: Mon Mar 30 12:29:21 2020 -0700

    HDFS-15196. RBF: RouterRpcServer getListing cannot list large dirs correctly. Contributed by Fengnan Li.
    
    (cherry picked from commit 80b877a72f52ef0f4acafe15db55b8ed61fbe6d2)
---
 .../federation/router/RouterClientProtocol.java    | 66 ++++++++++++++++++++--
 .../hdfs/server/federation/MockResolver.java       | 32 ++++++++---
 .../server/federation/router/TestRouterRpc.java    | 59 +++++++++++++++++++
 3 files changed, 142 insertions(+), 15 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
index baee979..ec61258 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
@@ -103,6 +103,7 @@ import java.io.IOException;
 import java.net.ConnectException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -771,13 +772,14 @@ public class RouterClientProtocol implements ClientProtocol {
 
     List<RemoteResult<RemoteLocation, DirectoryListing>> listings =
         getListingInt(src, startAfter, needLocation);
-    Map<String, HdfsFileStatus> nnListing = new TreeMap<>();
+    TreeMap<String, HdfsFileStatus> nnListing = new TreeMap<>();
     int totalRemainingEntries = 0;
     int remainingEntries = 0;
     boolean namenodeListingExists = false;
+    // Check the subcluster listing with the smallest name to make sure
+    // no file is skipped across subclusters
+    String lastName = null;
     if (listings != null) {
-      // Check the subcluster listing with the smallest name
-      String lastName = null;
       for (RemoteResult<RemoteLocation, DirectoryListing> result : listings) {
         if (result.hasException()) {
           IOException ioe = result.getException();
@@ -824,6 +826,10 @@ public class RouterClientProtocol implements ClientProtocol {
 
     // Add mount points at this level in the tree
     final List<String> children = subclusterResolver.getMountPoints(src);
+    // Sort the list as the entries from subcluster are also sorted
+    if (children != null) {
+      Collections.sort(children);
+    }
     if (children != null) {
       // Get the dates for each mount point
       Map<String, Long> dates = getMountPointDates(src);
@@ -838,9 +844,27 @@ public class RouterClientProtocol implements ClientProtocol {
         HdfsFileStatus dirStatus =
             getMountPointStatus(childPath.toString(), 0, date);
 
-        // This may overwrite existing listing entries with the mount point
-        // TODO don't add if already there?
-        nnListing.put(child, dirStatus);
+        // if there is no subcluster path, always add mount point
+        if (lastName == null) {
+          nnListing.put(child, dirStatus);
+        } else {
+          if (shouldAddMountPoint(child,
+                lastName, startAfter, remainingEntries)) {
+            // This may overwrite existing listing entries with the mount point
+            // TODO don't add if already there?
+            nnListing.put(child, dirStatus);
+          }
+        }
+      }
+      // Update the remaining count to include left mount points
+      if (nnListing.size() > 0) {
+        String lastListing = nnListing.lastKey();
+        for (int i = 0; i < children.size(); i++) {
+          if (children.get(i).compareTo(lastListing) > 0) {
+            remainingEntries += (children.size() - i);
+            break;
+          }
+        }
       }
     }
 
@@ -2109,6 +2133,36 @@ public class RouterClientProtocol implements ClientProtocol {
   }
 
   /**
+   * Check if we should add the mount point into the total listing.
+   * This should be done under either of the two cases:
+   * 1) current mount point is between startAfter and cutoff lastEntry.
+   * 2) there are no remaining entries from subclusters and this mount
+   *    point is bigger than all files from subclusters
+   * This is to make sure that the following batch of
+   * getListing call will use the correct startAfter, which is lastEntry from
+   * subcluster.
+   *
+   * @param mountPoint to be added mount point inside router
+   * @param lastEntry biggest listing from subcluster
+   * @param startAfter starting listing from client, used to define listing
+   *                   start boundary
+   * @param remainingEntries how many entries left from subcluster
+   * @return
+   */
+  private static boolean shouldAddMountPoint(
+      String mountPoint, String lastEntry, byte[] startAfter,
+      int remainingEntries) {
+    if (mountPoint.compareTo(DFSUtil.bytes2String(startAfter)) > 0 &&
+        mountPoint.compareTo(lastEntry) <= 0) {
+      return true;
+    }
+    if (remainingEntries == 0 && mountPoint.compareTo(lastEntry) >= 0) {
+      return true;
+    }
+    return false;
+  }
+
+  /**
    * Checks if the path is a directory and is supposed to be present in all
    * subclusters.
    * @param src the source path
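
The javadoc of shouldAddMountPoint above documents the two cases in which a mount point is merged into a listing batch: it lies between startAfter and the largest entry returned by the subclusters, or the subclusters have nothing left and the mount point sorts after all of their entries. A compact sketch of that predicate (the helper below mirrors shouldAddMountPoint but is rewritten on plain Strings so it runs outside the Router; all names and sample values are illustrative):

    public class MountPointMergeSketch {

      // Simplified stand-in for RouterClientProtocol#shouldAddMountPoint.
      static boolean shouldAddMountPoint(String mountPoint, String lastEntry,
          String startAfter, int remainingEntries) {
        if (mountPoint.compareTo(startAfter) > 0
            && mountPoint.compareTo(lastEntry) <= 0) {
          return true; // case 1: inside the window covered by this batch
        }
        // case 2: last batch, mount point sorts after everything from the subclusters
        return remainingEntries == 0 && mountPoint.compareTo(lastEntry) >= 0;
      }

      public static void main(String[] args) {
        // This batch covers entries up to file-7 and the subclusters have more to send.
        System.out.println(shouldAddMountPoint("file-5", "file-7", "", 3)); // true
        System.out.println(shouldAddMountPoint("file-9", "file-7", "", 3)); // false, defer
        // Final batch: nothing remains, so trailing mount points are appended now.
        System.out.println(shouldAddMountPoint("file-9", "file-7", "", 0)); // true
      }
    }
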
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
index 131dd74..3933425 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
@@ -327,16 +327,30 @@ public class MockResolver
 
   @Override
   public List<String> getMountPoints(String path) throws IOException {
-    // Mounts only supported under root level
-    if (!path.equals("/")) {
-      return null;
-    }
     List<String> mounts = new ArrayList<>();
-    for (String mount : this.locations.keySet()) {
-      if (mount.length() > 1) {
-        // Remove leading slash, this is the behavior of the mount tree,
-        // return only names.
-        mounts.add(mount.replace("/", ""));
+    // for root path search, returning all downstream root level mapping
+    if (path.equals("/")) {
+      // Mounts only supported under root level
+      for (String mount : this.locations.keySet()) {
+        if (mount.length() > 1) {
+          // Remove leading slash, this is the behavior of the mount tree,
+          // return only names.
+          mounts.add(mount.replace("/", ""));
+        }
+      }
+    } else {
+      // a simplified version of MountTableResolver implementation
+      for (String key : this.locations.keySet()) {
+        if (key.startsWith(path)) {
+          String child = key.substring(path.length());
+          if (child.length() > 0) {
+            // only take children so remove parent path and /
+            mounts.add(key.substring(path.length()+1));
+          }
+        }
+      }
+      if (mounts.size() == 0) {
+        mounts = null;
       }
     }
     return mounts;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
index bfc712f..082094e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
@@ -203,6 +203,9 @@ public class TestRouterRpc {
     cluster.addNamenodeOverrides(namenodeConf);
     cluster.setIndependentDNs();
 
+    Configuration conf = new Configuration();
+    conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, 5);
+    cluster.addNamenodeOverrides(conf);
     // Start NNs and DNs and wait until ready
     cluster.startCluster();
 
@@ -437,6 +440,62 @@ public class TestRouterRpc {
   }
 
   @Test
+  public void testProxyListFilesLargeDir() throws IOException {
+    // Call listStatus against a dir with many files
+    // Create a parent point as well as a subfolder mount
+    // /parent
+    //    ns0 -> /parent
+    // /parent/file-7
+    //    ns0 -> /parent/file-7
+    // /parent/file-0
+    //    ns0 -> /parent/file-0
+    for (RouterContext rc : cluster.getRouters()) {
+      MockResolver resolver =
+          (MockResolver) rc.getRouter().getSubclusterResolver();
+      resolver.addLocation("/parent", ns, "/parent");
+      // file-0 is only in mount table
+      resolver.addLocation("/parent/file-0", ns, "/parent/file-0");
+      // file-7 is both in mount table and in file system
+      resolver.addLocation("/parent/file-7", ns, "/parent/file-7");
+    }
+
+    // Test the case when there is no subcluster path and only mount point
+    FileStatus[] result = routerFS.listStatus(new Path("/parent"));
+    assertEquals(2, result.length);
+    // this makes sure file[0-8] is added in order
+    assertEquals("file-0", result[0].getPath().getName());
+    assertEquals("file-7", result[1].getPath().getName());
+
+    // Create files and test full listing in order
+    NamenodeContext nn = cluster.getNamenode(ns, null);
+    FileSystem nnFileSystem = nn.getFileSystem();
+    for (int i = 1; i < 9; i++) {
+      createFile(nnFileSystem, "/parent/file-"+i, 32);
+    }
+
+    result = routerFS.listStatus(new Path("/parent"));
+    assertEquals(9, result.length);
+    // this makes sure file[0-8] is added in order
+    for (int i = 0; i < 9; i++) {
+      assertEquals("file-"+i, result[i].getPath().getName());
+    }
+
+    // Add file-9 and now this listing will be added from mount point
+    for (RouterContext rc : cluster.getRouters()) {
+      MockResolver resolver =
+          (MockResolver) rc.getRouter().getSubclusterResolver();
+      resolver.addLocation("/parent/file-9", ns, "/parent/file-9");
+    }
+    assertFalse(verifyFileExists(nnFileSystem, "/parent/file-9"));
+    result = routerFS.listStatus(new Path("/parent"));
+    // file-9 will be added by mount point
+    assertEquals(10, result.length);
+    for (int i = 0; i < 10; i++) {
+      assertEquals("file-"+i, result[i].getPath().getName());
+    }
+  }
+
+  @Test
   public void testProxyListFilesWithConflict()
       throws IOException, InterruptedException {
 

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[hadoop] 06/09: HDFS-15252. HttpFS: setWorkingDirectory should not accept invalid paths. Contributed by hemanthboyina.

Posted by we...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 14fddba7154191a701f43ac9123954b49b2aabf6
Author: Takanobu Asanuma <ta...@apache.org>
AuthorDate: Thu Apr 2 19:21:02 2020 +0900

    HDFS-15252. HttpFS: setWorkingDirectory should not accept invalid paths. Contributed by hemanthboyina.
    
    (cherry picked from commit 736659e0e1ab2882313e3a41d9a20d4b0f5b0816)
---
 .../org/apache/hadoop/fs/http/client/HttpFSFileSystem.java   |  6 ++++++
 .../org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java | 12 +++++++++++-
 2 files changed, 17 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index cb8468d..e13f44e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
@@ -802,6 +803,11 @@ public class HttpFSFileSystem extends FileSystem
    */
   @Override
   public void setWorkingDirectory(Path newDir) {
+    String result = newDir.toUri().getPath();
+    if (!DFSUtilClient.isValidName(result)) {
+      throw new IllegalArgumentException(
+          "Invalid DFS directory name " + result);
+    }
     workingDir = newDir;
   }
 
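
The hunk above makes HttpFSFileSystem validate the new working directory before storing it, matching HDFS behaviour. A minimal sketch of that guard (the isValidNameSimplified check below is a deliberately reduced, hypothetical stand-in for DFSUtilClient.isValidName and only covers the absolute-path and ':' cases exercised by the test):

    public class WorkingDirGuardSketch {

      // Hypothetical, much-simplified stand-in for DFSUtilClient.isValidName().
      static boolean isValidNameSimplified(String src) {
        if (!src.startsWith("/")) {
          return false;                 // must be an absolute path
        }
        for (String component : src.split("/")) {
          if (component.contains(":")) {
            return false;               // ':' is not allowed in a path component
          }
        }
        return true;
      }

      static String workingDir = "/";

      static void setWorkingDirectory(String newDir) {
        if (!isValidNameSimplified(newDir)) {
          throw new IllegalArgumentException("Invalid DFS directory name " + newDir);
        }
        workingDir = newDir;
      }

      public static void main(String[] args) {
        setWorkingDirectory("/tmp");     // accepted, workingDir becomes /tmp
        System.out.println(workingDir);
        setWorkingDirectory("/foo:bar"); // rejected with IllegalArgumentException
      }
    }
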
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
index 53b9b7a..7182c98 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.HFSTestCase;
 import org.apache.hadoop.test.HadoopUsersConfTestHelper;
+import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.test.TestDir;
 import org.apache.hadoop.test.TestDirHelper;
 import org.apache.hadoop.test.TestHdfs;
@@ -557,9 +558,18 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     fs = getHttpFSFileSystem();
     fs.setWorkingDirectory(new Path("/tmp"));
     workingDir = fs.getWorkingDirectory();
-    fs.close();
     assertEquals(workingDir.toUri().getPath(),
         new Path("/tmp").toUri().getPath());
+    final FileSystem httpFs = getHttpFSFileSystem();
+    LambdaTestUtils.intercept(IllegalArgumentException.class,
+        "Invalid DFS directory name /foo:bar",
+        () -> httpFs.setWorkingDirectory(new Path("/foo:bar")));
+    fs.setWorkingDirectory(new Path("/bar"));
+    workingDir = fs.getWorkingDirectory();
+    httpFs.close();
+    fs.close();
+    assertEquals(workingDir.toUri().getPath(),
+        new Path("/bar").toUri().getPath());
   }
 
   private void testTrashRoot() throws Exception {

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[hadoop] 05/09: HDFS-15591. RBF: Fix webHdfs file display error. Contributed by wangzhaohui.

Posted by we...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 2da3356428e9a0415ab5304ec9f4bd3e769782fc
Author: Ayush Saxena <ay...@apache.org>
AuthorDate: Mon Sep 28 23:55:31 2020 +0530

    HDFS-15591. RBF: Fix webHdfs file display error. Contributed by wangzhaohui.
    
    (cherry picked from commit fc8a6dd8a92f8e18436d1a73d6e8ef808c583aee)
---
 .../federation/router/RouterClientProtocol.java    |  7 +++--
 .../federation/router/TestRouterMountTable.java    | 31 ++++++++++++++++++++++
 2 files changed, 36 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
index 2a0fe7c..744d35d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
@@ -1977,7 +1977,8 @@ public class RouterClientProtocol implements ClientProtocol {
    * @param date Map with the dates.
    * @return New HDFS file status representing a mount point.
    */
-  private HdfsFileStatus getMountPointStatus(
+  @VisibleForTesting
+  HdfsFileStatus getMountPointStatus(
       String name, int childrenNum, long date) {
     long modTime = date;
     long accessTime = date;
@@ -2028,6 +2029,8 @@ public class RouterClientProtocol implements ClientProtocol {
       }
     }
     long inodeId = 0;
+    Path path = new Path(name);
+    String nameStr = path.getName();
     return new HdfsFileStatus.Builder()
         .isdir(true)
         .mtime(modTime)
@@ -2036,7 +2039,7 @@ public class RouterClientProtocol implements ClientProtocol {
         .owner(owner)
         .group(group)
         .symlink(new byte[0])
-        .path(DFSUtil.string2Bytes(name))
+        .path(DFSUtil.string2Bytes(nameStr))
         .fileId(inodeId)
         .children(childrenNum)
         .flags(flags)
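
The change above stores only the final path component of the mount point in the HdfsFileStatus, which is what webHDFS renders as the entry name. A tiny sketch (it uses org.apache.hadoop.fs.Path, which the patch itself relies on; the sample path is illustrative):

    import org.apache.hadoop.fs.Path;

    public class MountPointNameSketch {
      public static void main(String[] args) {
        String name = "/testA/testB";
        // Previously the full string "/testA/testB" became the entry name, so webHDFS
        // displayed a bogus nested path; Path#getName() keeps just "testB".
        String nameStr = new Path(name).getName();
        System.out.println(nameStr); // testB
      }
    }
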
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
index 77ec47a..6df94c0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
@@ -291,6 +291,37 @@ public class TestRouterMountTable {
   }
 
   /**
+   * Verify the getMountPointStatus result of passing in different parameters.
+   */
+  @Test
+  public void testGetMountPointStatus() throws IOException {
+    MountTable addEntry = MountTable.newInstance("/testA/testB/testC/testD",
+        Collections.singletonMap("ns0", "/testA/testB/testC/testD"));
+    assertTrue(addMountTable(addEntry));
+    RouterClientProtocol clientProtocol = new RouterClientProtocol(
+        nnFs0.getConf(), routerContext.getRouter().getRpcServer());
+    String src = "/";
+    String child = "testA";
+    Path childPath = new Path(src, child);
+    HdfsFileStatus dirStatus =
+        clientProtocol.getMountPointStatus(childPath.toString(), 0, 0);
+    assertEquals(child, dirStatus.getLocalName());
+
+    String src1 = "/testA";
+    String child1 = "testB";
+    Path childPath1 = new Path(src1, child1);
+    HdfsFileStatus dirStatus1 =
+        clientProtocol.getMountPointStatus(childPath1.toString(), 0, 0);
+    assertEquals(child1, dirStatus1.getLocalName());
+
+    String src2 = "/testA/testB";
+    String child2 = "testC";
+    Path childPath2 = new Path(src2, child2);
+    HdfsFileStatus dirStatus2 =
+        clientProtocol.getMountPointStatus(childPath2.toString(), 0, 0);
+    assertEquals(child2, dirStatus2.getLocalName());
+  }
+  /**
    * GetListing of testPath through router.
    */
   private void getListing(String testPath)

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[hadoop] 02/09: HDFS-15198. RBF: Add test for MountTableRefresherService failed to refresh other router MountTableEntries in secure mode. Contributed by zhengchenyu.

Posted by we...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 5bfc3a4c3a8ee8edace051b367043556b7af582d
Author: Ayush Saxena <ay...@apache.org>
AuthorDate: Sat Jul 18 15:36:55 2020 +0530

    HDFS-15198. RBF: Add test for MountTableRefresherService failed to refresh other router MountTableEntries in secure mode. Contributed by zhengchenyu.
    
    (cherry picked from commit 8a9a674ef10a951c073ef17ba6db1ff07cff52cd)
---
 .../server/federation/FederationTestUtils.java     |   2 +-
 .../TestRouterMountTableCacheRefreshSecure.java    | 344 +++++++++++++++++++++
 2 files changed, 345 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java
index fd6dc7f..bf6f5a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java
@@ -187,7 +187,7 @@ public final class FederationTestUtils {
         }
         return false;
       }
-    }, 1000, 20 * 1000);
+    }, 1000, 60 * 1000);
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefreshSecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefreshSecure.java
new file mode 100644
index 0000000..0cfdaea
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefreshSecure.java
@@ -0,0 +1,344 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import static org.apache.hadoop.fs.contract.router.SecurityConfUtil.initSecurity;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.curator.test.TestingServer;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.hdfs.server.federation.FederationTestUtils;
+import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster;
+import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext;
+import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
+import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
+import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
+import org.apache.hadoop.hdfs.server.federation.store.RouterStore;
+import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
+import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
+import org.apache.hadoop.service.Service.STATE;
+import org.apache.hadoop.util.Time;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This test class verifies that mount table cache is updated on all the routers
+ * when MountTableRefreshService and security mode are enabled and there is a
+ * change in mount table entries.
+ */
+public class TestRouterMountTableCacheRefreshSecure {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestRouterMountTableCacheRefreshSecure.class);
+
+  private static TestingServer curatorTestingServer;
+  private static MiniRouterDFSCluster cluster;
+  private static RouterContext routerContext;
+  private static MountTableManager mountTableManager;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    curatorTestingServer = new TestingServer();
+    curatorTestingServer.start();
+    final String connectString = curatorTestingServer.getConnectString();
+    int numNameservices = 2;
+    Configuration conf = new RouterConfigBuilder().refreshCache().admin().rpc()
+        .heartbeat().build();
+    conf.addResource(initSecurity());
+    conf.setClass(RBFConfigKeys.FEDERATION_STORE_DRIVER_CLASS,
+        StateStoreZooKeeperImpl.class, StateStoreDriver.class);
+    conf.setClass(RBFConfigKeys.FEDERATION_FILE_RESOLVER_CLIENT_CLASS,
+        RBFConfigKeys.FEDERATION_FILE_RESOLVER_CLIENT_CLASS_DEFAULT,
+        FileSubclusterResolver.class);
+    conf.set(CommonConfigurationKeys.ZK_ADDRESS, connectString);
+    conf.setBoolean(RBFConfigKeys.DFS_ROUTER_STORE_ENABLE, true);
+    cluster = new MiniRouterDFSCluster(false, numNameservices, conf);
+    cluster.addRouterOverrides(conf);
+    cluster.startCluster(conf);
+    cluster.startRouters();
+    cluster.waitClusterUp();
+    routerContext = cluster.getRandomRouter();
+    RouterStore routerStateManager =
+        routerContext.getRouter().getRouterStateManager();
+    mountTableManager = routerContext.getAdminClient().getMountTableManager();
+    // wait for one minute for all the routers to get registered
+    FederationTestUtils.waitRouterRegistered(routerStateManager,
+        numNameservices, 60000);
+  }
+
+  @AfterClass
+  public static void destory() {
+    try {
+      curatorTestingServer.close();
+      cluster.shutdown();
+    } catch (IOException e) {
+      LOG.error("Found error when destroy, caused by: {}", e.getMessage());
+    }
+  }
+
+  @After
+  public void tearDown() throws IOException {
+    clearEntries();
+  }
+
+  private void clearEntries() throws IOException {
+    List<MountTable> result = getMountTableEntries();
+    for (MountTable mountTable : result) {
+      RemoveMountTableEntryResponse removeMountTableEntry = mountTableManager.
+          removeMountTableEntry(RemoveMountTableEntryRequest.
+          newInstance(mountTable.getSourcePath()));
+      assertTrue(removeMountTableEntry.getStatus());
+    }
+  }
+
+  /**
+   * addMountTableEntry API should internally update the cache on all the
+   * routers.
+   */
+  @Test
+  public void testMountTableEntriesCacheUpdatedAfterAddAPICall()
+      throws IOException {
+    // Existing mount table size
+    String srcPath = "/addPath";
+    MountTable newEntry = MountTable.newInstance(srcPath,
+        Collections.singletonMap("ns0", "/addPathDest"), Time.now(),
+        Time.now());
+    addMountTableEntry(mountTableManager, newEntry);
+
+    // When add entry is done, all the routers must have updated its mount table
+    // entry
+    List<RouterContext> routers = getRouters();
+    for (RouterContext rc : routers) {
+      List<MountTable> result = getMountTableEntries(rc.getAdminClient()
+          .getMountTableManager());
+      assertEquals(1, result.size());
+      MountTable mountTableResult = result.get(0);
+      assertEquals(srcPath, mountTableResult.getSourcePath());
+    }
+  }
+
+  /**
+   * removeMountTableEntry API should internally update the cache on all the
+   * routers.
+   */
+  @Test
+  public void testMountTableEntriesCacheUpdatedAfterRemoveAPICall()
+      throws IOException {
+    // add
+    String srcPath = "/removePathSrc";
+    MountTable newEntry = MountTable.newInstance(srcPath,
+        Collections.singletonMap("ns0", "/removePathDest"), Time.now(),
+        Time.now());
+    addMountTableEntry(mountTableManager, newEntry);
+
+    // When add entry is done, all the routers must have updated its mount
+    // table entry
+    List<RouterContext> routers = getRouters();
+    for (RouterContext rc : routers) {
+      List<MountTable> result =
+          getMountTableEntries(rc.getAdminClient().getMountTableManager());
+      assertEquals(1, result.size());
+      MountTable mountTableResult = result.get(0);
+      assertEquals(srcPath, mountTableResult.getSourcePath());
+    }
+
+    // remove
+    RemoveMountTableEntryResponse removeMountTableEntry =
+        mountTableManager.removeMountTableEntry(
+            RemoveMountTableEntryRequest.newInstance(srcPath));
+    assertTrue(removeMountTableEntry.getStatus());
+
+    // When remove entry is done, all the routers must have removed its mount
+    // table entry
+    routers = getRouters();
+    for (RouterContext rc : routers) {
+      List<MountTable> result =
+          getMountTableEntries(rc.getAdminClient().getMountTableManager());
+      assertEquals(0, result.size());
+    }
+  }
+
+  /**
+   * updateMountTableEntry API should internally update the cache on all the
+   * routers.
+   */
+  @Test
+  public void testMountTableEntriesCacheUpdatedAfterUpdateAPICall()
+      throws IOException {
+    // add
+    String srcPath = "/updatePathSrc";
+    String dstPath = "/updatePathDest";
+    String nameServiceId = "ns0";
+    MountTable newEntry = MountTable.newInstance(srcPath,
+        Collections.singletonMap("ns0", "/updatePathDest"), Time.now(),
+        Time.now());
+    addMountTableEntry(mountTableManager, newEntry);
+
+    // When add entry is done, all the routers must have updated its mount table
+    // entry
+    List<RouterContext> routers = getRouters();
+    for (RouterContext rc : routers) {
+      List<MountTable> result =
+          getMountTableEntries(rc.getAdminClient().getMountTableManager());
+      assertEquals(1, result.size());
+      MountTable mountTableResult = result.get(0);
+      assertEquals(srcPath, mountTableResult.getSourcePath());
+      assertEquals(nameServiceId,
+          mountTableResult.getDestinations().get(0).getNameserviceId());
+      assertEquals(dstPath,
+          mountTableResult.getDestinations().get(0).getDest());
+    }
+
+    // update
+    String key = "ns1";
+    String value = "/updatePathDest2";
+    MountTable upateEntry = MountTable.newInstance(srcPath,
+        Collections.singletonMap(key, value), Time.now(), Time.now());
+    UpdateMountTableEntryResponse updateMountTableEntry =
+        mountTableManager.updateMountTableEntry(
+            UpdateMountTableEntryRequest.newInstance(upateEntry));
+    assertTrue(updateMountTableEntry.getStatus());
+    MountTable updatedMountTable = getMountTableEntry(srcPath);
+    assertNotNull("Updated mount table entrty cannot be null",
+        updatedMountTable);
+
+    // When update entry is done, all the routers must have updated its mount
+    // table entry
+    routers = getRouters();
+    for (RouterContext rc : routers) {
+      List<MountTable> result =
+          getMountTableEntries(rc.getAdminClient().getMountTableManager());
+      assertEquals(1, result.size());
+      MountTable mountTableResult = result.get(0);
+      assertEquals(srcPath, mountTableResult.getSourcePath());
+      assertEquals(key, updatedMountTable.getDestinations().get(0)
+          .getNameserviceId());
+      assertEquals(value, updatedMountTable.getDestinations().get(0).getDest());
+    }
+  }
+
+  /**
+   * After caching RouterClient if router goes down, refresh should be
+   * successful on other available router. The router which is not running
+   * should be ignored.
+   */
+  @Test
+  public void testCachedRouterClientBehaviourAfterRouterStoped()
+      throws IOException {
+    String srcPath = "/addPathClientCache";
+    MountTable newEntry = MountTable.newInstance(srcPath,
+        Collections.singletonMap("ns0", "/addPathClientCacheDest"), Time.now(),
+        Time.now());
+    addMountTableEntry(mountTableManager, newEntry);
+
+    // When Add entry is done, all the routers must have updated its mount table
+    // entry
+    List<RouterContext> routers = getRouters();
+    for (RouterContext rc : routers) {
+      List<MountTable> result =
+          getMountTableEntries(rc.getAdminClient().getMountTableManager());
+      assertEquals(1, result.size());
+      MountTable mountTableResult = result.get(0);
+      assertEquals(srcPath, mountTableResult.getSourcePath());
+    }
+
+    // Lets stop one router
+    for (RouterContext rc : routers) {
+      InetSocketAddress adminServerAddress = rc.getRouter()
+          .getAdminServerAddress();
+      if (!routerContext.getRouter().getAdminServerAddress()
+          .equals(adminServerAddress)) {
+        cluster.stopRouter(rc);
+        break;
+      }
+    }
+
+    srcPath = "/addPathClientCache2";
+    newEntry = MountTable.newInstance(srcPath,
+        Collections.singletonMap("ns0", "/addPathClientCacheDest2"), Time.now(),
+        Time.now());
+    addMountTableEntry(mountTableManager, newEntry);
+    for (RouterContext rc : getRouters()) {
+      List<MountTable> result =
+          getMountTableEntries(rc.getAdminClient().getMountTableManager());
+      assertEquals(2, result.size());
+    }
+  }
+
+  private List<RouterContext> getRouters() {
+    List<RouterContext> result = new ArrayList<>();
+    for (RouterContext rc : cluster.getRouters()) {
+      if (rc.getRouter().getServiceState() == STATE.STARTED) {
+        result.add(rc);
+      }
+    }
+    return result;
+  }
+
+  private MountTable getMountTableEntry(String srcPath) throws IOException {
+    List<MountTable> mountTableEntries = getMountTableEntries();
+    for (MountTable mountTable : mountTableEntries) {
+      String sourcePath = mountTable.getSourcePath();
+      if (srcPath.equals(sourcePath)) {
+        return mountTable;
+      }
+    }
+    return null;
+  }
+
+  private void addMountTableEntry(MountTableManager mountTableMgr,
+      MountTable newEntry) throws IOException {
+    AddMountTableEntryRequest addRequest =
+        AddMountTableEntryRequest.newInstance(newEntry);
+    AddMountTableEntryResponse addResponse =
+        mountTableMgr.addMountTableEntry(addRequest);
+    assertTrue(addResponse.getStatus());
+  }
+
+  private List<MountTable> getMountTableEntries() throws IOException {
+    return getMountTableEntries(mountTableManager);
+  }
+
+  private List<MountTable> getMountTableEntries(
+      MountTableManager mountTableManagerParam) throws IOException {
+    GetMountTableEntriesRequest request =
+        GetMountTableEntriesRequest.newInstance("/");
+    return mountTableManagerParam.getMountTableEntries(request).getEntries();
+  }
+}
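
For reference, the helper methods above only cover adding and listing mount
table entries. A removal helper in the same style could look like the sketch
below; it is not part of this patch, and it assumes that
RemoveMountTableEntryRequest/RemoveMountTableEntryResponse follow the same
newInstance()/getStatus() pattern as the add/get protocol classes already
imported by this test.

  // Sketch only: would slot into the same test class, with
  // org.apache.hadoop.hdfs.server.federation.store.protocol.
  // RemoveMountTableEntryRequest and RemoveMountTableEntryResponse imported.
  private void removeMountTableEntry(MountTableManager mountTableMgr,
      String srcPath) throws IOException {
    RemoveMountTableEntryRequest removeRequest =
        RemoveMountTableEntryRequest.newInstance(srcPath);
    RemoveMountTableEntryResponse removeResponse =
        mountTableMgr.removeMountTableEntry(removeRequest);
    // A true status means the entry was removed from the state store; the
    // routers pick it up on their next mount table cache refresh.
    assertTrue(removeResponse.getStatus());
  }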

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[hadoop] 07/09: HDFS-15316. Deletion failure should not remove directory from snapshottables. Contributed by hemanthboyina

Posted by we...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 02d37c7d22985cae323aef3f05cb48361ed414c4
Author: Surendra Singh Lilhore <su...@apache.org>
AuthorDate: Wed May 13 15:01:07 2020 +0530

    HDFS-15316. Deletion failure should not remove directory from snapshottables. Contributed by hemanthboyina
    
    (cherry picked from commit 743c2e9071f4a73e0196ad4ca005b767758642b9)
---
 .../hadoop/hdfs/server/namenode/FSDirDeleteOp.java |  4 +-
 .../hdfs/server/namenode/TestDeleteRace.java       | 46 ++++++++++++++++++++++
 2 files changed, 48 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
index 47f25f6..2dfb90e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
@@ -61,10 +61,10 @@ class FSDirDeleteOp {
             removedUCFiles);
         if (unprotectedDelete(fsd, iip, context, mtime)) {
           filesRemoved = context.quotaDelta().getNsDelta();
+          fsn.removeSnapshottableDirs(snapshottableDirs);
         }
         fsd.updateReplicationFactor(context.collectedBlocks()
                                         .toUpdateReplicationInfo());
-        fsn.removeSnapshottableDirs(snapshottableDirs);
         fsd.updateCount(iip, context.quotaDelta(), false);
       }
     } finally {
@@ -144,9 +144,9 @@ class FSDirDeleteOp {
         new ReclaimContext(fsd.getBlockStoragePolicySuite(),
             collectedBlocks, removedINodes, removedUCFiles),
         mtime);
-    fsn.removeSnapshottableDirs(snapshottableDirs);
 
     if (filesRemoved) {
+      fsn.removeSnapshottableDirs(snapshottableDirs);
       fsn.removeLeasesAndINodes(removedUCFiles, removedINodes, false);
       fsn.getBlockManager().removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
index cdb57f2..9d32528 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
@@ -66,6 +66,8 @@ import org.mockito.Mockito;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_LEASE_HARDLIMIT_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LEASE_RECHECK_INTERVAL_MS_KEY;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.ArgumentMatchers.any;
 import static org.junit.Assert.assertNotEquals;
 
 /**
@@ -492,4 +494,48 @@ public class TestDeleteRace {
       }
     }
   }
+
+  /**
+   * Test getting the snapshot diff on a directory whose deletion failed.
+   */
+  @Test
+  public void testDeleteOnSnapshottableDir() throws Exception {
+    conf.setBoolean(
+        DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DIFF_ALLOW_SNAP_ROOT_DESCENDANT,
+        true);
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      cluster.waitActive();
+      DistributedFileSystem hdfs = cluster.getFileSystem();
+      FSNamesystem fsn = cluster.getNamesystem();
+      FSDirectory fsdir = fsn.getFSDirectory();
+      Path dir = new Path("/dir");
+      hdfs.mkdirs(dir);
+      hdfs.allowSnapshot(dir);
+      Path file1 = new Path(dir, "file1");
+      Path file2 = new Path(dir, "file2");
+
+      // make removeLastINode fail so the directory does not get deleted
+      FSDirectory fsdir2 = Mockito.spy(fsdir);
+      cluster.getNamesystem().setFSDirectory(fsdir2);
+      Mockito.doReturn(-1L).when(fsdir2).removeLastINode(any());
+      hdfs.delete(dir, true);
+
+      // create files and create snapshots
+      DFSTestUtil.createFile(hdfs, file1, BLOCK_SIZE, (short) 1, 0);
+      hdfs.createSnapshot(dir, "s1");
+      DFSTestUtil.createFile(hdfs, file2, BLOCK_SIZE, (short) 1, 0);
+      hdfs.createSnapshot(dir, "s2");
+
+      // should still be able to get snapshot diff under the snapshot root
+      Path dirDir1 = new Path(dir, "dir1");
+      hdfs.mkdirs(dirDir1);
+      hdfs.getSnapshotDiffReport(dirDir1, "s2", "s1");
+      assertEquals(1, fsn.getSnapshotManager().getNumSnapshottableDirs());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }
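
The core of this change is an ordering invariant: the snapshottable-directory
bookkeeping must only be cleared once the delete is known to have taken
effect. That pattern can be illustrated in isolation with a minimal,
self-contained Mockito sketch; Deleter and Registry below are hypothetical
stand-ins rather than Hadoop classes, and only the ordering mirrors the fix.

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

public class CleanupAfterSuccessSketch {
  interface Registry { void unregister(String dir); }
  interface Deleter { boolean delete(String dir); }

  static void deleteAndUnregister(Deleter deleter, Registry registry,
      String dir) {
    // Mirrors the HDFS-15316 ordering: drop the registration only after the
    // delete has actually succeeded.
    if (deleter.delete(dir)) {
      registry.unregister(dir);
    }
  }

  public static void main(String[] args) {
    Registry registry = mock(Registry.class);
    Deleter failingDeleter = mock(Deleter.class);
    when(failingDeleter.delete("/dir")).thenReturn(false);

    deleteAndUnregister(failingDeleter, registry, "/dir");

    // A failed delete must leave the directory registered, which is what the
    // new testDeleteOnSnapshottableDir verifies against the real namenode.
    verify(registry, never()).unregister("/dir");
  }
}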

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[hadoop] 03/09: HDFS-15300. RBF: updateActiveNamenode() is invalid when RPC address is IP. Contributed by xuzq.

Posted by we...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit a549b4a82efc2aed1c814210f112a356ab2905ea
Author: Ayush Saxena <ay...@apache.org>
AuthorDate: Tue May 12 21:48:44 2020 +0530

    HDFS-15300. RBF: updateActiveNamenode() is invalid when RPC address is IP. Contributed by xuzq.
    
    (cherry picked from commit 936bf09c3745cfec26fa9cfa0562f88b1f8be133)
---
 .../src/main/java/org/apache/hadoop/net/NetUtils.java   | 16 ++++++++++++++++
 .../federation/resolver/MembershipNamenodeResolver.java |  4 +++-
 .../hdfs/server/federation/FederationTestUtils.java     | 11 +++++++++--
 .../federation/resolver/TestNamenodeResolver.java       | 17 +++++++++++++++++
 4 files changed, 45 insertions(+), 3 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
index 5ded4b4..6c7e443 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
@@ -694,6 +694,22 @@ public class NetUtils {
   }
 
   /**
+   * Attempt to normalize the given string to "host:port"
+   * if it looks like "ip:port".
+   *
+   * @param ipPort a string that may be either ip:port or host:port.
+   * @return the normalized host:port string.
+   */
+  public static String normalizeIP2HostName(String ipPort) {
+    if (null == ipPort || !ipPortPattern.matcher(ipPort).matches()) {
+      return ipPort;
+    }
+
+    InetSocketAddress address = createSocketAddr(ipPort);
+    return getHostPortString(address);
+  }
+
+  /**
    * Return hostname without throwing exception.
    * The returned hostname String format is "hostname".
    * @return hostname
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java
index 6117e9a..6629003 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.NamenodeHeartbeat
 import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateNamenodeRegistrationRequest;
 import org.apache.hadoop.hdfs.server.federation.store.records.MembershipState;
 import org.apache.hadoop.hdfs.server.federation.store.records.MembershipStats;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -263,7 +264,8 @@ public class MembershipNamenodeResolver
 
     MembershipState record = MembershipState.newInstance(
         routerId, report.getNameserviceId(), report.getNamenodeId(),
-        report.getClusterId(), report.getBlockPoolId(), report.getRpcAddress(),
+        report.getClusterId(), report.getBlockPoolId(),
+        NetUtils.normalizeIP2HostName(report.getRpcAddress()),
         report.getServiceAddress(), report.getLifelineAddress(),
         report.getWebScheme(), report.getWebAddress(), report.getState(),
         report.getSafemode());
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java
index bf6f5a8..2017a45 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java
@@ -138,8 +138,15 @@ public final class FederationTestUtils {
   public static NamenodeStatusReport createNamenodeReport(String ns, String nn,
       HAServiceState state) {
     Random rand = new Random();
-    NamenodeStatusReport report = new NamenodeStatusReport(ns, nn,
-        "localhost:" + rand.nextInt(10000), "localhost:" + rand.nextInt(10000),
+    return createNamenodeReport(ns, nn, "localhost:"
+        + rand.nextInt(10000), state);
+  }
+
+  public static NamenodeStatusReport createNamenodeReport(String ns, String nn,
+      String rpcAddress, HAServiceState state) {
+    Random rand = new Random();
+    NamenodeStatusReport report = new NamenodeStatusReport(ns, nn, rpcAddress,
+        "localhost:" + rand.nextInt(10000),
         "localhost:" + rand.nextInt(10000), "http",
         "testwebaddress-" + ns + nn);
     if (state == null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java
index 932c861..df80037 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java
@@ -307,6 +307,23 @@ public class TestNamenodeResolver {
         FederationNamenodeServiceState.ACTIVE, namenode1.getState());
   }
 
+  @Test
+  public void testCacheUpdateOnNamenodeStateUpdateWithIp()
+      throws IOException {
+    final String rpcAddress = "127.0.0.1:10000";
+    assertTrue(namenodeResolver.registerNamenode(
+        createNamenodeReport(NAMESERVICES[0], NAMENODES[0], rpcAddress,
+            HAServiceState.STANDBY)));
+    stateStore.refreshCaches(true);
+
+    InetSocketAddress inetAddr = getInetSocketAddress(rpcAddress);
+    namenodeResolver.updateActiveNamenode(NAMESERVICES[0], inetAddr);
+    FederationNamenodeContext namenode =
+        namenodeResolver.getNamenodesForNameserviceId(NAMESERVICES[0]).get(0);
+    assertEquals("The namenode state should be ACTIVE post update.",
+        FederationNamenodeServiceState.ACTIVE, namenode.getState());
+  }
+
   /**
    * Creates InetSocketAddress from the given RPC address.
    * @param rpcAddr RPC address (host:port).

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org