Posted to common-commits@hadoop.apache.org by st...@apache.org on 2022/04/28 17:30:02 UTC

[hadoop] branch trunk updated (bda0881bf90 -> 9ed8d60511d)

This is an automated email from the ASF dual-hosted git repository.

stack pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


    from bda0881bf90 HDFS-16540 Data locality is lost when DataNode pod restarts in kubernetes (#4170)
     new 4e47eb66d12 Revert "HDFS-16540 Data locality is lost when DataNode pod restarts in kubernetes (#4170)"
     new 9ed8d60511d HDFS-16540. Data locality is lost when DataNode pod restarts in kubernetes (#4170)

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:




[hadoop] 01/02: Revert "HDFS-16540 Data locality is lost when DataNode pod restarts in kubernetes (#4170)"

Posted by st...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

stack pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 4e47eb66d123014b653d2a2c278a35366902d8a0
Author: stack <st...@apache.org>
AuthorDate: Thu Apr 28 10:26:46 2022 -0700

    Revert "HDFS-16540 Data locality is lost when DataNode pod restarts in kubernetes (#4170)"
    
    Revert in order to add the '.' after HDFS-16540 so that the commit message
    format matches precedent.
    
    This reverts commit bda0881bf90ae64b32dbaf7b42413e18598e434d.
---
 .BUILDING.txt.swp                                  | Bin 16384 -> 0 bytes
 .../server/blockmanagement/DatanodeManager.java    |  17 +-----------
 .../blockmanagement/TestDatanodeManager.java       |  29 ---------------------
 3 files changed, 1 insertion(+), 45 deletions(-)

diff --git a/.BUILDING.txt.swp b/.BUILDING.txt.swp
deleted file mode 100644
index 1fb0c25d0a5..00000000000
Binary files a/.BUILDING.txt.swp and /dev/null differ
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index fa4f573da56..a9850aa7f5a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -1171,7 +1171,6 @@ public class DatanodeManager {
         nodeN = null;
       }
   
-      boolean updateHost2DatanodeMap = false;
       if (nodeS != null) {
         if (nodeN == nodeS) {
           // The same datanode has been just restarted to serve the same data 
@@ -1190,11 +1189,7 @@ public class DatanodeManager {
             nodes with its data cleared (or user can just remove the StorageID
             value in "VERSION" file under the data directory of the datanode,
            but this might not work if VERSION file format has changed 
-         */
-          // Check if nodeS's host information is the same as nodeReg's; if not,
-          // host2DatanodeMap needs to be updated accordingly.
-          updateHost2DatanodeMap = !nodeS.getXferAddr().equals(nodeReg.getXferAddr());
-
+         */        
           NameNode.stateChangeLog.info("BLOCK* registerDatanode: " + nodeS
               + " is replaced by " + nodeReg + " with the same storageID "
               + nodeReg.getDatanodeUuid());
@@ -1204,11 +1199,6 @@ public class DatanodeManager {
         try {
           // update cluster map
           getNetworkTopology().remove(nodeS);
-
-          // Update Host2DatanodeMap
-          if (updateHost2DatanodeMap) {
-            getHost2DatanodeMap().remove(nodeS);
-          }
           if(shouldCountVersion(nodeS)) {
             decrementVersionCount(nodeS.getSoftwareVersion());
           }
@@ -1227,11 +1217,6 @@ public class DatanodeManager {
             nodeS.setDependentHostNames(
                 getNetworkDependenciesWithDefault(nodeS));
           }
-
-          if (updateHost2DatanodeMap) {
-            getHost2DatanodeMap().add(nodeS);
-          }
-
           getNetworkTopology().add(nodeS);
           resolveUpgradeDomain(nodeS);
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
index 232424d4404..5f5452ac16d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
@@ -138,35 +138,6 @@ public class TestDatanodeManager {
         mapToCheck.get("version1").intValue(), 1);
   }
 
-  /**
-   * This test checks that if a node is re-registered with a different ip, its
-   * host2DatanodeMap is correctly updated with the new ip.
-   */
-  @Test
-  public void testHost2NodeMapCorrectAfterReregister()
-          throws IOException, InterruptedException {
-    //Create the DatanodeManager which will be tested
-    FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
-    Mockito.when(fsn.hasWriteLock()).thenReturn(true);
-    Configuration conf = new Configuration();
-    DatanodeManager dm = mockDatanodeManager(fsn, conf);
-
-    String storageID = "someStorageID1";
-    String ipOld = "someIPOld" + storageID;
-    String ipNew = "someIPNew" + storageID;
-
-    dm.registerDatanode(new DatanodeRegistration(
-            new DatanodeID(ipOld, "", storageID, 9000, 0, 0, 0),
-            null, null, "version"));
-
-    dm.registerDatanode(new DatanodeRegistration(
-            new DatanodeID(ipNew, "", storageID, 9000, 0, 0, 0),
-            null, null, "version"));
-
-    assertNull("should be no node with old ip", dm.getDatanodeByHost(ipOld));
-    assertNotNull("should be a node with new ip", dm.getDatanodeByHost(ipNew));
-  }
-
   /**
    * This test sends a random sequence of node registrations and node removals
    * to the DatanodeManager (of nodes with different IDs and versions), and




[hadoop] 02/02: HDFS-16540. Data locality is lost when DataNode pod restarts in kubernetes (#4170)

Posted by st...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

stack pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 9ed8d60511dccf96108239c5c96e108a7d4bc975
Author: stack <st...@apache.org>
AuthorDate: Thu Apr 28 10:28:01 2022 -0700

    HDFS-16540. Data locality is lost when DataNode pod restarts in kubernetes (#4170)
    
    Reapplies the change reverted in commit 4e47eb66d123014b653d2a2c278a35366902d8a0,
    this time with the '.' after HDFS-16540, matching the commit message format
    used in all other commits.
---
 .BUILDING.txt.swp                                  | Bin 0 -> 16384 bytes
 .../server/blockmanagement/DatanodeManager.java    |  17 +++++++++++-
 .../blockmanagement/TestDatanodeManager.java       |  29 +++++++++++++++++++++
 3 files changed, 45 insertions(+), 1 deletion(-)

diff --git a/.BUILDING.txt.swp b/.BUILDING.txt.swp
new file mode 100644
index 00000000000..1fb0c25d0a5
Binary files /dev/null and b/.BUILDING.txt.swp differ
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index a9850aa7f5a..fa4f573da56 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -1171,6 +1171,7 @@ public class DatanodeManager {
         nodeN = null;
       }
   
+      boolean updateHost2DatanodeMap = false;
       if (nodeS != null) {
         if (nodeN == nodeS) {
           // The same datanode has been just restarted to serve the same data 
@@ -1189,7 +1190,11 @@ public class DatanodeManager {
             nodes with its data cleared (or user can just remove the StorageID
             value in "VERSION" file under the data directory of the datanode,
            but this might not work if VERSION file format has changed 
-         */        
+         */
+          // Check if nodeS's host information is the same as nodeReg's; if not,
+          // host2DatanodeMap needs to be updated accordingly.
+          updateHost2DatanodeMap = !nodeS.getXferAddr().equals(nodeReg.getXferAddr());
+
           NameNode.stateChangeLog.info("BLOCK* registerDatanode: " + nodeS
               + " is replaced by " + nodeReg + " with the same storageID "
               + nodeReg.getDatanodeUuid());
@@ -1199,6 +1204,11 @@ public class DatanodeManager {
         try {
           // update cluster map
           getNetworkTopology().remove(nodeS);
+
+          // Update Host2DatanodeMap
+          if (updateHost2DatanodeMap) {
+            getHost2DatanodeMap().remove(nodeS);
+          }
           if(shouldCountVersion(nodeS)) {
             decrementVersionCount(nodeS.getSoftwareVersion());
           }
@@ -1217,6 +1227,11 @@ public class DatanodeManager {
             nodeS.setDependentHostNames(
                 getNetworkDependenciesWithDefault(nodeS));
           }
+
+          if (updateHost2DatanodeMap) {
+            getHost2DatanodeMap().add(nodeS);
+          }
+
           getNetworkTopology().add(nodeS);
           resolveUpgradeDomain(nodeS);
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
index 5f5452ac16d..232424d4404 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
@@ -138,6 +138,35 @@ public class TestDatanodeManager {
         mapToCheck.get("version1").intValue(), 1);
   }
 
+  /**
+   * This test checks that if a node is re-registered with a different ip, its
+   * host2DatanodeMap is correctly updated with the new ip.
+   */
+  @Test
+  public void testHost2NodeMapCorrectAfterReregister()
+          throws IOException, InterruptedException {
+    //Create the DatanodeManager which will be tested
+    FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
+    Mockito.when(fsn.hasWriteLock()).thenReturn(true);
+    Configuration conf = new Configuration();
+    DatanodeManager dm = mockDatanodeManager(fsn, conf);
+
+    String storageID = "someStorageID1";
+    String ipOld = "someIPOld" + storageID;
+    String ipNew = "someIPNew" + storageID;
+
+    dm.registerDatanode(new DatanodeRegistration(
+            new DatanodeID(ipOld, "", storageID, 9000, 0, 0, 0),
+            null, null, "version"));
+
+    dm.registerDatanode(new DatanodeRegistration(
+            new DatanodeID(ipNew, "", storageID, 9000, 0, 0, 0),
+            null, null, "version"));
+
+    assertNull("should be no node with old ip", dm.getDatanodeByHost(ipOld));
+    assertNotNull("should be a node with new ip", dm.getDatanodeByHost(ipNew));
+  }
+
   /**
    * This test sends a random sequence of node registrations and node removals
    * to the DatanodeManager (of nodes with different IDs and versions), and
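
For readers following this thread outside the patch itself, below is a minimal,
self-contained sketch of the idea the reapplied commit implements: when a
datanode re-registers with the same storage ID but a different transfer address
(as happens when a DataNode pod restarts in Kubernetes and receives a new IP),
the host-to-datanode index must be re-keyed, otherwise lookups by the new
address miss and block locality is lost. The class and field names here
(HostMapRekeySketch, Node, hostToNode, idToNode) are hypothetical
simplifications, not the actual DatanodeManager or Host2NodesMap API.

import java.util.HashMap;
import java.util.Map;

/**
 * Minimal, self-contained sketch of the host-map re-keying the patch performs.
 * Hypothetical simplified types; not the real DatanodeManager/Host2NodesMap API.
 */
public class HostMapRekeySketch {

  /** Stand-in for a datanode descriptor: storage ID plus an "ip:port" address. */
  static final class Node {
    final String storageId;
    String xferAddr;
    Node(String storageId, String xferAddr) {
      this.storageId = storageId;
      this.xferAddr = xferAddr;
    }
  }

  /** Stand-in for host2DatanodeMap, keyed by transfer address. */
  private final Map<String, Node> hostToNode = new HashMap<>();
  /** Nodes keyed by storage ID, analogous to the datanode map. */
  private final Map<String, Node> idToNode = new HashMap<>();

  /** Register or re-register a node, mirroring the patched re-registration path. */
  void register(String storageId, String xferAddr) {
    Node existing = idToNode.get(storageId);
    if (existing == null) {
      Node fresh = new Node(storageId, xferAddr);
      idToNode.put(storageId, fresh);
      hostToNode.put(xferAddr, fresh);
      return;
    }
    // Same storage ID seen before: check whether the transfer address changed,
    // which is what the updateHost2DatanodeMap flag guards in the real patch.
    boolean updateHostMap = !existing.xferAddr.equals(xferAddr);
    if (updateHostMap) {
      hostToNode.remove(existing.xferAddr);   // drop the stale address entry
    }
    existing.xferAddr = xferAddr;             // adopt the new address
    if (updateHostMap) {
      hostToNode.put(xferAddr, existing);     // re-key under the new address
    }
  }

  Node byHost(String xferAddr) {
    return hostToNode.get(xferAddr);
  }

  public static void main(String[] args) {
    HostMapRekeySketch m = new HostMapRekeySketch();
    m.register("someStorageID1", "10.0.0.1:9000");
    // Pod restarts and comes back with a new IP but the same storage ID.
    m.register("someStorageID1", "10.0.0.2:9000");
    System.out.println(m.byHost("10.0.0.1:9000") == null);  // true: stale entry gone
    System.out.println(m.byHost("10.0.0.2:9000") != null);  // true: found by new IP
  }
}

Running main registers the same storage ID twice under different addresses and
checks that the old address no longer resolves while the new one does, which is
roughly what testHost2NodeMapCorrectAfterReregister in the patch asserts against
the real DatanodeManager.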


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org